author    hkuang <hkuang@google.com>  2013-06-19 15:33:45 -0700
committer hkuang <hkuang@google.com>  2013-06-20 10:24:31 -0700
commit    ba164dffc5a6795bce97fae02b51ccf3330e15e4 (patch)
tree      9b83fdf84e5bd8bb4d47ac37a7ea9ae3eef840b6
parent    ca15b5fc158a9df465aaf1acfe38d8cb5042c81b (diff)
Updates libvpx to enable the VP9 decoder.

This change enables the VP9 decoder for all build configurations. The checkout is from the master branch (hash: 12180c8329d56d72e8d4424b8fd82b1b2f8e846a).

Change-Id: Ieaba85d0bc54e1ecdf9596398dafa03c43182f8c
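For reference, a minimal sketch of exercising the newly enabled VP9 decoder through the libvpx decode API (error paths abbreviated; vpx_codec_vp9_dx() is the VP9 decoder interface declared in vpx/vp8dx.h):

#include <stddef.h>
#include <stdint.h>
#include "vpx/vpx_decoder.h"
#include "vpx/vp8dx.h"  /* declares vpx_codec_vp8_dx() and vpx_codec_vp9_dx() */

/* Decode one compressed VP9 frame and walk the decoded images. */
static int decode_one_frame(const uint8_t *data, size_t data_sz) {
  vpx_codec_ctx_t codec;
  vpx_codec_iter_t iter = NULL;
  const vpx_image_t *img;
  int ret = 0;

  if (vpx_codec_dec_init(&codec, vpx_codec_vp9_dx(), NULL, 0))
    return -1;  /* decoder failed to initialize */

  if (vpx_codec_decode(&codec, data, (unsigned int)data_sz, NULL, 0))
    ret = -1;   /* corrupt or unsupported bitstream */
  else
    while ((img = vpx_codec_get_frame(&codec, &iter)) != NULL) {
      /* img->planes[] / img->stride[] now hold the decoded YV12 planes */
    }

  vpx_codec_destroy(&codec);
  return ret;
}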
-rw-r--r--UPDATING6
-rw-r--r--armv7a-neon/libvpx_srcs.txt96
-rw-r--r--armv7a-neon/vp8_rtcd.h (renamed from armv7a-neon/vpx_rtcd.h)95
-rw-r--r--armv7a-neon/vp9_rtcd.h190
-rw-r--r--armv7a-neon/vpx_config.c2
-rw-r--r--armv7a-neon/vpx_config.h13
-rw-r--r--armv7a-neon/vpx_scale_rtcd.h62
-rw-r--r--armv7a-neon/vpx_version.h6
-rw-r--r--armv7a/libvpx_srcs.txt96
-rw-r--r--armv7a/vp8_rtcd.h (renamed from armv7a/vpx_rtcd.h)92
-rw-r--r--armv7a/vp9_rtcd.h190
-rw-r--r--armv7a/vpx_config.c2
-rw-r--r--armv7a/vpx_config.h13
-rw-r--r--armv7a/vpx_scale_rtcd.h59
-rw-r--r--armv7a/vpx_version.h6
-rw-r--r--generic/libvpx_srcs.txt96
-rw-r--r--generic/vp8_rtcd.h (renamed from generic/vpx_rtcd.h)90
-rw-r--r--generic/vp9_rtcd.h185
-rw-r--r--generic/vpx_config.c2
-rw-r--r--generic/vpx_config.h13
-rw-r--r--generic/vpx_scale_rtcd.h54
-rw-r--r--generic/vpx_version.h6
-rw-r--r--libvpx.mk15
-rw-r--r--libvpx/CHANGELOG29
-rw-r--r--libvpx/args.c335
-rw-r--r--libvpx/args.h33
-rw-r--r--libvpx/build/arm-msvs/obj_int_extract.bat25
-rw-r--r--libvpx/build/make/Android.mk47
-rw-r--r--libvpx/build/make/Makefile40
-rwxr-xr-xlibvpx/build/make/ads2armasm_ms.pl38
-rwxr-xr-xlibvpx/build/make/ads2gas.pl28
-rwxr-xr-xlibvpx/build/make/ads2gas_apple.pl4
-rwxr-xr-xlibvpx/build/make/configure.sh94
-rwxr-xr-xlibvpx/build/make/gen_msvs_proj.sh51
-rwxr-xr-xlibvpx/build/make/gen_msvs_sln.sh33
-rwxr-xr-xlibvpx/build/make/gen_msvs_vcxproj.sh530
-rw-r--r--libvpx/build/make/obj_int_extract.c1449
-rwxr-xr-xlibvpx/build/make/rtcd.sh59
-rw-r--r--libvpx/build/make/thumb.pm70
-rw-r--r--libvpx/build/x86-msvs/obj_int_extract.bat20
-rw-r--r--libvpx/build/x86-msvs/yasm.rules115
-rwxr-xr-xlibvpx/configure88
-rw-r--r--libvpx/example_xma.c317
-rw-r--r--libvpx/examples.mk57
-rw-r--r--libvpx/examples/decoder_tmpl.c1
-rw-r--r--libvpx/examples/decoder_tmpl.txt2
-rw-r--r--libvpx/examples/encoder_tmpl.txt2
-rw-r--r--libvpx/examples/postproc.txt2
-rw-r--r--libvpx/libmkv/EbmlBufferWriter.c74
-rw-r--r--libvpx/libmkv/EbmlBufferWriter.h14
-rw-r--r--libvpx/libmkv/EbmlIDs.h200
-rw-r--r--libvpx/libmkv/EbmlWriter.c206
-rw-r--r--libvpx/libmkv/WebMElement.c182
-rw-r--r--libvpx/libmkv/WebMElement.h4
-rw-r--r--libvpx/libmkv/testlibmkv.c87
-rw-r--r--libvpx/libs.mk264
-rw-r--r--libvpx/md5_utils.c320
-rw-r--r--libvpx/md5_utils.h9
-rw-r--r--libvpx/nestegg/halloc/src/macros.h2
-rw-r--r--libvpx/nestegg/include/nestegg/nestegg.h1
-rw-r--r--libvpx/nestegg/src/nestegg.c4
-rw-r--r--libvpx/solution.mk8
-rw-r--r--libvpx/test/acm_random.h29
-rw-r--r--libvpx/test/altref_test.cc16
-rw-r--r--libvpx/test/borders_test.cc86
-rw-r--r--libvpx/test/clear_system_state.h31
-rw-r--r--libvpx/test/codec_factory.h232
-rw-r--r--libvpx/test/config_test.cc14
-rw-r--r--libvpx/test/convolve_test.cc549
-rw-r--r--libvpx/test/cq_test.cc21
-rw-r--r--libvpx/test/datarate_test.cc15
-rw-r--r--libvpx/test/dct16x16_test.cc366
-rw-r--r--libvpx/test/dct32x32_test.cc192
-rw-r--r--libvpx/test/decode_test_driver.cc31
-rw-r--r--libvpx/test/decode_test_driver.h38
-rw-r--r--libvpx/test/encode_test_driver.cc77
-rw-r--r--libvpx/test/encode_test_driver.h52
-rw-r--r--libvpx/test/error_resilience_test.cc160
-rw-r--r--libvpx/test/fdct4x4_test.cc149
-rw-r--r--libvpx/test/fdct8x8_test.cc185
-rw-r--r--libvpx/test/i420_video_source.h2
-rw-r--r--libvpx/test/idct8x8_test.cc140
-rw-r--r--libvpx/test/idct_test.cc (renamed from libvpx/test/idctllm_test.cc)74
-rw-r--r--libvpx/test/intrapred_test.cc15
-rw-r--r--libvpx/test/keyframe_test.cc16
-rw-r--r--libvpx/test/md5_helper.h64
-rw-r--r--libvpx/test/pp_filter_test.cc15
-rw-r--r--libvpx/test/register_state_check.h95
-rw-r--r--libvpx/test/resize_test.cc73
-rw-r--r--libvpx/test/sad_test.cc367
-rw-r--r--libvpx/test/sixtap_predict_test.cc23
-rw-r--r--libvpx/test/subtract_test.cc13
-rw-r--r--libvpx/test/superframe_test.cc100
-rw-r--r--libvpx/test/test-data.sha1223
-rw-r--r--libvpx/test/test.mk301
-rw-r--r--libvpx/test/test_libvpx.cc32
-rw-r--r--libvpx/test/test_vector_test.cc128
-rw-r--r--libvpx/test/tile_independence_test.cc108
-rw-r--r--libvpx/test/util.h30
-rw-r--r--libvpx/test/variance_test.cc211
-rw-r--r--libvpx/test/video_source.h2
-rw-r--r--libvpx/test/vp8_boolcoder_test.cc (renamed from libvpx/test/boolcoder_test.cc)32
-rw-r--r--libvpx/test/vp8_decrypt_test.cc71
-rw-r--r--libvpx/test/vp8_fdct4x4_test.cc169
-rw-r--r--libvpx/test/vp9_boolcoder_test.cc91
-rw-r--r--libvpx/test/webm_video_source.h184
-rw-r--r--libvpx/third_party/libyuv/source/scale.c2
-rw-r--r--libvpx/third_party/x86inc/LICENSE18
-rw-r--r--libvpx/third_party/x86inc/README.webm11
-rw-r--r--libvpx/third_party/x86inc/x86inc.asm1125
-rwxr-xr-xlibvpx/tools/all_builds.py72
-rwxr-xr-xlibvpx/tools/cpplint.py4020
-rw-r--r--libvpx/tools/diff.py127
-rwxr-xr-xlibvpx/tools/ftfy.sh16
-rwxr-xr-xlibvpx/tools/intersect-diffs.py118
-rwxr-xr-xlibvpx/tools/lint-hunks.py144
-rwxr-xr-xlibvpx/tools/vpx-astyle.sh27
-rw-r--r--libvpx/tools_common.c9
-rw-r--r--libvpx/tools_common.h2
-rw-r--r--libvpx/vp8/common/arm/armv6/filter_v6.asm2
-rw-r--r--libvpx/vp8/common/arm/armv6/idct_blk_v6.c2
-rw-r--r--libvpx/vp8/common/arm/bilinearfilter_arm.c2
-rw-r--r--libvpx/vp8/common/arm/filter_arm.c2
-rw-r--r--libvpx/vp8/common/arm/loopfilter_arm.c2
-rw-r--r--libvpx/vp8/common/arm/neon/idct_blk_neon.c2
-rw-r--r--libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm6
-rw-r--r--libvpx/vp8/common/arm/reconintra_arm.c2
-rw-r--r--libvpx/vp8/common/arm/variance_arm.c2
-rw-r--r--libvpx/vp8/common/dequantize.c2
-rw-r--r--libvpx/vp8/common/generic/systemdependent.c3
-rw-r--r--libvpx/vp8/common/idct_blk.c2
-rw-r--r--libvpx/vp8/common/invtrans.h2
-rw-r--r--libvpx/vp8/common/loopfilter.c65
-rw-r--r--libvpx/vp8/common/loopfilter.h2
-rw-r--r--libvpx/vp8/common/loopfilter_filters.c52
-rw-r--r--libvpx/vp8/common/mfqe.c4
-rw-r--r--libvpx/vp8/common/mips/dspr2/dequantize_dspr2.c2
-rw-r--r--libvpx/vp8/common/mips/dspr2/filter_dspr2.c2
-rw-r--r--libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c2
-rw-r--r--libvpx/vp8/common/mips/dspr2/idctllm_dspr2.c2
-rw-r--r--libvpx/vp8/common/mips/dspr2/loopfilter_filters_dspr2.c2
-rw-r--r--libvpx/vp8/common/mips/dspr2/reconinter_dspr2.c2
-rw-r--r--libvpx/vp8/common/onyxc_int.h5
-rw-r--r--libvpx/vp8/common/onyxd.h5
-rw-r--r--libvpx/vp8/common/postproc.c84
-rw-r--r--libvpx/vp8/common/ppc/systemdependent.c5
-rw-r--r--libvpx/vp8/common/reconinter.c2
-rw-r--r--libvpx/vp8/common/reconintra.c10
-rw-r--r--libvpx/vp8/common/reconintra4x4.c6
-rw-r--r--libvpx/vp8/common/rtcd.c94
-rw-r--r--libvpx/vp8/common/rtcd_defs.sh58
-rw-r--r--libvpx/vp8/common/systemdependent.h6
-rw-r--r--libvpx/vp8/common/variance_c.c10
-rw-r--r--libvpx/vp8/common/vp8_asm_com_offsets.c52
-rw-r--r--libvpx/vp8/common/x86/idct_blk_mmx.c2
-rw-r--r--libvpx/vp8/common/x86/idct_blk_sse2.c2
-rw-r--r--libvpx/vp8/common/x86/iwalsh_mmx.asm2
-rw-r--r--libvpx/vp8/common/x86/loopfilter_block_sse2.asm10
-rw-r--r--libvpx/vp8/common/x86/mfqe_sse2.asm6
-rw-r--r--libvpx/vp8/common/x86/postproc_mmx.asm3
-rw-r--r--libvpx/vp8/common/x86/recon_sse2.asm2
-rw-r--r--libvpx/vp8/common/x86/recon_wrapper_sse2.c2
-rw-r--r--libvpx/vp8/common/x86/sad_sse3.asm8
-rw-r--r--libvpx/vp8/common/x86/subpixel_ssse3.asm1
-rw-r--r--libvpx/vp8/common/x86/variance_mmx.c20
-rw-r--r--libvpx/vp8/common/x86/variance_sse2.c26
-rw-r--r--libvpx/vp8/common/x86/variance_ssse3.c4
-rw-r--r--libvpx/vp8/common/x86/vp8_asm_stubs.c2
-rw-r--r--libvpx/vp8/decoder/dboolhuff.c55
-rw-r--r--libvpx/vp8/decoder/dboolhuff.h56
-rw-r--r--libvpx/vp8/decoder/decodemv.h4
-rw-r--r--libvpx/vp8/decoder/decoderthreading.h20
-rw-r--r--libvpx/vp8/decoder/decodframe.c108
-rw-r--r--libvpx/vp8/decoder/detokenize.h7
-rw-r--r--libvpx/vp8/decoder/ec_types.h3
-rw-r--r--libvpx/vp8/decoder/error_concealment.c4
-rw-r--r--libvpx/vp8/decoder/error_concealment.h6
-rw-r--r--libvpx/vp8/decoder/onyxd_if.c182
-rw-r--r--libvpx/vp8/decoder/onyxd_int.h43
-rw-r--r--libvpx/vp8/decoder/threading.c5
-rw-r--r--libvpx/vp8/decoder/treereader.h9
-rw-r--r--libvpx/vp8/decoder/vp8_asm_dec_offsets.c (renamed from libvpx/vp8/decoder/asm_dec_offsets.c)0
-rw-r--r--libvpx/vp8/encoder/arm/armv5te/boolhuff_armv5te.asm2
-rw-r--r--libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_armv5.asm2
-rw-r--r--libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_mbrow_armv5.asm2
-rw-r--r--libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_partitions_armv5.asm2
-rw-r--r--libvpx/vp8/encoder/arm/armv6/vp8_fast_quantize_b_armv6.asm2
-rw-r--r--libvpx/vp8/encoder/arm/armv6/vp8_subtract_armv6.asm2
-rw-r--r--libvpx/vp8/encoder/arm/dct_arm.c2
-rw-r--r--libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.asm2
-rw-r--r--libvpx/vp8/encoder/arm/neon/shortfdct_neon.asm4
-rw-r--r--libvpx/vp8/encoder/arm/neon/subtract_neon.asm2
-rw-r--r--libvpx/vp8/encoder/arm/quantize_arm.c2
-rw-r--r--libvpx/vp8/encoder/bitstream.c64
-rw-r--r--libvpx/vp8/encoder/block.h19
-rw-r--r--libvpx/vp8/encoder/boolhuff.c2
-rw-r--r--libvpx/vp8/encoder/boolhuff.h2
-rw-r--r--libvpx/vp8/encoder/denoising.c16
-rw-r--r--libvpx/vp8/encoder/encodeframe.c83
-rw-r--r--libvpx/vp8/encoder/encodeintra.c2
-rw-r--r--libvpx/vp8/encoder/encodemb.c2
-rw-r--r--libvpx/vp8/encoder/encodemv.c6
-rw-r--r--libvpx/vp8/encoder/ethreading.c19
-rw-r--r--libvpx/vp8/encoder/firstpass.c25
-rw-r--r--libvpx/vp8/encoder/mcomp.c25
-rw-r--r--libvpx/vp8/encoder/mcomp.h2
-rw-r--r--libvpx/vp8/encoder/onyx_if.c488
-rw-r--r--libvpx/vp8/encoder/onyx_int.h41
-rw-r--r--libvpx/vp8/encoder/pickinter.c65
-rw-r--r--libvpx/vp8/encoder/picklpf.c3
-rw-r--r--libvpx/vp8/encoder/psnr.c2
-rw-r--r--libvpx/vp8/encoder/quantize.c52
-rw-r--r--libvpx/vp8/encoder/ratectrl.c49
-rw-r--r--libvpx/vp8/encoder/rdopt.c121
-rw-r--r--libvpx/vp8/encoder/rdopt.h2
-rw-r--r--libvpx/vp8/encoder/temporal_filter.c2
-rw-r--r--libvpx/vp8/encoder/tokenize.c4
-rw-r--r--libvpx/vp8/encoder/tokenize.h2
-rw-r--r--libvpx/vp8/encoder/vp8_asm_enc_offsets.c (renamed from libvpx/vp8/encoder/asm_enc_offsets.c)0
-rw-r--r--libvpx/vp8/encoder/x86/dct_sse2.asm4
-rw-r--r--libvpx/vp8/encoder/x86/denoising_sse2.c3
-rw-r--r--libvpx/vp8/encoder/x86/quantize_sse2.asm386
-rw-r--r--libvpx/vp8/encoder/x86/quantize_sse2.c229
-rw-r--r--libvpx/vp8/encoder/x86/quantize_sse4.asm8
-rw-r--r--libvpx/vp8/encoder/x86/quantize_ssse3.asm8
-rw-r--r--libvpx/vp8/encoder/x86/temporal_filter_apply_sse2.asm2
-rw-r--r--libvpx/vp8/encoder/x86/vp8_enc_stubs_mmx.c2
-rw-r--r--libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c2
-rw-r--r--libvpx/vp8/vp8_common.mk7
-rw-r--r--libvpx/vp8/vp8_cx_iface.c10
-rw-r--r--libvpx/vp8/vp8_dx_iface.c210
-rw-r--r--libvpx/vp8/vp8cx.mk13
-rw-r--r--libvpx/vp8/vp8dx.mk31
-rw-r--r--libvpx/vp8_multi_resolution_encoder.c23
-rw-r--r--libvpx/vp9/common/generic/vp9_systemdependent.c18
-rw-r--r--libvpx/vp9/common/vp9_alloccommon.c205
-rw-r--r--libvpx/vp9/common/vp9_alloccommon.h31
-rw-r--r--libvpx/vp9/common/vp9_asm_com_offsets.c21
-rw-r--r--libvpx/vp9/common/vp9_blockd.h904
-rw-r--r--libvpx/vp9/common/vp9_common.h68
-rw-r--r--libvpx/vp9/common/vp9_convolve.c376
-rw-r--r--libvpx/vp9/common/vp9_convolve.h41
-rw-r--r--libvpx/vp9/common/vp9_debugmodes.c137
-rw-r--r--libvpx/vp9/common/vp9_default_coef_probs.h1384
-rw-r--r--libvpx/vp9/common/vp9_entropy.c737
-rw-r--r--libvpx/vp9/common/vp9_entropy.h225
-rw-r--r--libvpx/vp9/common/vp9_entropymode.c535
-rw-r--r--libvpx/vp9/common/vp9_entropymode.h96
-rw-r--r--libvpx/vp9/common/vp9_entropymv.c452
-rw-r--r--libvpx/vp9/common/vp9_entropymv.h142
-rw-r--r--libvpx/vp9/common/vp9_enums.h49
-rw-r--r--libvpx/vp9/common/vp9_extend.c133
-rw-r--r--libvpx/vp9/common/vp9_extend.h25
-rw-r--r--libvpx/vp9/common/vp9_filter.c97
-rw-r--r--libvpx/vp9/common/vp9_filter.h37
-rw-r--r--libvpx/vp9/common/vp9_findnearmv.c89
-rw-r--r--libvpx/vp9/common/vp9_findnearmv.h116
-rw-r--r--libvpx/vp9/common/vp9_idct.c1310
-rw-r--r--libvpx/vp9/common/vp9_idct.h80
-rw-r--r--libvpx/vp9/common/vp9_implicit_segmentation.c253
-rw-r--r--libvpx/vp9/common/vp9_loopfilter.c407
-rw-r--r--libvpx/vp9/common/vp9_loopfilter.h85
-rw-r--r--libvpx/vp9/common/vp9_loopfilter_filters.c308
-rw-r--r--libvpx/vp9/common/vp9_maskingmv.c803
-rw-r--r--libvpx/vp9/common/vp9_mbpitch.c28
-rw-r--r--libvpx/vp9/common/vp9_modecont.c23
-rw-r--r--libvpx/vp9/common/vp9_modecont.h19
-rw-r--r--libvpx/vp9/common/vp9_modecontext.c128
-rw-r--r--libvpx/vp9/common/vp9_mv.h36
-rw-r--r--libvpx/vp9/common/vp9_mvref_common.c306
-rw-r--r--libvpx/vp9/common/vp9_mvref_common.h37
-rw-r--r--libvpx/vp9/common/vp9_onyx.h247
-rw-r--r--libvpx/vp9/common/vp9_onyxc_int.h371
-rw-r--r--libvpx/vp9/common/vp9_postproc.c1017
-rw-r--r--libvpx/vp9/common/vp9_postproc.h36
-rw-r--r--libvpx/vp9/common/vp9_ppflags.h38
-rw-r--r--libvpx/vp9/common/vp9_pragmas.h (renamed from libvpx/vpx_scale/scale_mode.h)24
-rw-r--r--libvpx/vp9/common/vp9_pred_common.c520
-rw-r--r--libvpx/vp9/common/vp9_pred_common.h53
-rw-r--r--libvpx/vp9/common/vp9_quant_common.c69
-rw-r--r--libvpx/vp9/common/vp9_quant_common.h28
-rw-r--r--libvpx/vp9/common/vp9_reconinter.c528
-rw-r--r--libvpx/vp9/common/vp9_reconinter.h130
-rw-r--r--libvpx/vp9/common/vp9_reconintra.c357
-rw-r--r--libvpx/vp9/common/vp9_reconintra.h30
-rw-r--r--libvpx/vp9/common/vp9_rtcd.c20
-rw-r--r--libvpx/vp9/common/vp9_rtcd_defs.sh611
-rw-r--r--libvpx/vp9/common/vp9_sadmxn.h38
-rw-r--r--libvpx/vp9/common/vp9_seg_common.c80
-rw-r--r--libvpx/vp9/common/vp9_seg_common.h51
-rw-r--r--libvpx/vp9/common/vp9_subpelvar.h148
-rw-r--r--libvpx/vp9/common/vp9_systemdependent.h39
-rw-r--r--libvpx/vp9/common/vp9_tapify.py106
-rw-r--r--libvpx/vp9/common/vp9_textblit.c120
-rw-r--r--libvpx/vp9/common/vp9_textblit.h19
-rw-r--r--libvpx/vp9/common/vp9_tile_common.c63
-rw-r--r--libvpx/vp9/common/vp9_tile_common.h23
-rw-r--r--libvpx/vp9/common/vp9_treecoder.c78
-rw-r--r--libvpx/vp9/common/vp9_treecoder.h82
-rw-r--r--libvpx/vp9/common/x86/vp9_asm_stubs.c318
-rw-r--r--libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c1985
-rw-r--r--libvpx/vp9/common/x86/vp9_iwalsh_mmx.asm173
-rw-r--r--libvpx/vp9/common/x86/vp9_iwalsh_sse2.asm119
-rw-r--r--libvpx/vp9/common/x86/vp9_loopfilter_intrin_sse2.c1013
-rw-r--r--libvpx/vp9/common/x86/vp9_loopfilter_mmx.asm626
-rw-r--r--libvpx/vp9/common/x86/vp9_loopfilter_sse2.asm872
-rw-r--r--libvpx/vp9/common/x86/vp9_loopfilter_x86.h35
-rw-r--r--libvpx/vp9/common/x86/vp9_mask_sse3.asm484
-rw-r--r--libvpx/vp9/common/x86/vp9_postproc_mmx.asm534
-rw-r--r--libvpx/vp9/common/x86/vp9_postproc_sse2.asm695
-rw-r--r--libvpx/vp9/common/x86/vp9_postproc_x86.h64
-rw-r--r--libvpx/vp9/common/x86/vp9_recon_mmx.asm272
-rw-r--r--libvpx/vp9/common/x86/vp9_recon_sse2.asm572
-rw-r--r--libvpx/vp9/common/x86/vp9_recon_wrapper_sse2.c101
-rw-r--r--libvpx/vp9/common/x86/vp9_sadmxn_sse2.c95
-rw-r--r--libvpx/vp9/common/x86/vp9_subpixel_8t_ssse3.asm1011
-rw-r--r--libvpx/vp9/decoder/vp9_asm_dec_offsets.c20
-rw-r--r--libvpx/vp9/decoder/vp9_dboolhuff.c69
-rw-r--r--libvpx/vp9/decoder/vp9_dboolhuff.h114
-rw-r--r--libvpx/vp9/decoder/vp9_decodemv.c823
-rw-r--r--libvpx/vp9/decoder/vp9_decodemv.h23
-rw-r--r--libvpx/vp9/decoder/vp9_decodframe.c1199
-rw-r--r--libvpx/vp9/decoder/vp9_decodframe.h22
-rw-r--r--libvpx/vp9/decoder/vp9_detokenize.c347
-rw-r--r--libvpx/vp9/decoder/vp9_detokenize.h19
-rw-r--r--libvpx/vp9/decoder/vp9_idct_blk.c159
-rw-r--r--libvpx/vp9/decoder/vp9_idct_blk.h30
-rw-r--r--libvpx/vp9/decoder/vp9_onyxd.h69
-rw-r--r--libvpx/vp9/decoder/vp9_onyxd_if.c433
-rw-r--r--libvpx/vp9/decoder/vp9_onyxd_int.h62
-rw-r--r--libvpx/vp9/decoder/vp9_read_bit_buffer.h54
-rw-r--r--libvpx/vp9/decoder/vp9_treereader.h32
-rw-r--r--libvpx/vp9/decoder/x86/vp9_dequantize_sse2.c220
-rw-r--r--libvpx/vp9/encoder/vp9_asm_enc_offsets.c17
-rw-r--r--libvpx/vp9/encoder/vp9_bitstream.c1821
-rw-r--r--libvpx/vp9/encoder/vp9_bitstream.h17
-rw-r--r--libvpx/vp9/encoder/vp9_block.h176
-rw-r--r--libvpx/vp9/encoder/vp9_boolhuff.c63
-rw-r--r--libvpx/vp9/encoder/vp9_boolhuff.h115
-rw-r--r--libvpx/vp9/encoder/vp9_dct.c1381
-rw-r--r--libvpx/vp9/encoder/vp9_encodeframe.c2109
-rw-r--r--libvpx/vp9/encoder/vp9_encodeframe.h24
-rw-r--r--libvpx/vp9/encoder/vp9_encodeintra.c32
-rw-r--r--libvpx/vp9/encoder/vp9_encodeintra.h22
-rw-r--r--libvpx/vp9/encoder/vp9_encodemb.c705
-rw-r--r--libvpx/vp9/encoder/vp9_encodemb.h53
-rw-r--r--libvpx/vp9/encoder/vp9_encodemv.c610
-rw-r--r--libvpx/vp9/encoder/vp9_encodemv.h33
-rw-r--r--libvpx/vp9/encoder/vp9_firstpass.c2648
-rw-r--r--libvpx/vp9/encoder/vp9_firstpass.h22
-rw-r--r--libvpx/vp9/encoder/vp9_lookahead.c190
-rw-r--r--libvpx/vp9/encoder/vp9_lookahead.h95
-rw-r--r--libvpx/vp9/encoder/vp9_mbgraph.c440
-rw-r--r--libvpx/vp9/encoder/vp9_mbgraph.h16
-rw-r--r--libvpx/vp9/encoder/vp9_mcomp.c2429
-rw-r--r--libvpx/vp9/encoder/vp9_mcomp.h93
-rw-r--r--libvpx/vp9/encoder/vp9_modecosts.c42
-rw-r--r--libvpx/vp9/encoder/vp9_modecosts.h17
-rw-r--r--libvpx/vp9/encoder/vp9_onyx_if.c3932
-rw-r--r--libvpx/vp9/encoder/vp9_onyx_int.h637
-rw-r--r--libvpx/vp9/encoder/vp9_picklpf.c236
-rw-r--r--libvpx/vp9/encoder/vp9_picklpf.h23
-rw-r--r--libvpx/vp9/encoder/vp9_psnr.c29
-rw-r--r--libvpx/vp9/encoder/vp9_psnr.h17
-rw-r--r--libvpx/vp9/encoder/vp9_quantize.c312
-rw-r--r--libvpx/vp9/encoder/vp9_quantize.h46
-rw-r--r--libvpx/vp9/encoder/vp9_ratectrl.c550
-rw-r--r--libvpx/vp9/encoder/vp9_ratectrl.h39
-rw-r--r--libvpx/vp9/encoder/vp9_rdopt.c3255
-rw-r--r--libvpx/vp9/encoder/vp9_rdopt.h36
-rw-r--r--libvpx/vp9/encoder/vp9_sad_c.c686
-rw-r--r--libvpx/vp9/encoder/vp9_segmentation.c295
-rw-r--r--libvpx/vp9/encoder/vp9_segmentation.h40
-rw-r--r--libvpx/vp9/encoder/vp9_ssim.c147
-rw-r--r--libvpx/vp9/encoder/vp9_temporal_filter.c529
-rw-r--r--libvpx/vp9/encoder/vp9_temporal_filter.h18
-rw-r--r--libvpx/vp9/encoder/vp9_tokenize.c458
-rw-r--r--libvpx/vp9/encoder/vp9_tokenize.h56
-rw-r--r--libvpx/vp9/encoder/vp9_treewriter.c38
-rw-r--r--libvpx/vp9/encoder/vp9_treewriter.h87
-rw-r--r--libvpx/vp9/encoder/vp9_variance.h104
-rw-r--r--libvpx/vp9/encoder/vp9_variance_c.c957
-rw-r--r--libvpx/vp9/encoder/vp9_write_bit_buffer.h48
-rw-r--r--libvpx/vp9/encoder/x86/vp9_dct_mmx.asm241
-rw-r--r--libvpx/vp9/encoder/x86/vp9_dct_mmx.h17
-rw-r--r--libvpx/vp9/encoder/x86/vp9_dct_sse2.c1000
-rw-r--r--libvpx/vp9/encoder/x86/vp9_encodeopt.asm125
-rw-r--r--libvpx/vp9/encoder/x86/vp9_fwalsh_sse2.asm164
-rw-r--r--libvpx/vp9/encoder/x86/vp9_mcomp_x86.h40
-rw-r--r--libvpx/vp9/encoder/x86/vp9_sad4d_sse2.asm231
-rw-r--r--libvpx/vp9/encoder/x86/vp9_sad_mmx.asm427
-rw-r--r--libvpx/vp9/encoder/x86/vp9_sad_sse2.asm211
-rw-r--r--libvpx/vp9/encoder/x86/vp9_sad_sse3.asm378
-rw-r--r--libvpx/vp9/encoder/x86/vp9_sad_sse4.asm359
-rw-r--r--libvpx/vp9/encoder/x86/vp9_sad_ssse3.asm370
-rw-r--r--libvpx/vp9/encoder/x86/vp9_ssim_opt.asm216
-rw-r--r--libvpx/vp9/encoder/x86/vp9_subpel_variance_impl_sse2.asm645
-rw-r--r--libvpx/vp9/encoder/x86/vp9_subtract_mmx.asm432
-rw-r--r--libvpx/vp9/encoder/x86/vp9_subtract_sse2.asm356
-rw-r--r--libvpx/vp9/encoder/x86/vp9_temporal_filter_apply_sse2.asm207
-rw-r--r--libvpx/vp9/encoder/x86/vp9_variance_impl_mmx.asm851
-rw-r--r--libvpx/vp9/encoder/x86/vp9_variance_impl_sse2.asm761
-rw-r--r--libvpx/vp9/encoder/x86/vp9_variance_impl_ssse3.asm372
-rw-r--r--libvpx/vp9/encoder/x86/vp9_variance_mmx.c382
-rw-r--r--libvpx/vp9/encoder/x86/vp9_variance_sse2.c773
-rw-r--r--libvpx/vp9/encoder/x86/vp9_variance_ssse3.c142
-rw-r--r--libvpx/vp9/encoder/x86/vp9_x86_csystemdependent.c55
-rw-r--r--libvpx/vp9/exports_dec2
-rw-r--r--libvpx/vp9/exports_enc4
-rw-r--r--libvpx/vp9/vp9_common.mk106
-rw-r--r--libvpx/vp9/vp9_cx_iface.c1153
-rw-r--r--libvpx/vp9/vp9_dx_iface.c750
-rw-r--r--libvpx/vp9/vp9_iface_common.h89
-rw-r--r--libvpx/vp9/vp9cx.mk101
-rw-r--r--libvpx/vp9/vp9dx.mk42
-rw-r--r--libvpx/vpx/internal/vpx_codec_internal.h272
-rw-r--r--libvpx/vpx/src/vpx_codec.c153
-rw-r--r--libvpx/vpx/src/vpx_decoder.c341
-rw-r--r--libvpx/vpx/src/vpx_encoder.c628
-rw-r--r--libvpx/vpx/src/vpx_image.c374
-rw-r--r--libvpx/vpx/vp8.h78
-rw-r--r--libvpx/vpx/vp8cx.h194
-rw-r--r--libvpx/vpx/vp8dx.h58
-rw-r--r--libvpx/vpx/vpx_codec.h862
-rw-r--r--libvpx/vpx/vpx_decoder.h567
-rw-r--r--libvpx/vpx/vpx_encoder.h1561
-rw-r--r--libvpx/vpx/vpx_image.h318
-rw-r--r--libvpx/vpx/vpx_integer.h3
-rw-r--r--libvpx/vpx_mem/include/vpx_mem_intrnl.h22
-rw-r--r--libvpx/vpx_mem/include/vpx_mem_tracker.h287
-rw-r--r--libvpx/vpx_mem/memory_manager/hmm_alloc.c64
-rw-r--r--libvpx/vpx_mem/memory_manager/hmm_base.c501
-rw-r--r--libvpx/vpx_mem/memory_manager/hmm_dflt_abort.c33
-rw-r--r--libvpx/vpx_mem/memory_manager/hmm_grow.c41
-rw-r--r--libvpx/vpx_mem/memory_manager/hmm_largest.c61
-rw-r--r--libvpx/vpx_mem/memory_manager/hmm_resize.c131
-rw-r--r--libvpx/vpx_mem/memory_manager/hmm_shrink.c136
-rw-r--r--libvpx/vpx_mem/memory_manager/hmm_true.c19
-rw-r--r--libvpx/vpx_mem/memory_manager/include/cavl_if.h57
-rw-r--r--libvpx/vpx_mem/memory_manager/include/cavl_impl.h1579
-rw-r--r--libvpx/vpx_mem/memory_manager/include/heapmm.h77
-rw-r--r--libvpx/vpx_mem/memory_manager/include/hmm_cnfg.h10
-rw-r--r--libvpx/vpx_mem/memory_manager/include/hmm_intrnl.h64
-rw-r--r--libvpx/vpx_mem/vpx_mem.c807
-rw-r--r--libvpx/vpx_mem/vpx_mem.h150
-rw-r--r--libvpx/vpx_mem/vpx_mem_tracker.c712
-rw-r--r--libvpx/vpx_ports/arm_cpudetect.c261
-rw-r--r--libvpx/vpx_ports/asm_offsets.h4
-rw-r--r--libvpx/vpx_ports/config.h10
-rw-r--r--libvpx/vpx_ports/emmintrin_compat.h55
-rw-r--r--libvpx/vpx_ports/emms.asm2
-rw-r--r--libvpx/vpx_ports/mem.h5
-rw-r--r--libvpx/vpx_ports/mem_ops.h182
-rw-r--r--libvpx/vpx_ports/mem_ops_aligned.h80
-rw-r--r--libvpx/vpx_ports/vpx_once.h97
-rw-r--r--libvpx/vpx_ports/vpx_timer.h63
-rw-r--r--libvpx/vpx_ports/vpxtypes.h167
-rw-r--r--libvpx/vpx_ports/x86.h238
-rw-r--r--libvpx/vpx_ports/x86_abi_support.asm27
-rw-r--r--libvpx/vpx_ports/x86_cpuid.c60
-rw-r--r--libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copy_y_neon.asm2
-rw-r--r--libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copyframe_func_neon.asm2
-rw-r--r--libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copysrcframe_func_neon.asm2
-rw-r--r--libvpx/vpx_scale/arm/neon/vp8_vpxyv12_extendframeborders_neon.asm2
-rw-r--r--libvpx/vpx_scale/arm/neon/yv12extend_arm.c2
-rw-r--r--libvpx/vpx_scale/generic/bicubic_scaler.c569
-rw-r--r--libvpx/vpx_scale/generic/gen_scalers.c684
-rw-r--r--libvpx/vpx_scale/generic/vpx_scale.c (renamed from libvpx/vpx_scale/generic/vpxscale.c)502
-rw-r--r--libvpx/vpx_scale/generic/yv12config.c174
-rw-r--r--libvpx/vpx_scale/generic/yv12extend.c266
-rw-r--r--libvpx/vpx_scale/generic/yv12extend_generic.h25
-rw-r--r--libvpx/vpx_scale/include/generic/vpxscale_arbitrary.h55
-rw-r--r--libvpx/vpx_scale/include/generic/vpxscale_depricated.h34
-rw-r--r--libvpx/vpx_scale/vpx_scale.h (renamed from libvpx/vpx_scale/vpxscale.h)12
-rw-r--r--libvpx/vpx_scale/vpx_scale.mk14
-rw-r--r--libvpx/vpx_scale/vpx_scale_asm_offsets.c (renamed from libvpx/vp8/common/asm_com_offsets.c)33
-rw-r--r--libvpx/vpx_scale/vpx_scale_rtcd.c18
-rw-r--r--libvpx/vpx_scale/vpx_scale_rtcd.sh31
-rw-r--r--libvpx/vpx_scale/win32/scaleopt.c1195
-rw-r--r--libvpx/vpx_scale/win32/scalesystemdependent.c87
-rw-r--r--libvpx/vpx_scale/yv12config.h41
-rw-r--r--libvpx/vpxdec.c1803
-rw-r--r--libvpx/vpxenc.c4132
-rw-r--r--libvpx/y4minput.c784
-rw-r--r--libvpx/y4minput.h11
-rw-r--r--mips-dspr2/libvpx_srcs.txt96
-rw-r--r--mips-dspr2/vp8_rtcd.h (renamed from mips-dspr2/vpx_rtcd.h)90
-rw-r--r--mips-dspr2/vp9_rtcd.h191
-rw-r--r--mips-dspr2/vpx_config.c2
-rw-r--r--mips-dspr2/vpx_config.h13
-rw-r--r--mips-dspr2/vpx_scale_rtcd.h58
-rw-r--r--mips-dspr2/vpx_version.h6
-rw-r--r--mips/.bins0
-rw-r--r--mips/.docs0
-rw-r--r--mips/.libs0
-rw-r--r--mips/libvpx_srcs.txt96
-rw-r--r--mips/vp8_rtcd.h (renamed from mips/vpx_rtcd.h)90
-rw-r--r--mips/vp9_rtcd.h189
-rw-r--r--mips/vpx_config.c2
-rw-r--r--mips/vpx_config.h13
-rw-r--r--mips/vpx_scale_rtcd.h58
-rw-r--r--mips/vpx_version.h6
502 files changed, 89596 insertions, 17184 deletions
diff --git a/UPDATING b/UPDATING
index e71403a..1c9f863 100644
--- a/UPDATING
+++ b/UPDATING
@@ -32,7 +32,7 @@ Aesthetic:
Example:
$ cd external/libvpx/armv7a
$ ../libvpx/configure --target=armv7-android-gcc --disable-runtime-cpu-detect \
- --disable-neon --sdk-path=$ANDROID_NDK_ROOT --disable-vp8-encoder \
+ --disable-neon --sdk-path=$ANDROID_NDK_ROOT --disable-vp9-encoder \
--disable-examples --disable-docs
Run 'make libvpx_srcs.txt'
@@ -43,5 +43,7 @@ Remove the unused files leaving only:
libvpx_srcs.txt
vpx_config.c
vpx_config.h
-vpx_rtcd.h
+vpx_scale_rtcd.h
+vp8_rtcd.h
+vp9_rtcd.h
vpx_version.h
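Putting the old and new hunks together, the per-target regeneration step now reads as follows (paths and the NDK location are illustrative):

$ cd external/libvpx/armv7a
$ ../libvpx/configure --target=armv7-android-gcc --disable-runtime-cpu-detect \
    --disable-neon --sdk-path=$ANDROID_NDK_ROOT --disable-vp9-encoder \
    --disable-examples --disable-docs
$ make libvpx_srcs.txt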
diff --git a/armv7a-neon/libvpx_srcs.txt b/armv7a-neon/libvpx_srcs.txt
index 494fe37..15973e2 100644
--- a/armv7a-neon/libvpx_srcs.txt
+++ b/armv7a-neon/libvpx_srcs.txt
@@ -63,7 +63,6 @@ vp8/common/arm/neon/vp8_subpixelvariance16x16s_neon.asm.s
vp8/common/arm/neon/vp8_subpixelvariance8x8_neon.asm.s
vp8/common/arm/reconintra_arm.c
vp8/common/arm/variance_arm.c
-vp8/common/asm_com_offsets.c
vp8/common/blockd.c
vp8/common/blockd.h
vp8/common/coefupdateprobs.h
@@ -120,8 +119,8 @@ vp8/common/treecoder.c
vp8/common/treecoder.h
vp8/common/variance_c.c
vp8/common/variance.h
+vp8/common/vp8_asm_com_offsets.c
vp8/common/vp8_entropymodedata.h
-vp8/decoder/asm_dec_offsets.c
vp8/decoder/dboolhuff.c
vp8/decoder/dboolhuff.h
vp8/decoder/decodemv.c
@@ -134,6 +133,7 @@ vp8/decoder/onyxd_if.c
vp8/decoder/onyxd_int.h
vp8/decoder/threading.c
vp8/decoder/treereader.h
+vp8/decoder/vp8_asm_dec_offsets.c
vp8/encoder/arm/armv5te/boolhuff_armv5te.asm.s
vp8/encoder/arm/armv5te/vp8_packtokens_armv5.asm.s
vp8/encoder/arm/armv5te/vp8_packtokens_mbrow_armv5.asm.s
@@ -153,7 +153,6 @@ vp8/encoder/arm/neon/vp8_memcpy_neon.asm.s
vp8/encoder/arm/neon/vp8_mse16x16_neon.asm.s
vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.asm.s
vp8/encoder/arm/quantize_arm.c
-vp8/encoder/asm_enc_offsets.c
vp8/encoder/bitstream.c
vp8/encoder/bitstream.h
vp8/encoder/block.h
@@ -199,12 +198,92 @@ vp8/encoder/tokenize.c
vp8/encoder/tokenize.h
vp8/encoder/treewriter.c
vp8/encoder/treewriter.h
+vp8/encoder/vp8_asm_enc_offsets.c
vp8/vp8_common.mk
vp8/vp8cx_arm.mk
vp8/vp8_cx_iface.c
vp8/vp8cx.mk
vp8/vp8_dx_iface.c
vp8/vp8dx.mk
+vp9/common/generic/vp9_systemdependent.c
+vp9/common/vp9_alloccommon.c
+vp9/common/vp9_alloccommon.h
+vp9/common/vp9_asm_com_offsets.c
+vp9/common/vp9_blockd.h
+vp9/common/vp9_common.h
+vp9/common/vp9_convolve.c
+vp9/common/vp9_convolve.h
+vp9/common/vp9_debugmodes.c
+vp9/common/vp9_default_coef_probs.h
+vp9/common/vp9_entropy.c
+vp9/common/vp9_entropy.h
+vp9/common/vp9_entropymode.c
+vp9/common/vp9_entropymode.h
+vp9/common/vp9_entropymv.c
+vp9/common/vp9_entropymv.h
+vp9/common/vp9_enums.h
+vp9/common/vp9_extend.c
+vp9/common/vp9_extend.h
+vp9/common/vp9_filter.c
+vp9/common/vp9_filter.h
+vp9/common/vp9_findnearmv.c
+vp9/common/vp9_findnearmv.h
+vp9/common/vp9_idct.c
+vp9/common/vp9_idct.h
+vp9/common/vp9_loopfilter.c
+vp9/common/vp9_loopfilter_filters.c
+vp9/common/vp9_loopfilter.h
+vp9/common/vp9_mbpitch.c
+vp9/common/vp9_modecont.c
+vp9/common/vp9_modecontext.c
+vp9/common/vp9_modecont.h
+vp9/common/vp9_mv.h
+vp9/common/vp9_mvref_common.c
+vp9/common/vp9_mvref_common.h
+vp9/common/vp9_onyxc_int.h
+vp9/common/vp9_onyx.h
+vp9/common/vp9_ppflags.h
+vp9/common/vp9_pragmas.h
+vp9/common/vp9_pred_common.c
+vp9/common/vp9_pred_common.h
+vp9/common/vp9_quant_common.c
+vp9/common/vp9_quant_common.h
+vp9/common/vp9_reconinter.c
+vp9/common/vp9_reconinter.h
+vp9/common/vp9_reconintra.c
+vp9/common/vp9_reconintra.h
+vp9/common/vp9_rtcd.c
+vp9/common/vp9_rtcd_defs.sh
+vp9/common/vp9_sadmxn.h
+vp9/common/vp9_seg_common.c
+vp9/common/vp9_seg_common.h
+vp9/common/vp9_subpelvar.h
+vp9/common/vp9_systemdependent.h
+vp9/common/vp9_textblit.h
+vp9/common/vp9_tile_common.c
+vp9/common/vp9_tile_common.h
+vp9/common/vp9_treecoder.c
+vp9/common/vp9_treecoder.h
+vp9/decoder/vp9_asm_dec_offsets.c
+vp9/decoder/vp9_dboolhuff.c
+vp9/decoder/vp9_dboolhuff.h
+vp9/decoder/vp9_decodemv.c
+vp9/decoder/vp9_decodemv.h
+vp9/decoder/vp9_decodframe.c
+vp9/decoder/vp9_decodframe.h
+vp9/decoder/vp9_detokenize.c
+vp9/decoder/vp9_detokenize.h
+vp9/decoder/vp9_idct_blk.c
+vp9/decoder/vp9_idct_blk.h
+vp9/decoder/vp9_onyxd.h
+vp9/decoder/vp9_onyxd_if.c
+vp9/decoder/vp9_onyxd_int.h
+vp9/decoder/vp9_read_bit_buffer.h
+vp9/decoder/vp9_treereader.h
+vp9/vp9_common.mk
+vp9/vp9_dx_iface.c
+vp9/vp9dx.mk
+vp9/vp9_iface_common.h
vpx_config.c
vpx/internal/vpx_codec_internal.h
vpx_mem/include/vpx_mem_intrnl.h
@@ -214,7 +293,9 @@ vpx_mem/vpx_mem.mk
vpx_ports/arm_cpudetect.c
vpx_ports/arm.h
vpx_ports/asm_offsets.h
+vpx_ports/emmintrin_compat.h
vpx_ports/mem.h
+vpx_ports/vpx_once.h
vpx_ports/vpx_ports.mk
vpx_ports/vpx_timer.h
vpx_scale/arm/neon/vp8_vpxyv12_copyframe_func_neon.asm.s
@@ -223,13 +304,14 @@ vpx_scale/arm/neon/vp8_vpxyv12_copy_y_neon.asm.s
vpx_scale/arm/neon/vp8_vpxyv12_extendframeborders_neon.asm.s
vpx_scale/arm/neon/yv12extend_arm.c
vpx_scale/generic/gen_scalers.c
-vpx_scale/generic/vpxscale.c
+vpx_scale/generic/vpx_scale.c
vpx_scale/generic/yv12config.c
vpx_scale/generic/yv12extend.c
-vpx_scale/generic/yv12extend_generic.h
-vpx_scale/scale_mode.h
-vpx_scale/vpxscale.h
+vpx_scale/vpx_scale_asm_offsets.c
+vpx_scale/vpx_scale.h
vpx_scale/vpx_scale.mk
+vpx_scale/vpx_scale_rtcd.c
+vpx_scale/vpx_scale_rtcd.sh
vpx_scale/yv12config.h
vpx/src/vpx_codec.c
vpx/src/vpx_decoder.c
diff --git a/armv7a-neon/vpx_rtcd.h b/armv7a-neon/vp8_rtcd.h
index 914b08d..9cad64a 100644
--- a/armv7a-neon/vpx_rtcd.h
+++ b/armv7a-neon/vp8_rtcd.h
@@ -1,5 +1,5 @@
-#ifndef VPX_RTCD_
-#define VPX_RTCD_
+#ifndef VP8_RTCD_H_
+#define VP8_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
@@ -7,7 +7,9 @@
#define RTCD_EXTERN extern
#endif
-#include "vp8/common/blockd.h"
+/*
+ * VP8
+ */
struct blockd;
struct macroblockd;
@@ -20,6 +22,9 @@ struct variance_vtable;
union int_mv;
struct yv12_buffer_config;
+void vp8_clear_system_state_c();
+#define vp8_clear_system_state vp8_clear_system_state_c
+
void vp8_dequantize_b_c(struct blockd*, short *dqc);
void vp8_dequantize_b_v6(struct blockd*, short *dqc);
void vp8_dequantize_b_neon(struct blockd*, short *dqc);
@@ -119,8 +124,8 @@ void vp8_build_intra_predictors_mby_s_c(struct macroblockd *x, unsigned char * y
void vp8_build_intra_predictors_mbuv_s_c(struct macroblockd *x, unsigned char * uabove_row, unsigned char * vabove_row, unsigned char *uleft, unsigned char *vleft, int left_stride, unsigned char * upred_ptr, unsigned char * vpred_ptr, int pred_stride);
#define vp8_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s_c
-void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
-void vp8_intra4x4_predict_armv6(unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
+void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, int b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
+void vp8_intra4x4_predict_armv6(unsigned char *Above, unsigned char *yleft, int left_stride, int b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
#define vp8_intra4x4_predict vp8_intra4x4_predict_armv6
void vp8_sixtap_predict16x16_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
@@ -381,85 +386,7 @@ void vp8_yv12_copy_partial_frame_neon(struct yv12_buffer_config *src_ybc, struct
int vp8_denoiser_filter_c(struct yv12_buffer_config* mc_running_avg, struct yv12_buffer_config* running_avg, struct macroblock* signal, unsigned int motion_magnitude2, int y_offset, int uv_offset);
#define vp8_denoiser_filter vp8_denoiser_filter_c
-void vp8_horizontal_line_4_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_4_5_scale vp8_horizontal_line_4_5_scale_c
-
-void vp8_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_4_5_scale vp8_vertical_band_4_5_scale_c
-
-void vp8_last_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_4_5_scale vp8_last_vertical_band_4_5_scale_c
-
-void vp8_horizontal_line_2_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_3_scale vp8_horizontal_line_2_3_scale_c
-
-void vp8_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_3_scale vp8_vertical_band_2_3_scale_c
-
-void vp8_last_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_2_3_scale vp8_last_vertical_band_2_3_scale_c
-
-void vp8_horizontal_line_3_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_5_scale vp8_horizontal_line_3_5_scale_c
-
-void vp8_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_5_scale vp8_vertical_band_3_5_scale_c
-
-void vp8_last_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_5_scale vp8_last_vertical_band_3_5_scale_c
-
-void vp8_horizontal_line_3_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_4_scale vp8_horizontal_line_3_4_scale_c
-
-void vp8_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_4_scale vp8_vertical_band_3_4_scale_c
-
-void vp8_last_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_4_scale vp8_last_vertical_band_3_4_scale_c
-
-void vp8_horizontal_line_1_2_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_1_2_scale vp8_horizontal_line_1_2_scale_c
-
-void vp8_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_1_2_scale vp8_vertical_band_1_2_scale_c
-
-void vp8_last_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_1_2_scale vp8_last_vertical_band_1_2_scale_c
-
-void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
-
-void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
-
-void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
-
-void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
-
-void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
-
-void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
-void vp8_yv12_extend_frame_borders_neon(struct yv12_buffer_config *ybf);
-#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_neon
-
-void vp8_yv12_copy_frame_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-void vp8_yv12_copy_frame_neon(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_frame vp8_yv12_copy_frame_neon
-
-void vp8_yv12_copy_y_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-void vp8_yv12_copy_y_neon(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_y vp8_yv12_copy_y_neon
-
-void vpx_rtcd(void);
+void vp8_rtcd(void);
#include "vpx_config.h"
#ifdef RTCD_C
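The rename from vpx_rtcd.h to vp8_rtcd.h keeps the existing run-time CPU dispatch (RTCD) pattern: one prototype per implementation, with the generic name resolved by a macro. A reduced sketch using names from the header above — since this target is built with --disable-runtime-cpu-detect, the dispatch collapses to a compile-time #define:

/* One prototype per implementation of the same kernel... */
void vp8_dequantize_b_c(struct blockd *, short *dqc);
void vp8_dequantize_b_v6(struct blockd *, short *dqc);
void vp8_dequantize_b_neon(struct blockd *, short *dqc);

/* ...and the generic name is pinned to the best one for this target. */
#define vp8_dequantize_b vp8_dequantize_b_neon

/* Callers just write vp8_dequantize_b(bd, dqc). With runtime detection
 * enabled, the macro would instead expand to an RTCD_EXTERN function
 * pointer that setup_rtcd_internal() fills in from the CPU flags. */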
diff --git a/armv7a-neon/vp9_rtcd.h b/armv7a-neon/vp9_rtcd.h
new file mode 100644
index 0000000..cc3c834
--- /dev/null
+++ b/armv7a-neon/vp9_rtcd.h
@@ -0,0 +1,190 @@
+#ifndef VP9_RTCD_H_
+#define VP9_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+/*
+ * VP9
+ */
+
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_enums.h"
+
+struct loop_filter_info;
+struct macroblockd;
+struct loop_filter_info;
+
+/* Encoder forward decls */
+struct macroblock;
+struct vp9_variance_vtable;
+
+#define DEC_MVCOSTS int *mvjcost, int *mvcost[2]
+union int_mv;
+struct yv12_buffer_config;
+
+void vp9_idct_add_16x16_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_16x16 vp9_idct_add_16x16_c
+
+void vp9_idct_add_8x8_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_8x8 vp9_idct_add_8x8_c
+
+void vp9_idct_add_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add vp9_idct_add_c
+
+void vp9_idct_add_32x32_c(int16_t *q, uint8_t *dst, int stride, int eob);
+#define vp9_idct_add_32x32 vp9_idct_add_32x32_c
+
+void vp9_copy_mem16x16_c(const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch);
+#define vp9_copy_mem16x16 vp9_copy_mem16x16_c
+
+void vp9_copy_mem8x8_c(const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch);
+#define vp9_copy_mem8x8 vp9_copy_mem8x8_c
+
+void vp9_copy_mem8x4_c(const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch);
+#define vp9_copy_mem8x4 vp9_copy_mem8x4_c
+
+void vp9_build_intra_predictors_c(uint8_t *src, int src_stride, uint8_t *pred, int y_stride, int mode, int bw, int bh, int up_available, int left_available, int right_available);
+#define vp9_build_intra_predictors vp9_build_intra_predictors_c
+
+void vp9_build_intra_predictors_sby_s_c(struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize);
+#define vp9_build_intra_predictors_sby_s vp9_build_intra_predictors_sby_s_c
+
+void vp9_build_intra_predictors_sbuv_s_c(struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize);
+#define vp9_build_intra_predictors_sbuv_s vp9_build_intra_predictors_sbuv_s_c
+
+void vp9_intra4x4_predict_c(struct macroblockd *xd, int block, enum BLOCK_SIZE_TYPE bsize, int b_mode, uint8_t *predictor, int pre_stride);
+#define vp9_intra4x4_predict vp9_intra4x4_predict_c
+
+void vp9_add_constant_residual_8x8_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_8x8 vp9_add_constant_residual_8x8_c
+
+void vp9_add_constant_residual_16x16_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_16x16 vp9_add_constant_residual_16x16_c
+
+void vp9_add_constant_residual_32x32_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_32x32 vp9_add_constant_residual_32x32_c
+
+void vp9_mb_lpf_vertical_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
+#define vp9_mb_lpf_vertical_edge_w vp9_mb_lpf_vertical_edge_w_c
+
+void vp9_mbloop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_vertical_edge vp9_mbloop_filter_vertical_edge_c
+
+void vp9_loop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_vertical_edge vp9_loop_filter_vertical_edge_c
+
+void vp9_mb_lpf_horizontal_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
+#define vp9_mb_lpf_horizontal_edge_w vp9_mb_lpf_horizontal_edge_w_c
+
+void vp9_mbloop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_horizontal_edge vp9_mbloop_filter_horizontal_edge_c
+
+void vp9_loop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_horizontal_edge vp9_loop_filter_horizontal_edge_c
+
+void vp9_blend_mb_inner_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_inner vp9_blend_mb_inner_c
+
+void vp9_blend_mb_outer_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_outer vp9_blend_mb_outer_c
+
+void vp9_blend_b_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_b vp9_blend_b_c
+
+void vp9_convolve8_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8 vp9_convolve8_c
+
+void vp9_convolve8_horiz_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_horiz vp9_convolve8_horiz_c
+
+void vp9_convolve8_vert_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_vert vp9_convolve8_vert_c
+
+void vp9_convolve8_avg_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg vp9_convolve8_avg_c
+
+void vp9_convolve8_avg_horiz_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_horiz vp9_convolve8_avg_horiz_c
+
+void vp9_convolve8_avg_vert_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_vert vp9_convolve8_avg_vert_c
+
+void vp9_short_idct4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_1_add vp9_short_idct4x4_1_add_c
+
+void vp9_short_idct4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_add vp9_short_idct4x4_add_c
+
+void vp9_short_idct8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct8x8_add vp9_short_idct8x8_add_c
+
+void vp9_short_idct10_8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_8x8_add vp9_short_idct10_8x8_add_c
+
+void vp9_short_idct1_8x8_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_8x8 vp9_short_idct1_8x8_c
+
+void vp9_short_idct16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct16x16_add vp9_short_idct16x16_add_c
+
+void vp9_short_idct10_16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_16x16_add vp9_short_idct10_16x16_add_c
+
+void vp9_short_idct1_16x16_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_16x16 vp9_short_idct1_16x16_c
+
+void vp9_short_idct32x32_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct32x32_add vp9_short_idct32x32_add_c
+
+void vp9_short_idct1_32x32_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_32x32 vp9_short_idct1_32x32_c
+
+void vp9_short_idct10_32x32_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_32x32_add vp9_short_idct10_32x32_add_c
+
+void vp9_short_iht4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht4x4_add vp9_short_iht4x4_add_c
+
+void vp9_short_iht8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht8x8_add vp9_short_iht8x8_add_c
+
+void vp9_short_iht16x16_add_c(int16_t *input, uint8_t *output, int pitch, int tx_type);
+#define vp9_short_iht16x16_add vp9_short_iht16x16_add_c
+
+void vp9_idct4_1d_c(int16_t *input, int16_t *output);
+#define vp9_idct4_1d vp9_idct4_1d_c
+
+void vp9_dc_only_idct_add_c(int input_dc, uint8_t *pred_ptr, uint8_t *dst_ptr, int pitch, int stride);
+#define vp9_dc_only_idct_add vp9_dc_only_idct_add_c
+
+void vp9_short_iwalsh4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_1_add vp9_short_iwalsh4x4_1_add_c
+
+void vp9_short_iwalsh4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_add vp9_short_iwalsh4x4_add_c
+
+unsigned int vp9_sad32x3_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, int max_sad);
+#define vp9_sad32x3 vp9_sad32x3_c
+
+unsigned int vp9_sad3x32_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, int max_sad);
+#define vp9_sad3x32 vp9_sad3x32_c
+
+void vp9_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+#include "vpx_ports/arm.h"
+static void setup_rtcd_internal(void)
+{
+ int flags = arm_cpu_caps();
+
+ (void)flags;
+
+
+}
+#endif
+#endif
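The vp9_rtcd() hook declared here is expected to run setup_rtcd_internal() exactly once before any dispatched call. A sketch of the usual wiring, assuming the once() helper from vpx_ports/vpx_once.h (cf. vp9/common/vp9_rtcd.c in the file list above):

#define RTCD_C
#include "vp9_rtcd.h"
#include "vpx_ports/vpx_once.h"

void vp9_rtcd(void) {
  /* once() guarantees a single, thread-safe invocation. */
  once(setup_rtcd_internal);
}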
diff --git a/armv7a-neon/vpx_config.c b/armv7a-neon/vpx_config.c
index 863c84c..77be6fb 100644
--- a/armv7a-neon/vpx_config.c
+++ b/armv7a-neon/vpx_config.c
@@ -5,5 +5,5 @@
/* tree. An additional intellectual property rights grant can be found */
/* in the file PATENTS. All contributing project authors may */
/* be found in the AUTHORS file in the root of the source tree. */
-static const char* const cfg = "--force-target=armv7-android-gcc --disable-runtime-cpu-detect --sdk-path=/usr/local/google/home/johannkoenig/android-ndk --disable-examples --disable-docs --enable-realtime-only";
+static const char* const cfg = "--target=armv7-android-gcc --disable-runtime-cpu-detect --sdk-path=/usr/local/google/home/johannkoenig/android-ndk --disable-vp9-encoder --disable-examples --disable-docs --enable-realtime-only";
const char *vpx_codec_build_config(void) {return cfg;}
diff --git a/armv7a-neon/vpx_config.h b/armv7a-neon/vpx_config.h
index b3179e5..a808f7c 100644
--- a/armv7a-neon/vpx_config.h
+++ b/armv7a-neon/vpx_config.h
@@ -9,6 +9,7 @@
#ifndef VPX_CONFIG_H
#define VPX_CONFIG_H
#define RESTRICT
+#define INLINE __inline__ __attribute__((always_inline))
#define ARCH_ARM 1
#define ARCH_MIPS 0
#define ARCH_X86 0
@@ -34,7 +35,7 @@
#define HAVE_SYS_MMAN_H 1
#define HAVE_UNISTD_H 1
#define CONFIG_EXTERNAL_BUILD 0
-#define CONFIG_INSTALL_DOCS 1
+#define CONFIG_INSTALL_DOCS 0
#define CONFIG_INSTALL_BINS 1
#define CONFIG_INSTALL_LIBS 1
#define CONFIG_INSTALL_SRCS 0
@@ -61,7 +62,10 @@
#define CONFIG_INTERNAL_STATS 0
#define CONFIG_VP8_ENCODER 1
#define CONFIG_VP8_DECODER 1
+#define CONFIG_VP9_ENCODER 0
+#define CONFIG_VP9_DECODER 1
#define CONFIG_VP8 1
+#define CONFIG_VP9 1
#define CONFIG_ENCODERS 1
#define CONFIG_DECODERS 1
#define CONFIG_STATIC_MSVCRT 0
@@ -77,4 +81,11 @@
#define CONFIG_UNIT_TESTS 0
#define CONFIG_MULTI_RES_ENCODING 0
#define CONFIG_TEMPORAL_DENOISING 1
+#define CONFIG_EXPERIMENTAL 0
+#define CONFIG_DECRYPT 0
+#define CONFIG_ONESHOTQ 0
+#define CONFIG_MULTIPLE_ARF 0
+#define CONFIG_NON420 0
+#define CONFIG_ALPHA 0
+#define CONFIG_BALANCED_COEFTREE 0
#endif /* VPX_CONFIG_H */
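These CONFIG_* symbols are plain 0/1 defines, so codec availability is gated with ordinary preprocessor checks. A hypothetical caller selecting a decoder interface under this configuration (VP9 decoder on, VP9 encoder off):

#include "vpx_config.h"
#include "vpx/vp8dx.h"  /* vpx_codec_vp8_dx() / vpx_codec_vp9_dx() */

static vpx_codec_iface_t *pick_decoder_iface(int want_vp9) {
#if CONFIG_VP9_DECODER
  if (want_vp9)
    return vpx_codec_vp9_dx();  /* enabled by this change */
#else
  (void)want_vp9;               /* VP9 decoder compiled out */
#endif
  return vpx_codec_vp8_dx();
}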
diff --git a/armv7a-neon/vpx_scale_rtcd.h b/armv7a-neon/vpx_scale_rtcd.h
new file mode 100644
index 0000000..ed84626
--- /dev/null
+++ b/armv7a-neon/vpx_scale_rtcd.h
@@ -0,0 +1,62 @@
+#ifndef VPX_SCALE_RTCD_H_
+#define VPX_SCALE_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+struct yv12_buffer_config;
+
+void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
+
+void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
+
+void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
+
+void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
+
+void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
+
+void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
+void vp8_yv12_extend_frame_borders_neon(struct yv12_buffer_config *ybf);
+#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_neon
+
+void vp8_yv12_copy_frame_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+void vp8_yv12_copy_frame_neon(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vp8_yv12_copy_frame vp8_yv12_copy_frame_neon
+
+void vp8_yv12_copy_y_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+void vp8_yv12_copy_y_neon(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vp8_yv12_copy_y vp8_yv12_copy_y_neon
+
+void vp9_extend_frame_borders_c(struct yv12_buffer_config *ybf, int subsampling_x, int subsampling_y);
+#define vp9_extend_frame_borders vp9_extend_frame_borders_c
+
+void vpx_scale_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+#include "vpx_ports/arm.h"
+static void setup_rtcd_internal(void)
+{
+ int flags = arm_cpu_caps();
+
+ (void)flags;
+
+
+}
+#endif
+#endif
diff --git a/armv7a-neon/vpx_version.h b/armv7a-neon/vpx_version.h
index 663dd49..512851c 100644
--- a/armv7a-neon/vpx_version.h
+++ b/armv7a-neon/vpx_version.h
@@ -1,7 +1,7 @@
#define VERSION_MAJOR 1
-#define VERSION_MINOR 1
+#define VERSION_MINOR 2
#define VERSION_PATCH 0
#define VERSION_EXTRA ""
#define VERSION_PACKED ((VERSION_MAJOR<<16)|(VERSION_MINOR<<8)|(VERSION_PATCH))
-#define VERSION_STRING_NOSP "v1.1.0"
-#define VERSION_STRING " v1.1.0"
+#define VERSION_STRING_NOSP "v1.2.0"
+#define VERSION_STRING " v1.2.0"
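VERSION_PACKED folds the three components into one integer (major in bits 16 and up, minor in bits 8-15, patch in bits 0-7), so this bump packs as:

/* v1.1.0: (1 << 16) | (1 << 8) | 0 = 0x010100 = 65792
 * v1.2.0: (1 << 16) | (2 << 8) | 0 = 0x010200 = 66048 */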
diff --git a/armv7a/libvpx_srcs.txt b/armv7a/libvpx_srcs.txt
index e63834d..bab4901 100644
--- a/armv7a/libvpx_srcs.txt
+++ b/armv7a/libvpx_srcs.txt
@@ -32,7 +32,6 @@ vp8/common/arm/filter_arm.c
vp8/common/arm/loopfilter_arm.c
vp8/common/arm/reconintra_arm.c
vp8/common/arm/variance_arm.c
-vp8/common/asm_com_offsets.c
vp8/common/blockd.c
vp8/common/blockd.h
vp8/common/coefupdateprobs.h
@@ -89,8 +88,8 @@ vp8/common/treecoder.c
vp8/common/treecoder.h
vp8/common/variance_c.c
vp8/common/variance.h
+vp8/common/vp8_asm_com_offsets.c
vp8/common/vp8_entropymodedata.h
-vp8/decoder/asm_dec_offsets.c
vp8/decoder/dboolhuff.c
vp8/decoder/dboolhuff.h
vp8/decoder/decodemv.c
@@ -103,6 +102,7 @@ vp8/decoder/onyxd_if.c
vp8/decoder/onyxd_int.h
vp8/decoder/threading.c
vp8/decoder/treereader.h
+vp8/decoder/vp8_asm_dec_offsets.c
vp8/encoder/arm/armv5te/boolhuff_armv5te.asm.s
vp8/encoder/arm/armv5te/vp8_packtokens_armv5.asm.s
vp8/encoder/arm/armv5te/vp8_packtokens_mbrow_armv5.asm.s
@@ -115,7 +115,6 @@ vp8/encoder/arm/armv6/walsh_v6.asm.s
vp8/encoder/arm/boolhuff_arm.c
vp8/encoder/arm/dct_arm.c
vp8/encoder/arm/quantize_arm.c
-vp8/encoder/asm_enc_offsets.c
vp8/encoder/bitstream.c
vp8/encoder/bitstream.h
vp8/encoder/block.h
@@ -161,12 +160,92 @@ vp8/encoder/tokenize.c
vp8/encoder/tokenize.h
vp8/encoder/treewriter.c
vp8/encoder/treewriter.h
+vp8/encoder/vp8_asm_enc_offsets.c
vp8/vp8_common.mk
vp8/vp8cx_arm.mk
vp8/vp8_cx_iface.c
vp8/vp8cx.mk
vp8/vp8_dx_iface.c
vp8/vp8dx.mk
+vp9/common/generic/vp9_systemdependent.c
+vp9/common/vp9_alloccommon.c
+vp9/common/vp9_alloccommon.h
+vp9/common/vp9_asm_com_offsets.c
+vp9/common/vp9_blockd.h
+vp9/common/vp9_common.h
+vp9/common/vp9_convolve.c
+vp9/common/vp9_convolve.h
+vp9/common/vp9_debugmodes.c
+vp9/common/vp9_default_coef_probs.h
+vp9/common/vp9_entropy.c
+vp9/common/vp9_entropy.h
+vp9/common/vp9_entropymode.c
+vp9/common/vp9_entropymode.h
+vp9/common/vp9_entropymv.c
+vp9/common/vp9_entropymv.h
+vp9/common/vp9_enums.h
+vp9/common/vp9_extend.c
+vp9/common/vp9_extend.h
+vp9/common/vp9_filter.c
+vp9/common/vp9_filter.h
+vp9/common/vp9_findnearmv.c
+vp9/common/vp9_findnearmv.h
+vp9/common/vp9_idct.c
+vp9/common/vp9_idct.h
+vp9/common/vp9_loopfilter.c
+vp9/common/vp9_loopfilter_filters.c
+vp9/common/vp9_loopfilter.h
+vp9/common/vp9_mbpitch.c
+vp9/common/vp9_modecont.c
+vp9/common/vp9_modecontext.c
+vp9/common/vp9_modecont.h
+vp9/common/vp9_mv.h
+vp9/common/vp9_mvref_common.c
+vp9/common/vp9_mvref_common.h
+vp9/common/vp9_onyxc_int.h
+vp9/common/vp9_onyx.h
+vp9/common/vp9_ppflags.h
+vp9/common/vp9_pragmas.h
+vp9/common/vp9_pred_common.c
+vp9/common/vp9_pred_common.h
+vp9/common/vp9_quant_common.c
+vp9/common/vp9_quant_common.h
+vp9/common/vp9_reconinter.c
+vp9/common/vp9_reconinter.h
+vp9/common/vp9_reconintra.c
+vp9/common/vp9_reconintra.h
+vp9/common/vp9_rtcd.c
+vp9/common/vp9_rtcd_defs.sh
+vp9/common/vp9_sadmxn.h
+vp9/common/vp9_seg_common.c
+vp9/common/vp9_seg_common.h
+vp9/common/vp9_subpelvar.h
+vp9/common/vp9_systemdependent.h
+vp9/common/vp9_textblit.h
+vp9/common/vp9_tile_common.c
+vp9/common/vp9_tile_common.h
+vp9/common/vp9_treecoder.c
+vp9/common/vp9_treecoder.h
+vp9/decoder/vp9_asm_dec_offsets.c
+vp9/decoder/vp9_dboolhuff.c
+vp9/decoder/vp9_dboolhuff.h
+vp9/decoder/vp9_decodemv.c
+vp9/decoder/vp9_decodemv.h
+vp9/decoder/vp9_decodframe.c
+vp9/decoder/vp9_decodframe.h
+vp9/decoder/vp9_detokenize.c
+vp9/decoder/vp9_detokenize.h
+vp9/decoder/vp9_idct_blk.c
+vp9/decoder/vp9_idct_blk.h
+vp9/decoder/vp9_onyxd.h
+vp9/decoder/vp9_onyxd_if.c
+vp9/decoder/vp9_onyxd_int.h
+vp9/decoder/vp9_read_bit_buffer.h
+vp9/decoder/vp9_treereader.h
+vp9/vp9_common.mk
+vp9/vp9_dx_iface.c
+vp9/vp9dx.mk
+vp9/vp9_iface_common.h
vpx_config.c
vpx/internal/vpx_codec_internal.h
vpx_mem/include/vpx_mem_intrnl.h
@@ -176,17 +255,20 @@ vpx_mem/vpx_mem.mk
vpx_ports/arm_cpudetect.c
vpx_ports/arm.h
vpx_ports/asm_offsets.h
+vpx_ports/emmintrin_compat.h
vpx_ports/mem.h
+vpx_ports/vpx_once.h
vpx_ports/vpx_ports.mk
vpx_ports/vpx_timer.h
vpx_scale/generic/gen_scalers.c
-vpx_scale/generic/vpxscale.c
+vpx_scale/generic/vpx_scale.c
vpx_scale/generic/yv12config.c
vpx_scale/generic/yv12extend.c
-vpx_scale/generic/yv12extend_generic.h
-vpx_scale/scale_mode.h
-vpx_scale/vpxscale.h
+vpx_scale/vpx_scale_asm_offsets.c
+vpx_scale/vpx_scale.h
vpx_scale/vpx_scale.mk
+vpx_scale/vpx_scale_rtcd.c
+vpx_scale/vpx_scale_rtcd.sh
vpx_scale/yv12config.h
vpx/src/vpx_codec.c
vpx/src/vpx_decoder.c
diff --git a/armv7a/vpx_rtcd.h b/armv7a/vp8_rtcd.h
index 6553876..fa79b13 100644
--- a/armv7a/vpx_rtcd.h
+++ b/armv7a/vp8_rtcd.h
@@ -1,5 +1,5 @@
-#ifndef VPX_RTCD_
-#define VPX_RTCD_
+#ifndef VP8_RTCD_H_
+#define VP8_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
@@ -7,7 +7,9 @@
#define RTCD_EXTERN extern
#endif
-#include "vp8/common/blockd.h"
+/*
+ * VP8
+ */
struct blockd;
struct macroblockd;
@@ -20,6 +22,9 @@ struct variance_vtable;
union int_mv;
struct yv12_buffer_config;
+void vp8_clear_system_state_c();
+#define vp8_clear_system_state vp8_clear_system_state_c
+
void vp8_dequantize_b_c(struct blockd*, short *dqc);
void vp8_dequantize_b_v6(struct blockd*, short *dqc);
#define vp8_dequantize_b vp8_dequantize_b_v6
@@ -101,8 +106,8 @@ void vp8_build_intra_predictors_mby_s_c(struct macroblockd *x, unsigned char * y
void vp8_build_intra_predictors_mbuv_s_c(struct macroblockd *x, unsigned char * uabove_row, unsigned char * vabove_row, unsigned char *uleft, unsigned char *vleft, int left_stride, unsigned char * upred_ptr, unsigned char * vpred_ptr, int pred_stride);
#define vp8_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s_c
-void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
-void vp8_intra4x4_predict_armv6(unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
+void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, int b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
+void vp8_intra4x4_predict_armv6(unsigned char *Above, unsigned char *yleft, int left_stride, int b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
#define vp8_intra4x4_predict vp8_intra4x4_predict_armv6
void vp8_sixtap_predict16x16_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
@@ -327,82 +332,7 @@ void vp8_yv12_copy_partial_frame_c(struct yv12_buffer_config *src_ybc, struct yv
int vp8_denoiser_filter_c(struct yv12_buffer_config* mc_running_avg, struct yv12_buffer_config* running_avg, struct macroblock* signal, unsigned int motion_magnitude2, int y_offset, int uv_offset);
#define vp8_denoiser_filter vp8_denoiser_filter_c
-void vp8_horizontal_line_4_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_4_5_scale vp8_horizontal_line_4_5_scale_c
-
-void vp8_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_4_5_scale vp8_vertical_band_4_5_scale_c
-
-void vp8_last_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_4_5_scale vp8_last_vertical_band_4_5_scale_c
-
-void vp8_horizontal_line_2_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_3_scale vp8_horizontal_line_2_3_scale_c
-
-void vp8_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_3_scale vp8_vertical_band_2_3_scale_c
-
-void vp8_last_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_2_3_scale vp8_last_vertical_band_2_3_scale_c
-
-void vp8_horizontal_line_3_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_5_scale vp8_horizontal_line_3_5_scale_c
-
-void vp8_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_5_scale vp8_vertical_band_3_5_scale_c
-
-void vp8_last_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_5_scale vp8_last_vertical_band_3_5_scale_c
-
-void vp8_horizontal_line_3_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_4_scale vp8_horizontal_line_3_4_scale_c
-
-void vp8_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_4_scale vp8_vertical_band_3_4_scale_c
-
-void vp8_last_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_4_scale vp8_last_vertical_band_3_4_scale_c
-
-void vp8_horizontal_line_1_2_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_1_2_scale vp8_horizontal_line_1_2_scale_c
-
-void vp8_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_1_2_scale vp8_vertical_band_1_2_scale_c
-
-void vp8_last_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_1_2_scale vp8_last_vertical_band_1_2_scale_c
-
-void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
-
-void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
-
-void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
-
-void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
-
-void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
-
-void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
-#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c
-
-void vp8_yv12_copy_frame_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c
-
-void vp8_yv12_copy_y_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_y vp8_yv12_copy_y_c
-
-void vpx_rtcd(void);
+void vp8_rtcd(void);
#include "vpx_config.h"
#ifdef RTCD_C
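
Note: the header rename (vpx_rtcd.h -> vp8_rtcd.h) goes with splitting the old single vpx_rtcd() entry point into per-module ones: vp8_rtcd(), vp9_rtcd() and vpx_scale_rtcd() each bind their own dispatch table. A minimal sketch of the initialization a host program would now perform (the three functions are declared in these generated headers; the wrapper itself is illustrative, not part of this patch):

    #include "vp8_rtcd.h"
    #include "vp9_rtcd.h"
    #include "vpx_scale_rtcd.h"

    static void init_rtcd_tables(void) {
      /* With --disable-runtime-cpu-detect each symbol is already
       * #defined to a fixed implementation, so these calls mainly
       * keep one init path across all build configurations. */
      vp8_rtcd();
      vp9_rtcd();
      vpx_scale_rtcd();
    }
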
diff --git a/armv7a/vp9_rtcd.h b/armv7a/vp9_rtcd.h
new file mode 100644
index 0000000..cc3c834
--- /dev/null
+++ b/armv7a/vp9_rtcd.h
@@ -0,0 +1,190 @@
+#ifndef VP9_RTCD_H_
+#define VP9_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+/*
+ * VP9
+ */
+
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_enums.h"
+
+struct loop_filter_info;
+struct macroblockd;
+struct loop_filter_info;
+
+/* Encoder forward decls */
+struct macroblock;
+struct vp9_variance_vtable;
+
+#define DEC_MVCOSTS int *mvjcost, int *mvcost[2]
+union int_mv;
+struct yv12_buffer_config;
+
+void vp9_idct_add_16x16_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_16x16 vp9_idct_add_16x16_c
+
+void vp9_idct_add_8x8_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_8x8 vp9_idct_add_8x8_c
+
+void vp9_idct_add_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add vp9_idct_add_c
+
+void vp9_idct_add_32x32_c(int16_t *q, uint8_t *dst, int stride, int eob);
+#define vp9_idct_add_32x32 vp9_idct_add_32x32_c
+
+void vp9_copy_mem16x16_c(const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch);
+#define vp9_copy_mem16x16 vp9_copy_mem16x16_c
+
+void vp9_copy_mem8x8_c(const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch);
+#define vp9_copy_mem8x8 vp9_copy_mem8x8_c
+
+void vp9_copy_mem8x4_c(const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch);
+#define vp9_copy_mem8x4 vp9_copy_mem8x4_c
+
+void vp9_build_intra_predictors_c(uint8_t *src, int src_stride, uint8_t *pred, int y_stride, int mode, int bw, int bh, int up_available, int left_available, int right_available);
+#define vp9_build_intra_predictors vp9_build_intra_predictors_c
+
+void vp9_build_intra_predictors_sby_s_c(struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize);
+#define vp9_build_intra_predictors_sby_s vp9_build_intra_predictors_sby_s_c
+
+void vp9_build_intra_predictors_sbuv_s_c(struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize);
+#define vp9_build_intra_predictors_sbuv_s vp9_build_intra_predictors_sbuv_s_c
+
+void vp9_intra4x4_predict_c(struct macroblockd *xd, int block, enum BLOCK_SIZE_TYPE bsize, int b_mode, uint8_t *predictor, int pre_stride);
+#define vp9_intra4x4_predict vp9_intra4x4_predict_c
+
+void vp9_add_constant_residual_8x8_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_8x8 vp9_add_constant_residual_8x8_c
+
+void vp9_add_constant_residual_16x16_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_16x16 vp9_add_constant_residual_16x16_c
+
+void vp9_add_constant_residual_32x32_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_32x32 vp9_add_constant_residual_32x32_c
+
+void vp9_mb_lpf_vertical_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
+#define vp9_mb_lpf_vertical_edge_w vp9_mb_lpf_vertical_edge_w_c
+
+void vp9_mbloop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_vertical_edge vp9_mbloop_filter_vertical_edge_c
+
+void vp9_loop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_vertical_edge vp9_loop_filter_vertical_edge_c
+
+void vp9_mb_lpf_horizontal_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
+#define vp9_mb_lpf_horizontal_edge_w vp9_mb_lpf_horizontal_edge_w_c
+
+void vp9_mbloop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_horizontal_edge vp9_mbloop_filter_horizontal_edge_c
+
+void vp9_loop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_horizontal_edge vp9_loop_filter_horizontal_edge_c
+
+void vp9_blend_mb_inner_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_inner vp9_blend_mb_inner_c
+
+void vp9_blend_mb_outer_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_outer vp9_blend_mb_outer_c
+
+void vp9_blend_b_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_b vp9_blend_b_c
+
+void vp9_convolve8_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8 vp9_convolve8_c
+
+void vp9_convolve8_horiz_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_horiz vp9_convolve8_horiz_c
+
+void vp9_convolve8_vert_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_vert vp9_convolve8_vert_c
+
+void vp9_convolve8_avg_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg vp9_convolve8_avg_c
+
+void vp9_convolve8_avg_horiz_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_horiz vp9_convolve8_avg_horiz_c
+
+void vp9_convolve8_avg_vert_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_vert vp9_convolve8_avg_vert_c
+
+void vp9_short_idct4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_1_add vp9_short_idct4x4_1_add_c
+
+void vp9_short_idct4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_add vp9_short_idct4x4_add_c
+
+void vp9_short_idct8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct8x8_add vp9_short_idct8x8_add_c
+
+void vp9_short_idct10_8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_8x8_add vp9_short_idct10_8x8_add_c
+
+void vp9_short_idct1_8x8_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_8x8 vp9_short_idct1_8x8_c
+
+void vp9_short_idct16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct16x16_add vp9_short_idct16x16_add_c
+
+void vp9_short_idct10_16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_16x16_add vp9_short_idct10_16x16_add_c
+
+void vp9_short_idct1_16x16_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_16x16 vp9_short_idct1_16x16_c
+
+void vp9_short_idct32x32_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct32x32_add vp9_short_idct32x32_add_c
+
+void vp9_short_idct1_32x32_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_32x32 vp9_short_idct1_32x32_c
+
+void vp9_short_idct10_32x32_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_32x32_add vp9_short_idct10_32x32_add_c
+
+void vp9_short_iht4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht4x4_add vp9_short_iht4x4_add_c
+
+void vp9_short_iht8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht8x8_add vp9_short_iht8x8_add_c
+
+void vp9_short_iht16x16_add_c(int16_t *input, uint8_t *output, int pitch, int tx_type);
+#define vp9_short_iht16x16_add vp9_short_iht16x16_add_c
+
+void vp9_idct4_1d_c(int16_t *input, int16_t *output);
+#define vp9_idct4_1d vp9_idct4_1d_c
+
+void vp9_dc_only_idct_add_c(int input_dc, uint8_t *pred_ptr, uint8_t *dst_ptr, int pitch, int stride);
+#define vp9_dc_only_idct_add vp9_dc_only_idct_add_c
+
+void vp9_short_iwalsh4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_1_add vp9_short_iwalsh4x4_1_add_c
+
+void vp9_short_iwalsh4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_add vp9_short_iwalsh4x4_add_c
+
+unsigned int vp9_sad32x3_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, int max_sad);
+#define vp9_sad32x3 vp9_sad32x3_c
+
+unsigned int vp9_sad3x32_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, int max_sad);
+#define vp9_sad3x32 vp9_sad3x32_c
+
+void vp9_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+#include "vpx_ports/arm.h"
+static void setup_rtcd_internal(void)
+{
+ int flags = arm_cpu_caps();
+
+ (void)flags;
+
+
+}
+#endif
+#endif
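
In this new header every vp9_* symbol is bound with #define, so setup_rtcd_internal() only reads arm_cpu_caps() and discards the result; that is the expected generator output when the build passes --disable-runtime-cpu-detect and --disable-neon. For contrast, a hypothetical sketch of the pattern emitted when runtime detection is enabled and a NEON variant exists (vp9_convolve8_neon is illustrative, not part of this patch; the symbol would then be an RTCD_EXTERN function pointer rather than a #define):

    static void setup_rtcd_internal(void) {
      int flags = arm_cpu_caps();
    #if HAVE_NEON
      /* Start at the portable C version, upgrade if the CPU has NEON. */
      vp9_convolve8 = vp9_convolve8_c;
      if (flags & HAS_NEON) vp9_convolve8 = vp9_convolve8_neon;
    #else
      (void)flags;
    #endif
    }
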
diff --git a/armv7a/vpx_config.c b/armv7a/vpx_config.c
index 559f9b0..a246c39 100644
--- a/armv7a/vpx_config.c
+++ b/armv7a/vpx_config.c
@@ -5,5 +5,5 @@
/* tree. An additional intellectual property rights grant can be found */
/* in the file PATENTS. All contributing project authors may */
/* be found in the AUTHORS file in the root of the source tree. */
-static const char* const cfg = "--force-target=armv7-android-gcc --disable-runtime-cpu-detect --sdk-path=/usr/local/google/home/johannkoenig/android-ndk --disable-examples --disable-docs --disable-neon --enable-realtime-only";
+static const char* const cfg = "--target=armv7-android-gcc --disable-runtime-cpu-detect --sdk-path=/usr/local/google/home/johannkoenig/android-ndk --disable-vp9-encoder --disable-neon --disable-examples --disable-docs --enable-realtime-only";
const char *vpx_codec_build_config(void) {return cfg;}
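
The cfg string is more than a comment: libvpx exposes it at runtime through vpx_codec_build_config() (declared in vpx/vpx_codec.h), which is handy for verifying that a device really shipped this decode-only VP9 build. A small self-contained check, assuming the installed libvpx headers:

    #include <stdio.h>
    #include <string.h>
    #include "vpx/vpx_codec.h"

    int main(void) {
      const char *cfg = vpx_codec_build_config();
      printf("built with: %s\n", cfg);
      /* For this armv7a target the string should now contain the
       * decode-only switch added above. */
      return strstr(cfg, "--disable-vp9-encoder") ? 0 : 1;
    }
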
diff --git a/armv7a/vpx_config.h b/armv7a/vpx_config.h
index ddb331f..e04f103 100644
--- a/armv7a/vpx_config.h
+++ b/armv7a/vpx_config.h
@@ -9,6 +9,7 @@
#ifndef VPX_CONFIG_H
#define VPX_CONFIG_H
#define RESTRICT
+#define INLINE __inline__ __attribute__((always_inline))
#define ARCH_ARM 1
#define ARCH_MIPS 0
#define ARCH_X86 0
@@ -34,7 +35,7 @@
#define HAVE_SYS_MMAN_H 1
#define HAVE_UNISTD_H 1
#define CONFIG_EXTERNAL_BUILD 0
-#define CONFIG_INSTALL_DOCS 1
+#define CONFIG_INSTALL_DOCS 0
#define CONFIG_INSTALL_BINS 1
#define CONFIG_INSTALL_LIBS 1
#define CONFIG_INSTALL_SRCS 0
@@ -61,7 +62,10 @@
#define CONFIG_INTERNAL_STATS 0
#define CONFIG_VP8_ENCODER 1
#define CONFIG_VP8_DECODER 1
+#define CONFIG_VP9_ENCODER 0
+#define CONFIG_VP9_DECODER 1
#define CONFIG_VP8 1
+#define CONFIG_VP9 1
#define CONFIG_ENCODERS 1
#define CONFIG_DECODERS 1
#define CONFIG_STATIC_MSVCRT 0
@@ -77,4 +81,11 @@
#define CONFIG_UNIT_TESTS 0
#define CONFIG_MULTI_RES_ENCODING 0
#define CONFIG_TEMPORAL_DENOISING 1
+#define CONFIG_EXPERIMENTAL 0
+#define CONFIG_DECRYPT 0
+#define CONFIG_ONESHOTQ 0
+#define CONFIG_MULTIPLE_ARF 0
+#define CONFIG_NON420 0
+#define CONFIG_ALPHA 0
+#define CONFIG_BALANCED_COEFTREE 0
#endif /* VPX_CONFIG_H */
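
Two things land in this config: the VP9 feature flags (decoder on, encoder off) and an INLINE macro that the sources use as "static INLINE" for small hot helpers. A sketch of both in use (clamp_pixel is an illustrative name, not from this patch):

    #include "vpx_config.h"

    /* Force-inlined under GCC per the macro defined above. */
    static INLINE int clamp_pixel(int v) {
      return v < 0 ? 0 : (v > 255 ? 255 : v);
    }

    #if CONFIG_VP9_DECODER && !CONFIG_VP9_ENCODER
    /* Decode-only VP9 paths are compiled in on this target. */
    #endif
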
diff --git a/armv7a/vpx_scale_rtcd.h b/armv7a/vpx_scale_rtcd.h
new file mode 100644
index 0000000..3f25632
--- /dev/null
+++ b/armv7a/vpx_scale_rtcd.h
@@ -0,0 +1,59 @@
+#ifndef VPX_SCALE_RTCD_H_
+#define VPX_SCALE_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+struct yv12_buffer_config;
+
+void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
+
+void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
+
+void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
+
+void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
+
+void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
+
+void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
+#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c
+
+void vp8_yv12_copy_frame_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c
+
+void vp8_yv12_copy_y_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vp8_yv12_copy_y vp8_yv12_copy_y_c
+
+void vp9_extend_frame_borders_c(struct yv12_buffer_config *ybf, int subsampling_x, int subsampling_y);
+#define vp9_extend_frame_borders vp9_extend_frame_borders_c
+
+void vpx_scale_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+#include "vpx_ports/arm.h"
+static void setup_rtcd_internal(void)
+{
+ int flags = arm_cpu_caps();
+
+ (void)flags;
+
+
+}
+#endif
+#endif
diff --git a/armv7a/vpx_version.h b/armv7a/vpx_version.h
index 663dd49..512851c 100644
--- a/armv7a/vpx_version.h
+++ b/armv7a/vpx_version.h
@@ -1,7 +1,7 @@
#define VERSION_MAJOR 1
-#define VERSION_MINOR 1
+#define VERSION_MINOR 2
#define VERSION_PATCH 0
#define VERSION_EXTRA ""
#define VERSION_PACKED ((VERSION_MAJOR<<16)|(VERSION_MINOR<<8)|(VERSION_PATCH))
-#define VERSION_STRING_NOSP "v1.1.0"
-#define VERSION_STRING " v1.1.0"
+#define VERSION_STRING_NOSP "v1.2.0"
+#define VERSION_STRING " v1.2.0"
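
The packed form makes version checks a single integer comparison: for v1.2.0, VERSION_PACKED is (1<<16)|(2<<8)|0 = 0x010200, against 0x010100 for v1.1.0, so any component bump compares greater as long as each field stays below 256. A tiny sanity check, assuming this header:

    #include <assert.h>
    #include "vpx_version.h"

    int main(void) {
      assert(VERSION_PACKED == 0x010200);  /* v1.2.0 */
      assert(VERSION_PACKED > 0x010100);   /* newer than v1.1.0 */
      return 0;
    }
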
diff --git a/generic/libvpx_srcs.txt b/generic/libvpx_srcs.txt
index 5756427..8c1ec80 100644
--- a/generic/libvpx_srcs.txt
+++ b/generic/libvpx_srcs.txt
@@ -4,7 +4,6 @@ CHANGELOG
libs.mk
vp8/common/alloccommon.c
vp8/common/alloccommon.h
-vp8/common/asm_com_offsets.c
vp8/common/blockd.c
vp8/common/blockd.h
vp8/common/coefupdateprobs.h
@@ -61,8 +60,8 @@ vp8/common/treecoder.c
vp8/common/treecoder.h
vp8/common/variance_c.c
vp8/common/variance.h
+vp8/common/vp8_asm_com_offsets.c
vp8/common/vp8_entropymodedata.h
-vp8/decoder/asm_dec_offsets.c
vp8/decoder/dboolhuff.c
vp8/decoder/dboolhuff.h
vp8/decoder/decodemv.c
@@ -75,7 +74,7 @@ vp8/decoder/onyxd_if.c
vp8/decoder/onyxd_int.h
vp8/decoder/threading.c
vp8/decoder/treereader.h
-vp8/encoder/asm_enc_offsets.c
+vp8/decoder/vp8_asm_dec_offsets.c
vp8/encoder/bitstream.c
vp8/encoder/bitstream.h
vp8/encoder/block.h
@@ -122,11 +121,91 @@ vp8/encoder/tokenize.c
vp8/encoder/tokenize.h
vp8/encoder/treewriter.c
vp8/encoder/treewriter.h
+vp8/encoder/vp8_asm_enc_offsets.c
vp8/vp8_common.mk
vp8/vp8_cx_iface.c
vp8/vp8cx.mk
vp8/vp8_dx_iface.c
vp8/vp8dx.mk
+vp9/common/generic/vp9_systemdependent.c
+vp9/common/vp9_alloccommon.c
+vp9/common/vp9_alloccommon.h
+vp9/common/vp9_asm_com_offsets.c
+vp9/common/vp9_blockd.h
+vp9/common/vp9_common.h
+vp9/common/vp9_convolve.c
+vp9/common/vp9_convolve.h
+vp9/common/vp9_debugmodes.c
+vp9/common/vp9_default_coef_probs.h
+vp9/common/vp9_entropy.c
+vp9/common/vp9_entropy.h
+vp9/common/vp9_entropymode.c
+vp9/common/vp9_entropymode.h
+vp9/common/vp9_entropymv.c
+vp9/common/vp9_entropymv.h
+vp9/common/vp9_enums.h
+vp9/common/vp9_extend.c
+vp9/common/vp9_extend.h
+vp9/common/vp9_filter.c
+vp9/common/vp9_filter.h
+vp9/common/vp9_findnearmv.c
+vp9/common/vp9_findnearmv.h
+vp9/common/vp9_idct.c
+vp9/common/vp9_idct.h
+vp9/common/vp9_loopfilter.c
+vp9/common/vp9_loopfilter_filters.c
+vp9/common/vp9_loopfilter.h
+vp9/common/vp9_mbpitch.c
+vp9/common/vp9_modecont.c
+vp9/common/vp9_modecontext.c
+vp9/common/vp9_modecont.h
+vp9/common/vp9_mv.h
+vp9/common/vp9_mvref_common.c
+vp9/common/vp9_mvref_common.h
+vp9/common/vp9_onyxc_int.h
+vp9/common/vp9_onyx.h
+vp9/common/vp9_ppflags.h
+vp9/common/vp9_pragmas.h
+vp9/common/vp9_pred_common.c
+vp9/common/vp9_pred_common.h
+vp9/common/vp9_quant_common.c
+vp9/common/vp9_quant_common.h
+vp9/common/vp9_reconinter.c
+vp9/common/vp9_reconinter.h
+vp9/common/vp9_reconintra.c
+vp9/common/vp9_reconintra.h
+vp9/common/vp9_rtcd.c
+vp9/common/vp9_rtcd_defs.sh
+vp9/common/vp9_sadmxn.h
+vp9/common/vp9_seg_common.c
+vp9/common/vp9_seg_common.h
+vp9/common/vp9_subpelvar.h
+vp9/common/vp9_systemdependent.h
+vp9/common/vp9_textblit.h
+vp9/common/vp9_tile_common.c
+vp9/common/vp9_tile_common.h
+vp9/common/vp9_treecoder.c
+vp9/common/vp9_treecoder.h
+vp9/decoder/vp9_asm_dec_offsets.c
+vp9/decoder/vp9_dboolhuff.c
+vp9/decoder/vp9_dboolhuff.h
+vp9/decoder/vp9_decodemv.c
+vp9/decoder/vp9_decodemv.h
+vp9/decoder/vp9_decodframe.c
+vp9/decoder/vp9_decodframe.h
+vp9/decoder/vp9_detokenize.c
+vp9/decoder/vp9_detokenize.h
+vp9/decoder/vp9_idct_blk.c
+vp9/decoder/vp9_idct_blk.h
+vp9/decoder/vp9_onyxd.h
+vp9/decoder/vp9_onyxd_if.c
+vp9/decoder/vp9_onyxd_int.h
+vp9/decoder/vp9_read_bit_buffer.h
+vp9/decoder/vp9_treereader.h
+vp9/vp9_common.mk
+vp9/vp9_dx_iface.c
+vp9/vp9dx.mk
+vp9/vp9_iface_common.h
vpx_config.c
vpx/internal/vpx_codec_internal.h
vpx_mem/include/vpx_mem_intrnl.h
@@ -134,17 +213,20 @@ vpx_mem/vpx_mem.c
vpx_mem/vpx_mem.h
vpx_mem/vpx_mem.mk
vpx_ports/asm_offsets.h
+vpx_ports/emmintrin_compat.h
vpx_ports/mem.h
+vpx_ports/vpx_once.h
vpx_ports/vpx_ports.mk
vpx_ports/vpx_timer.h
vpx_scale/generic/gen_scalers.c
-vpx_scale/generic/vpxscale.c
+vpx_scale/generic/vpx_scale.c
vpx_scale/generic/yv12config.c
vpx_scale/generic/yv12extend.c
-vpx_scale/generic/yv12extend_generic.h
-vpx_scale/scale_mode.h
-vpx_scale/vpxscale.h
+vpx_scale/vpx_scale_asm_offsets.c
+vpx_scale/vpx_scale.h
vpx_scale/vpx_scale.mk
+vpx_scale/vpx_scale_rtcd.c
+vpx_scale/vpx_scale_rtcd.sh
vpx_scale/yv12config.h
vpx/src/vpx_codec.c
vpx/src/vpx_decoder.c
diff --git a/generic/vpx_rtcd.h b/generic/vp8_rtcd.h
index ab83978..97ef714 100644
--- a/generic/vpx_rtcd.h
+++ b/generic/vp8_rtcd.h
@@ -1,5 +1,5 @@
-#ifndef VPX_RTCD_
-#define VPX_RTCD_
+#ifndef VP8_RTCD_H_
+#define VP8_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
@@ -7,7 +7,9 @@
#define RTCD_EXTERN extern
#endif
-#include "vp8/common/blockd.h"
+/*
+ * VP8
+ */
struct blockd;
struct macroblockd;
@@ -20,6 +22,9 @@ struct variance_vtable;
union int_mv;
struct yv12_buffer_config;
+void vp8_clear_system_state_c();
+#define vp8_clear_system_state vp8_clear_system_state_c
+
void vp8_dequantize_b_c(struct blockd*, short *dqc);
#define vp8_dequantize_b vp8_dequantize_b_c
@@ -83,7 +88,7 @@ void vp8_build_intra_predictors_mby_s_c(struct macroblockd *x, unsigned char * y
void vp8_build_intra_predictors_mbuv_s_c(struct macroblockd *x, unsigned char * uabove_row, unsigned char * vabove_row, unsigned char *uleft, unsigned char *vleft, int left_stride, unsigned char * upred_ptr, unsigned char * vpred_ptr, int pred_stride);
#define vp8_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s_c
-void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
+void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, int b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
#define vp8_intra4x4_predict vp8_intra4x4_predict_c
void vp8_sixtap_predict16x16_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
@@ -284,82 +289,7 @@ void vp8_yv12_copy_partial_frame_c(struct yv12_buffer_config *src_ybc, struct yv
int vp8_denoiser_filter_c(struct yv12_buffer_config* mc_running_avg, struct yv12_buffer_config* running_avg, struct macroblock* signal, unsigned int motion_magnitude2, int y_offset, int uv_offset);
#define vp8_denoiser_filter vp8_denoiser_filter_c
-void vp8_horizontal_line_4_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_4_5_scale vp8_horizontal_line_4_5_scale_c
-
-void vp8_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_4_5_scale vp8_vertical_band_4_5_scale_c
-
-void vp8_last_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_4_5_scale vp8_last_vertical_band_4_5_scale_c
-
-void vp8_horizontal_line_2_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_3_scale vp8_horizontal_line_2_3_scale_c
-
-void vp8_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_3_scale vp8_vertical_band_2_3_scale_c
-
-void vp8_last_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_2_3_scale vp8_last_vertical_band_2_3_scale_c
-
-void vp8_horizontal_line_3_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_5_scale vp8_horizontal_line_3_5_scale_c
-
-void vp8_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_5_scale vp8_vertical_band_3_5_scale_c
-
-void vp8_last_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_5_scale vp8_last_vertical_band_3_5_scale_c
-
-void vp8_horizontal_line_3_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_4_scale vp8_horizontal_line_3_4_scale_c
-
-void vp8_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_4_scale vp8_vertical_band_3_4_scale_c
-
-void vp8_last_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_4_scale vp8_last_vertical_band_3_4_scale_c
-
-void vp8_horizontal_line_1_2_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_1_2_scale vp8_horizontal_line_1_2_scale_c
-
-void vp8_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_1_2_scale vp8_vertical_band_1_2_scale_c
-
-void vp8_last_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_1_2_scale vp8_last_vertical_band_1_2_scale_c
-
-void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
-
-void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
-
-void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
-
-void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
-
-void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
-
-void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
-#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c
-
-void vp8_yv12_copy_frame_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c
-
-void vp8_yv12_copy_y_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_y vp8_yv12_copy_y_c
-
-void vpx_rtcd(void);
+void vp8_rtcd(void);
#include "vpx_config.h"
#ifdef RTCD_C
diff --git a/generic/vp9_rtcd.h b/generic/vp9_rtcd.h
new file mode 100644
index 0000000..dee08d4
--- /dev/null
+++ b/generic/vp9_rtcd.h
@@ -0,0 +1,185 @@
+#ifndef VP9_RTCD_H_
+#define VP9_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+/*
+ * VP9
+ */
+
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_enums.h"
+
+struct loop_filter_info;
+struct macroblockd;
+struct loop_filter_info;
+
+/* Encoder forward decls */
+struct macroblock;
+struct vp9_variance_vtable;
+
+#define DEC_MVCOSTS int *mvjcost, int *mvcost[2]
+union int_mv;
+struct yv12_buffer_config;
+
+void vp9_idct_add_16x16_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_16x16 vp9_idct_add_16x16_c
+
+void vp9_idct_add_8x8_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_8x8 vp9_idct_add_8x8_c
+
+void vp9_idct_add_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add vp9_idct_add_c
+
+void vp9_idct_add_32x32_c(int16_t *q, uint8_t *dst, int stride, int eob);
+#define vp9_idct_add_32x32 vp9_idct_add_32x32_c
+
+void vp9_copy_mem16x16_c(const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch);
+#define vp9_copy_mem16x16 vp9_copy_mem16x16_c
+
+void vp9_copy_mem8x8_c(const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch);
+#define vp9_copy_mem8x8 vp9_copy_mem8x8_c
+
+void vp9_copy_mem8x4_c(const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch);
+#define vp9_copy_mem8x4 vp9_copy_mem8x4_c
+
+void vp9_build_intra_predictors_c(uint8_t *src, int src_stride, uint8_t *pred, int y_stride, int mode, int bw, int bh, int up_available, int left_available, int right_available);
+#define vp9_build_intra_predictors vp9_build_intra_predictors_c
+
+void vp9_build_intra_predictors_sby_s_c(struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize);
+#define vp9_build_intra_predictors_sby_s vp9_build_intra_predictors_sby_s_c
+
+void vp9_build_intra_predictors_sbuv_s_c(struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize);
+#define vp9_build_intra_predictors_sbuv_s vp9_build_intra_predictors_sbuv_s_c
+
+void vp9_intra4x4_predict_c(struct macroblockd *xd, int block, enum BLOCK_SIZE_TYPE bsize, int b_mode, uint8_t *predictor, int pre_stride);
+#define vp9_intra4x4_predict vp9_intra4x4_predict_c
+
+void vp9_add_constant_residual_8x8_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_8x8 vp9_add_constant_residual_8x8_c
+
+void vp9_add_constant_residual_16x16_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_16x16 vp9_add_constant_residual_16x16_c
+
+void vp9_add_constant_residual_32x32_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_32x32 vp9_add_constant_residual_32x32_c
+
+void vp9_mb_lpf_vertical_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
+#define vp9_mb_lpf_vertical_edge_w vp9_mb_lpf_vertical_edge_w_c
+
+void vp9_mbloop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_vertical_edge vp9_mbloop_filter_vertical_edge_c
+
+void vp9_loop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_vertical_edge vp9_loop_filter_vertical_edge_c
+
+void vp9_mb_lpf_horizontal_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
+#define vp9_mb_lpf_horizontal_edge_w vp9_mb_lpf_horizontal_edge_w_c
+
+void vp9_mbloop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_horizontal_edge vp9_mbloop_filter_horizontal_edge_c
+
+void vp9_loop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_horizontal_edge vp9_loop_filter_horizontal_edge_c
+
+void vp9_blend_mb_inner_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_inner vp9_blend_mb_inner_c
+
+void vp9_blend_mb_outer_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_outer vp9_blend_mb_outer_c
+
+void vp9_blend_b_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_b vp9_blend_b_c
+
+void vp9_convolve8_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8 vp9_convolve8_c
+
+void vp9_convolve8_horiz_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_horiz vp9_convolve8_horiz_c
+
+void vp9_convolve8_vert_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_vert vp9_convolve8_vert_c
+
+void vp9_convolve8_avg_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg vp9_convolve8_avg_c
+
+void vp9_convolve8_avg_horiz_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_horiz vp9_convolve8_avg_horiz_c
+
+void vp9_convolve8_avg_vert_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_vert vp9_convolve8_avg_vert_c
+
+void vp9_short_idct4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_1_add vp9_short_idct4x4_1_add_c
+
+void vp9_short_idct4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_add vp9_short_idct4x4_add_c
+
+void vp9_short_idct8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct8x8_add vp9_short_idct8x8_add_c
+
+void vp9_short_idct10_8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_8x8_add vp9_short_idct10_8x8_add_c
+
+void vp9_short_idct1_8x8_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_8x8 vp9_short_idct1_8x8_c
+
+void vp9_short_idct16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct16x16_add vp9_short_idct16x16_add_c
+
+void vp9_short_idct10_16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_16x16_add vp9_short_idct10_16x16_add_c
+
+void vp9_short_idct1_16x16_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_16x16 vp9_short_idct1_16x16_c
+
+void vp9_short_idct32x32_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct32x32_add vp9_short_idct32x32_add_c
+
+void vp9_short_idct1_32x32_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_32x32 vp9_short_idct1_32x32_c
+
+void vp9_short_idct10_32x32_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_32x32_add vp9_short_idct10_32x32_add_c
+
+void vp9_short_iht4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht4x4_add vp9_short_iht4x4_add_c
+
+void vp9_short_iht8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht8x8_add vp9_short_iht8x8_add_c
+
+void vp9_short_iht16x16_add_c(int16_t *input, uint8_t *output, int pitch, int tx_type);
+#define vp9_short_iht16x16_add vp9_short_iht16x16_add_c
+
+void vp9_idct4_1d_c(int16_t *input, int16_t *output);
+#define vp9_idct4_1d vp9_idct4_1d_c
+
+void vp9_dc_only_idct_add_c(int input_dc, uint8_t *pred_ptr, uint8_t *dst_ptr, int pitch, int stride);
+#define vp9_dc_only_idct_add vp9_dc_only_idct_add_c
+
+void vp9_short_iwalsh4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_1_add vp9_short_iwalsh4x4_1_add_c
+
+void vp9_short_iwalsh4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_add vp9_short_iwalsh4x4_add_c
+
+unsigned int vp9_sad32x3_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, int max_sad);
+#define vp9_sad32x3 vp9_sad32x3_c
+
+unsigned int vp9_sad3x32_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, int max_sad);
+#define vp9_sad3x32 vp9_sad3x32_c
+
+void vp9_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+static void setup_rtcd_internal(void)
+{
+
+}
+#endif
+#endif
diff --git a/generic/vpx_config.c b/generic/vpx_config.c
index 61f4fd8..c87cb91 100644
--- a/generic/vpx_config.c
+++ b/generic/vpx_config.c
@@ -5,5 +5,5 @@
/* tree. An additional intellectual property rights grant can be found */
/* in the file PATENTS. All contributing project authors may */
/* be found in the AUTHORS file in the root of the source tree. */
-static const char* const cfg = "--force-target=generic-gnu --disable-examples --disable-docs --enable-realtime-only";
+static const char* const cfg = "--force-target=generic-gnu --disable-vp9-encoder --disable-examples --disable-docs --enable-realtime-only";
const char *vpx_codec_build_config(void) {return cfg;}
diff --git a/generic/vpx_config.h b/generic/vpx_config.h
index 9c6da62..44e6842 100644
--- a/generic/vpx_config.h
+++ b/generic/vpx_config.h
@@ -9,6 +9,7 @@
#ifndef VPX_CONFIG_H
#define VPX_CONFIG_H
#define RESTRICT
+#define INLINE __inline__ __attribute__((always_inline))
#define ARCH_ARM 0
#define ARCH_MIPS 0
#define ARCH_X86 0
@@ -34,7 +35,7 @@
#define HAVE_SYS_MMAN_H 1
#define HAVE_UNISTD_H 1
#define CONFIG_EXTERNAL_BUILD 0
-#define CONFIG_INSTALL_DOCS 1
+#define CONFIG_INSTALL_DOCS 0
#define CONFIG_INSTALL_BINS 1
#define CONFIG_INSTALL_LIBS 1
#define CONFIG_INSTALL_SRCS 0
@@ -61,7 +62,10 @@
#define CONFIG_INTERNAL_STATS 0
#define CONFIG_VP8_ENCODER 1
#define CONFIG_VP8_DECODER 1
+#define CONFIG_VP9_ENCODER 0
+#define CONFIG_VP9_DECODER 1
#define CONFIG_VP8 1
+#define CONFIG_VP9 1
#define CONFIG_ENCODERS 1
#define CONFIG_DECODERS 1
#define CONFIG_STATIC_MSVCRT 0
@@ -77,4 +81,11 @@
#define CONFIG_UNIT_TESTS 1
#define CONFIG_MULTI_RES_ENCODING 0
#define CONFIG_TEMPORAL_DENOISING 1
+#define CONFIG_EXPERIMENTAL 0
+#define CONFIG_DECRYPT 0
+#define CONFIG_ONESHOTQ 0
+#define CONFIG_MULTIPLE_ARF 0
+#define CONFIG_NON420 0
+#define CONFIG_ALPHA 0
+#define CONFIG_BALANCED_COEFTREE 0
#endif /* VPX_CONFIG_H */
diff --git a/generic/vpx_scale_rtcd.h b/generic/vpx_scale_rtcd.h
new file mode 100644
index 0000000..3a1db05
--- /dev/null
+++ b/generic/vpx_scale_rtcd.h
@@ -0,0 +1,54 @@
+#ifndef VPX_SCALE_RTCD_H_
+#define VPX_SCALE_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+struct yv12_buffer_config;
+
+void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
+
+void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
+
+void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
+
+void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
+
+void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
+
+void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
+#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c
+
+void vp8_yv12_copy_frame_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c
+
+void vp8_yv12_copy_y_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vp8_yv12_copy_y vp8_yv12_copy_y_c
+
+void vp9_extend_frame_borders_c(struct yv12_buffer_config *ybf, int subsampling_x, int subsampling_y);
+#define vp9_extend_frame_borders vp9_extend_frame_borders_c
+
+void vpx_scale_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+static void setup_rtcd_internal(void)
+{
+
+}
+#endif
+#endif
diff --git a/generic/vpx_version.h b/generic/vpx_version.h
index 663dd49..512851c 100644
--- a/generic/vpx_version.h
+++ b/generic/vpx_version.h
@@ -1,7 +1,7 @@
#define VERSION_MAJOR 1
-#define VERSION_MINOR 1
+#define VERSION_MINOR 2
#define VERSION_PATCH 0
#define VERSION_EXTRA ""
#define VERSION_PACKED ((VERSION_MAJOR<<16)|(VERSION_MINOR<<8)|(VERSION_PATCH))
-#define VERSION_STRING_NOSP "v1.1.0"
-#define VERSION_STRING " v1.1.0"
+#define VERSION_STRING_NOSP "v1.2.0"
+#define VERSION_STRING " v1.2.0"
diff --git a/libvpx.mk b/libvpx.mk
index 5cb7820..197ed75 100644
--- a/libvpx.mk
+++ b/libvpx.mk
@@ -60,14 +60,16 @@ LOCAL_SRC_FILES += $(libvpx_target)/vpx_config.c
# used yet but are included in the comments for future reference.
libvpx_asm_offsets_intermediates := \
- vp8/common/asm_com_offsets.intermediate \
- vp8/decoder/asm_dec_offsets.intermediate \
- vp8/encoder/asm_enc_offsets.intermediate \
+ vp8/common/vp8_asm_com_offsets.intermediate \
+ vp8/decoder/vp8_asm_dec_offsets.intermediate \
+ vp8/encoder/vp8_asm_enc_offsets.intermediate \
+ vpx_scale/vpx_scale_asm_offsets.intermediate \
libvpx_asm_offsets_files := \
- vp8/common/asm_com_offsets.asm \
- vp8/decoder/asm_dec_offsets.asm \
- vp8/encoder/asm_enc_offsets.asm \
+ vp8/common/vp8_asm_com_offsets.asm \
+ vp8/decoder/vp8_asm_dec_offsets.asm \
+ vp8/encoder/vp8_asm_enc_offsets.asm \
+ vpx_scale/vpx_scale_asm_offsets.asm \
# Build the S files with inline assembly.
COMPILE_TO_S := $(addprefix $(libvpx_intermediates)/, $(libvpx_asm_offsets_intermediates))
@@ -107,6 +109,7 @@ LOCAL_C_INCLUDES := \
$(libvpx_intermediates)/vp8/common \
$(libvpx_intermediates)/vp8/decoder \
$(libvpx_intermediates)/vp8/encoder \
+ $(libvpx_intermediates)/vpx_scale \
libvpx_target :=
libvpx_asm :=
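
The *_asm_offsets.c files exist so assembly can share C struct layouts: each is compiled to assembly (COMPILE_TO_S above) and the named constants are scraped out of the generated .S into a .asm include. A sketch of what such a file contains, patterned on vp8_asm_com_offsets.c (the two fields shown are illustrative picks from YV12_BUFFER_CONFIG):

    #include <stddef.h>
    #include "vpx_config.h"
    #include "vpx_ports/asm_offsets.h"
    #include "vpx_scale/yv12config.h"

    BEGIN
    /* Each DEFINE emits a named constant that survives into the
     * compiler's assembly output, where the build scrapes it. */
    DEFINE(yv12_buffer_config_y_width, offsetof(YV12_BUFFER_CONFIG, y_width));
    DEFINE(yv12_buffer_config_y_stride, offsetof(YV12_BUFFER_CONFIG, y_stride));
    END
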
diff --git a/libvpx/CHANGELOG b/libvpx/CHANGELOG
index dcb9f73..ef64a96 100644
--- a/libvpx/CHANGELOG
+++ b/libvpx/CHANGELOG
@@ -1,3 +1,32 @@
+2012-12-21 v1.2.0
+ This release acts as a checkpoint for a large amount of internal refactoring
+ and testing. It also contains a number of small bugfixes, so all users are
+ encouraged to upgrade.
+
+ - Upgrading:
+ This release is ABI and API compatible with Duclair (v1.0.0). Users
+ of older releases should refer to the Upgrading notes in this
+ document for that release.
+
+ - Enhancements:
+ VP8 optimizations for MIPS dspr2
+ vpxenc: add -quiet option
+
+ - Speed:
+ Encoder and decoder speed is consistent with the Eider release.
+
+ - Quality:
+ In general, quality is consistent with the Eider release.
+
+ Minor tweaks to ARNR filtering
+ Minor improvements to real-time encoding with multiple temporal layers
+
+ - Bug Fixes:
+ Fixes multithreaded encoder race condition in loopfilter
+ Fixes multi-resolution threaded encoding
+ Fixes potential encoder deadlock after picture resize
+
+
2012-05-09 v1.1.0 "Eider"
This introduces a number of enhancements, mostly focused on real-time
encoding. In addition, it fixes a decoder bug (first introduced in
diff --git a/libvpx/args.c b/libvpx/args.c
index 37ba778..9dabc9b 100644
--- a/libvpx/args.c
+++ b/libvpx/args.c
@@ -25,241 +25,214 @@ extern void die(const char *fmt, ...);
#endif
-struct arg arg_init(char **argv)
-{
- struct arg a;
-
- a.argv = argv;
- a.argv_step = 1;
- a.name = NULL;
- a.val = NULL;
- a.def = NULL;
- return a;
+struct arg arg_init(char **argv) {
+ struct arg a;
+
+ a.argv = argv;
+ a.argv_step = 1;
+ a.name = NULL;
+ a.val = NULL;
+ a.def = NULL;
+ return a;
}
-int arg_match(struct arg *arg_, const struct arg_def *def, char **argv)
-{
- struct arg arg;
+int arg_match(struct arg *arg_, const struct arg_def *def, char **argv) {
+ struct arg arg;
- if (!argv[0] || argv[0][0] != '-')
- return 0;
+ if (!argv[0] || argv[0][0] != '-')
+ return 0;
- arg = arg_init(argv);
+ arg = arg_init(argv);
- if (def->short_name
- && strlen(arg.argv[0]) == strlen(def->short_name) + 1
- && !strcmp(arg.argv[0] + 1, def->short_name))
- {
+ if (def->short_name
+ && strlen(arg.argv[0]) == strlen(def->short_name) + 1
+ && !strcmp(arg.argv[0] + 1, def->short_name)) {
- arg.name = arg.argv[0] + 1;
- arg.val = def->has_val ? arg.argv[1] : NULL;
- arg.argv_step = def->has_val ? 2 : 1;
- }
- else if (def->long_name)
- {
- const size_t name_len = strlen(def->long_name);
-
- if (strlen(arg.argv[0]) >= name_len + 2
- && arg.argv[0][1] == '-'
- && !strncmp(arg.argv[0] + 2, def->long_name, name_len)
- && (arg.argv[0][name_len+2] == '='
- || arg.argv[0][name_len+2] == '\0'))
- {
-
- arg.name = arg.argv[0] + 2;
- arg.val = arg.name[name_len] == '=' ? arg.name + name_len + 1 : NULL;
- arg.argv_step = 1;
- }
+ arg.name = arg.argv[0] + 1;
+ arg.val = def->has_val ? arg.argv[1] : NULL;
+ arg.argv_step = def->has_val ? 2 : 1;
+ } else if (def->long_name) {
+ const size_t name_len = strlen(def->long_name);
+
+ if (strlen(arg.argv[0]) >= name_len + 2
+ && arg.argv[0][1] == '-'
+ && !strncmp(arg.argv[0] + 2, def->long_name, name_len)
+ && (arg.argv[0][name_len + 2] == '='
+ || arg.argv[0][name_len + 2] == '\0')) {
+
+ arg.name = arg.argv[0] + 2;
+ arg.val = arg.name[name_len] == '=' ? arg.name + name_len + 1 : NULL;
+ arg.argv_step = 1;
}
+ }
- if (arg.name && !arg.val && def->has_val)
- die("Error: option %s requires argument.\n", arg.name);
+ if (arg.name && !arg.val && def->has_val)
+ die("Error: option %s requires argument.\n", arg.name);
- if (arg.name && arg.val && !def->has_val)
- die("Error: option %s requires no argument.\n", arg.name);
+ if (arg.name && arg.val && !def->has_val)
+ die("Error: option %s requires no argument.\n", arg.name);
- if (arg.name
- && (arg.val || !def->has_val))
- {
- arg.def = def;
- *arg_ = arg;
- return 1;
- }
+ if (arg.name
+ && (arg.val || !def->has_val)) {
+ arg.def = def;
+ *arg_ = arg;
+ return 1;
+ }
- return 0;
+ return 0;
}
-const char *arg_next(struct arg *arg)
-{
- if (arg->argv[0])
- arg->argv += arg->argv_step;
+const char *arg_next(struct arg *arg) {
+ if (arg->argv[0])
+ arg->argv += arg->argv_step;
- return *arg->argv;
+ return *arg->argv;
}
-char **argv_dup(int argc, const char **argv)
-{
- char **new_argv = malloc((argc + 1) * sizeof(*argv));
+char **argv_dup(int argc, const char **argv) {
+ char **new_argv = malloc((argc + 1) * sizeof(*argv));
- memcpy(new_argv, argv, argc * sizeof(*argv));
- new_argv[argc] = NULL;
- return new_argv;
+ memcpy(new_argv, argv, argc * sizeof(*argv));
+ new_argv[argc] = NULL;
+ return new_argv;
}
-void arg_show_usage(FILE *fp, const struct arg_def *const *defs)
-{
- char option_text[40] = {0};
+void arg_show_usage(FILE *fp, const struct arg_def *const *defs) {
+ char option_text[40] = {0};
- for (; *defs; defs++)
- {
- const struct arg_def *def = *defs;
- char *short_val = def->has_val ? " <arg>" : "";
- char *long_val = def->has_val ? "=<arg>" : "";
+ for (; *defs; defs++) {
+ const struct arg_def *def = *defs;
+ char *short_val = def->has_val ? " <arg>" : "";
+ char *long_val = def->has_val ? "=<arg>" : "";
- if (def->short_name && def->long_name)
- {
- char *comma = def->has_val ? "," : ", ";
+ if (def->short_name && def->long_name) {
+ char *comma = def->has_val ? "," : ", ";
- snprintf(option_text, 37, "-%s%s%s --%s%6s",
- def->short_name, short_val, comma,
- def->long_name, long_val);
- }
- else if (def->short_name)
- snprintf(option_text, 37, "-%s%s",
- def->short_name, short_val);
- else if (def->long_name)
- snprintf(option_text, 37, " --%s%s",
- def->long_name, long_val);
+ snprintf(option_text, 37, "-%s%s%s --%s%6s",
+ def->short_name, short_val, comma,
+ def->long_name, long_val);
+ } else if (def->short_name)
+ snprintf(option_text, 37, "-%s%s",
+ def->short_name, short_val);
+ else if (def->long_name)
+ snprintf(option_text, 37, " --%s%s",
+ def->long_name, long_val);
- fprintf(fp, " %-37s\t%s\n", option_text, def->desc);
+ fprintf(fp, " %-37s\t%s\n", option_text, def->desc);
- if(def->enums)
- {
- const struct arg_enum_list *listptr;
+ if (def->enums) {
+ const struct arg_enum_list *listptr;
- fprintf(fp, " %-37s\t ", "");
+ fprintf(fp, " %-37s\t ", "");
- for(listptr = def->enums; listptr->name; listptr++)
- fprintf(fp, "%s%s", listptr->name,
- listptr[1].name ? ", " : "\n");
- }
+ for (listptr = def->enums; listptr->name; listptr++)
+ fprintf(fp, "%s%s", listptr->name,
+ listptr[1].name ? ", " : "\n");
}
+ }
}
-unsigned int arg_parse_uint(const struct arg *arg)
-{
- long int rawval;
- char *endptr;
+unsigned int arg_parse_uint(const struct arg *arg) {
+ long int rawval;
+ char *endptr;
- rawval = strtol(arg->val, &endptr, 10);
+ rawval = strtol(arg->val, &endptr, 10);
- if (arg->val[0] != '\0' && endptr[0] == '\0')
- {
- if (rawval >= 0 && rawval <= UINT_MAX)
- return rawval;
+ if (arg->val[0] != '\0' && endptr[0] == '\0') {
+ if (rawval >= 0 && rawval <= UINT_MAX)
+ return rawval;
- die("Option %s: Value %ld out of range for unsigned int\n",
- arg->name, rawval);
- }
+ die("Option %s: Value %ld out of range for unsigned int\n",
+ arg->name, rawval);
+ }
- die("Option %s: Invalid character '%c'\n", arg->name, *endptr);
- return 0;
+ die("Option %s: Invalid character '%c'\n", arg->name, *endptr);
+ return 0;
}
-int arg_parse_int(const struct arg *arg)
-{
- long int rawval;
- char *endptr;
+int arg_parse_int(const struct arg *arg) {
+ long int rawval;
+ char *endptr;
- rawval = strtol(arg->val, &endptr, 10);
+ rawval = strtol(arg->val, &endptr, 10);
- if (arg->val[0] != '\0' && endptr[0] == '\0')
- {
- if (rawval >= INT_MIN && rawval <= INT_MAX)
- return rawval;
+ if (arg->val[0] != '\0' && endptr[0] == '\0') {
+ if (rawval >= INT_MIN && rawval <= INT_MAX)
+ return rawval;
- die("Option %s: Value %ld out of range for signed int\n",
- arg->name, rawval);
- }
+ die("Option %s: Value %ld out of range for signed int\n",
+ arg->name, rawval);
+ }
- die("Option %s: Invalid character '%c'\n", arg->name, *endptr);
- return 0;
+ die("Option %s: Invalid character '%c'\n", arg->name, *endptr);
+ return 0;
}
-struct vpx_rational
-{
- int num; /**< fraction numerator */
- int den; /**< fraction denominator */
+struct vpx_rational {
+ int num; /**< fraction numerator */
+ int den; /**< fraction denominator */
};
-struct vpx_rational arg_parse_rational(const struct arg *arg)
-{
- long int rawval;
- char *endptr;
- struct vpx_rational rat;
-
- /* parse numerator */
- rawval = strtol(arg->val, &endptr, 10);
-
- if (arg->val[0] != '\0' && endptr[0] == '/')
- {
- if (rawval >= INT_MIN && rawval <= INT_MAX)
- rat.num = rawval;
- else die("Option %s: Value %ld out of range for signed int\n",
- arg->name, rawval);
- }
- else die("Option %s: Expected / at '%c'\n", arg->name, *endptr);
-
- /* parse denominator */
- rawval = strtol(endptr + 1, &endptr, 10);
-
- if (arg->val[0] != '\0' && endptr[0] == '\0')
- {
- if (rawval >= INT_MIN && rawval <= INT_MAX)
- rat.den = rawval;
- else die("Option %s: Value %ld out of range for signed int\n",
- arg->name, rawval);
- }
- else die("Option %s: Invalid character '%c'\n", arg->name, *endptr);
-
- return rat;
+struct vpx_rational arg_parse_rational(const struct arg *arg) {
+ long int rawval;
+ char *endptr;
+ struct vpx_rational rat;
+
+ /* parse numerator */
+ rawval = strtol(arg->val, &endptr, 10);
+
+ if (arg->val[0] != '\0' && endptr[0] == '/') {
+ if (rawval >= INT_MIN && rawval <= INT_MAX)
+ rat.num = rawval;
+ else die("Option %s: Value %ld out of range for signed int\n",
+ arg->name, rawval);
+ } else die("Option %s: Expected / at '%c'\n", arg->name, *endptr);
+
+ /* parse denominator */
+ rawval = strtol(endptr + 1, &endptr, 10);
+
+ if (arg->val[0] != '\0' && endptr[0] == '\0') {
+ if (rawval >= INT_MIN && rawval <= INT_MAX)
+ rat.den = rawval;
+ else die("Option %s: Value %ld out of range for signed int\n",
+ arg->name, rawval);
+ } else die("Option %s: Invalid character '%c'\n", arg->name, *endptr);
+
+ return rat;
}
-int arg_parse_enum(const struct arg *arg)
-{
- const struct arg_enum_list *listptr;
- long int rawval;
- char *endptr;
-
- /* First see if the value can be parsed as a raw value */
- rawval = strtol(arg->val, &endptr, 10);
- if (arg->val[0] != '\0' && endptr[0] == '\0')
- {
- /* Got a raw value, make sure it's valid */
- for(listptr = arg->def->enums; listptr->name; listptr++)
- if(listptr->val == rawval)
- return rawval;
- }
+int arg_parse_enum(const struct arg *arg) {
+ const struct arg_enum_list *listptr;
+ long int rawval;
+ char *endptr;
- /* Next see if it can be parsed as a string */
- for(listptr = arg->def->enums; listptr->name; listptr++)
- if(!strcmp(arg->val, listptr->name))
- return listptr->val;
+ /* First see if the value can be parsed as a raw value */
+ rawval = strtol(arg->val, &endptr, 10);
+ if (arg->val[0] != '\0' && endptr[0] == '\0') {
+ /* Got a raw value, make sure it's valid */
+ for (listptr = arg->def->enums; listptr->name; listptr++)
+ if (listptr->val == rawval)
+ return rawval;
+ }
- die("Option %s: Invalid value '%s'\n", arg->name, arg->val);
- return 0;
+ /* Next see if it can be parsed as a string */
+ for (listptr = arg->def->enums; listptr->name; listptr++)
+ if (!strcmp(arg->val, listptr->name))
+ return listptr->val;
+
+ die("Option %s: Invalid value '%s'\n", arg->name, arg->val);
+ return 0;
}
-int arg_parse_enum_or_int(const struct arg *arg)
-{
- if(arg->def->enums)
- return arg_parse_enum(arg);
- return arg_parse_int(arg);
+int arg_parse_enum_or_int(const struct arg *arg) {
+ if (arg->def->enums)
+ return arg_parse_enum(arg);
+ return arg_parse_int(arg);
}
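
All of the parsers above share one idiom: strtol() with an endptr, where `arg->val[0] != '\0' && endptr[0] == '\0'` means the whole string was consumed. Below is a minimal sketch (not part of the patch) of driving arg_parse_rational() by hand. In the real tools the struct arg is filled in while scanning argv (by arg_match() in args.c, not shown in this hunk) and die() is supplied by the shared tool code; the declarations are repeated here so the sketch stands alone when linked against args.c.

    /* sketch_args.c -- illustrative only; build as: cc sketch_args.c args.c */
    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Mirrors of the args.h declarations, so this file is self-contained. */
    struct arg_enum_list { const char *name; int val; };
    typedef struct arg_def {
      const char *short_name;
      const char *long_name;
      int has_val;
      const char *desc;
      const struct arg_enum_list *enums;
    } arg_def_t;
    struct arg {
      char **argv;
      const char *name;
      const char *val;
      unsigned int argv_step;
      const struct arg_def *def;
    };
    struct vpx_rational { int num; int den; };

    struct vpx_rational arg_parse_rational(const struct arg *arg);

    /* args.c calls die() but leaves its definition to the tool; a stub
     * matching the usual printf-then-exit contract suffices here. */
    void die(const char *fmt, ...) {
      va_list ap;
      va_start(ap, fmt);
      vfprintf(stderr, fmt, ap);
      va_end(ap);
      exit(EXIT_FAILURE);
    }

    int main(void) {
      static const arg_def_t fps_def = { NULL, "fps", 1, "frame rate", NULL };
      /* Normally populated while walking argv; filled by hand here. */
      struct arg a = { NULL, "fps", "30000/1001", 1, &fps_def };
      struct vpx_rational r = arg_parse_rational(&a);
      printf("fps = %d/%d\n", r.num, r.den);  /* prints "fps = 30000/1001" */
      return 0;
    }
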
diff --git a/libvpx/args.h b/libvpx/args.h
index 7963fa6..ad591af 100644
--- a/libvpx/args.h
+++ b/libvpx/args.h
@@ -13,29 +13,26 @@
#define ARGS_H
#include <stdio.h>
-struct arg
-{
- char **argv;
- const char *name;
- const char *val;
- unsigned int argv_step;
- const struct arg_def *def;
+struct arg {
+ char **argv;
+ const char *name;
+ const char *val;
+ unsigned int argv_step;
+ const struct arg_def *def;
};
-struct arg_enum_list
-{
- const char *name;
- int val;
+struct arg_enum_list {
+ const char *name;
+ int val;
};
#define ARG_ENUM_LIST_END {0}
-typedef struct arg_def
-{
- const char *short_name;
- const char *long_name;
- int has_val;
- const char *desc;
- const struct arg_enum_list *enums;
+typedef struct arg_def {
+ const char *short_name;
+ const char *long_name;
+ int has_val;
+ const char *desc;
+ const struct arg_enum_list *enums;
} arg_def_t;
#define ARG_DEF(s,l,v,d) {s,l,v,d, NULL}
#define ARG_DEF_ENUM(s,l,v,d,e) {s,l,v,d,e}
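
A hedged sketch of how these macros compose into an option table, in the style the vpx command-line tools use (the real tables live in vpxenc/vpxdec, not in this hunk; the option names and enum values below are illustrative):

    /* Illustrative option table built from ARG_DEF / ARG_DEF_ENUM. */
    #include "args.h"

    static const struct arg_enum_list deadline_enum[] = {
      {"best", 0}, {"good", 1}, {"realtime", 2},
      ARG_ENUM_LIST_END
    };
    static const arg_def_t help_arg   = ARG_DEF("h", "help", 0, "Show usage");
    static const arg_def_t passes_arg = ARG_DEF("p", "passes", 1, "Number of passes");
    static const arg_def_t deadline_arg =
        ARG_DEF_ENUM(NULL, "deadline", 1, "Encoding deadline", deadline_enum);
    /* NULL-terminated list, in the style the tools' help printers walk. */
    static const arg_def_t *all_args[] = { &help_arg, &passes_arg,
                                           &deadline_arg, NULL };
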
diff --git a/libvpx/build/arm-msvs/obj_int_extract.bat b/libvpx/build/arm-msvs/obj_int_extract.bat
new file mode 100644
index 0000000..147342d
--- /dev/null
+++ b/libvpx/build/arm-msvs/obj_int_extract.bat
@@ -0,0 +1,25 @@
+REM Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+REM
+REM Use of this source code is governed by a BSD-style license
+REM that can be found in the LICENSE file in the root of the source
+REM tree. An additional intellectual property rights grant can be found
+REM in the file PATENTS. All contributing project authors may
+REM be found in the AUTHORS file in the root of the source tree.
+echo on
+
+cl /I "./" /I "%1" /nologo /c /DWINAPI_FAMILY=WINAPI_FAMILY_PHONE_APP "%1/vp9/common/vp9_asm_com_offsets.c"
+cl /I "./" /I "%1" /nologo /c /DWINAPI_FAMILY=WINAPI_FAMILY_PHONE_APP "%1/vp9/decoder/vp9_asm_dec_offsets.c"
+cl /I "./" /I "%1" /nologo /c /DWINAPI_FAMILY=WINAPI_FAMILY_PHONE_APP "%1/vp9/encoder/vp9_asm_enc_offsets.c"
+obj_int_extract.exe rvds "vp9_asm_com_offsets.obj" > "vp9_asm_com_offsets.asm"
+obj_int_extract.exe rvds "vp9_asm_dec_offsets.obj" > "vp9_asm_dec_offsets.asm"
+obj_int_extract.exe rvds "vp9_asm_enc_offsets.obj" > "vp9_asm_enc_offsets.asm"
+
+cl /I "./" /I "%1" /nologo /c /DWINAPI_FAMILY=WINAPI_FAMILY_PHONE_APP "%1/vp8/common/vp8_asm_com_offsets.c"
+cl /I "./" /I "%1" /nologo /c /DWINAPI_FAMILY=WINAPI_FAMILY_PHONE_APP "%1/vp8/decoder/vp8_asm_dec_offsets.c"
+cl /I "./" /I "%1" /nologo /c /DWINAPI_FAMILY=WINAPI_FAMILY_PHONE_APP "%1/vp8/encoder/vp8_asm_enc_offsets.c"
+obj_int_extract.exe rvds "vp8_asm_com_offsets.obj" > "vp8_asm_com_offsets.asm"
+obj_int_extract.exe rvds "vp8_asm_dec_offsets.obj" > "vp8_asm_dec_offsets.asm"
+obj_int_extract.exe rvds "vp8_asm_enc_offsets.obj" > "vp8_asm_enc_offsets.asm"
+
+cl /I "./" /I "%1" /nologo /c /DWINAPI_FAMILY=WINAPI_FAMILY_PHONE_APP "%1/vpx_scale/vpx_scale_asm_offsets.c"
+obj_int_extract.exe rvds "vpx_scale_asm_offsets.obj" > "vpx_scale_asm_offsets.asm"
diff --git a/libvpx/build/make/Android.mk b/libvpx/build/make/Android.mk
index d54639a..1ff0884 100644
--- a/libvpx/build/make/Android.mk
+++ b/libvpx/build/make/Android.mk
@@ -27,7 +27,7 @@
# Android.mk file in the libvpx directory:
# LOCAL_PATH := $(call my-dir)
# include $(CLEAR_VARS)
-# include libvpx/build/make/Android.mk
+# include jni/libvpx/build/make/Android.mk
#
# There are currently two TARGET_ARCH_ABI targets for ARM.
# armeabi and armeabi-v7a. armeabi-v7a is selected by creating an
@@ -48,7 +48,7 @@
# Running ndk-build will build libvpx and include it in your project.
#
-CONFIG_DIR := $(LOCAL_PATH)
+CONFIG_DIR := $(LOCAL_PATH)/
LIBVPX_PATH := $(LOCAL_PATH)/libvpx
ASM_CNV_PATH_LOCAL := $(TARGET_ARCH_ABI)/ads2gas
ASM_CNV_PATH := $(LOCAL_PATH)/$(ASM_CNV_PATH_LOCAL)
@@ -56,9 +56,9 @@ ASM_CNV_PATH := $(LOCAL_PATH)/$(ASM_CNV_PATH_LOCAL)
# Makefiles created by the libvpx configure process
# This will need to be fixed to handle x86.
ifeq ($(TARGET_ARCH_ABI),armeabi-v7a)
- include $(CONFIG_DIR)/libs-armv7-android-gcc.mk
+ include $(CONFIG_DIR)libs-armv7-android-gcc.mk
else
- include $(CONFIG_DIR)/libs-armv5te-android-gcc.mk
+ include $(CONFIG_DIR)libs-armv5te-android-gcc.mk
endif
# Rule that is normally in Makefile created by libvpx
@@ -106,26 +106,25 @@ $$(eval $$(call ev-build-file))
$(1) : $$(_OBJ) $(2)
@mkdir -p $$(dir $$@)
- @grep $(OFFSET_PATTERN) $$< | tr -d '\#' | $(CONFIG_DIR)/$(ASM_CONVERSION) > $$@
+ @grep $(OFFSET_PATTERN) $$< | tr -d '\#' | $(CONFIG_DIR)$(ASM_CONVERSION) > $$@
endef
# Use ads2gas script to convert from RVCT format to GAS format. This pass
# puts the processed file under $(ASM_CNV_PATH). A local clean rule
# handles removing these files.
-ASM_CNV_OFFSETS_DEPEND = $(ASM_CNV_PATH)/asm_com_offsets.asm
-ifeq ($(CONFIG_VP8_DECODER), yes)
- ASM_CNV_OFFSETS_DEPEND += $(ASM_CNV_PATH)/asm_dec_offsets.asm
-endif
ifeq ($(CONFIG_VP8_ENCODER), yes)
- ASM_CNV_OFFSETS_DEPEND += $(ASM_CNV_PATH)/asm_enc_offsets.asm
+ ASM_CNV_OFFSETS_DEPEND += $(ASM_CNV_PATH)/vp8_asm_enc_offsets.asm
+endif
+ifeq ($(HAVE_NEON), yes)
+ ASM_CNV_OFFSETS_DEPEND += $(ASM_CNV_PATH)/vpx_scale_asm_offsets.asm
endif
.PRECIOUS: %.asm.s
$(ASM_CNV_PATH)/libvpx/%.asm.s: $(LIBVPX_PATH)/%.asm $(ASM_CNV_OFFSETS_DEPEND)
@mkdir -p $(dir $@)
- @$(CONFIG_DIR)/$(ASM_CONVERSION) <$< > $@
+ @$(CONFIG_DIR)$(ASM_CONVERSION) <$< > $@
-# For building vpx_rtcd.h, which has a rule in libs.mk
+# For building *_rtcd.h, which have rules in libs.mk
TGT_ISA:=$(word 1, $(subst -, ,$(TOOLCHAIN)))
target := libs
@@ -177,7 +176,14 @@ ifeq ($(CONFIG_RUNTIME_CPU_DETECT),yes)
LOCAL_STATIC_LIBRARIES := cpufeatures
endif
-$(foreach file, $(LOCAL_SRC_FILES), $(LOCAL_PATH)/$(file)): vpx_rtcd.h
+# Add a dependency to force generation of the RTCD files.
+ifeq ($(CONFIG_VP8), yes)
+$(foreach file, $(LOCAL_SRC_FILES), $(LOCAL_PATH)/$(file)): vp8_rtcd.h
+endif
+ifeq ($(CONFIG_VP9), yes)
+$(foreach file, $(LOCAL_SRC_FILES), $(LOCAL_PATH)/$(file)): vp9_rtcd.h
+endif
+$(foreach file, $(LOCAL_SRC_FILES), $(LOCAL_PATH)/$(file)): vpx_scale_rtcd.h
.PHONY: clean
clean:
@@ -189,23 +195,18 @@ clean:
include $(BUILD_SHARED_LIBRARY)
-$(eval $(call asm_offsets_template,\
- $(ASM_CNV_PATH)/asm_com_offsets.asm, \
- $(LIBVPX_PATH)/vp8/common/asm_com_offsets.c))
-
-ifeq ($(CONFIG_VP8_DECODER), yes)
+ifeq ($(HAVE_NEON), yes)
$(eval $(call asm_offsets_template,\
- $(ASM_CNV_PATH)/asm_dec_offsets.asm, \
- $(LIBVPX_PATH)/vp8/decoder/asm_dec_offsets.c))
+ $(ASM_CNV_PATH)/vpx_scale_asm_offsets.asm, \
+ $(LIBVPX_PATH)/vpx_scale/vpx_scale_asm_offsets.c))
endif
ifeq ($(CONFIG_VP8_ENCODER), yes)
$(eval $(call asm_offsets_template,\
- $(ASM_CNV_PATH)/asm_enc_offsets.asm, \
- $(LIBVPX_PATH)/vp8/encoder/asm_enc_offsets.c))
+ $(ASM_CNV_PATH)/vp8_asm_enc_offsets.asm, \
+ $(LIBVPX_PATH)/vp8/encoder/vp8_asm_enc_offsets.c))
endif
ifeq ($(CONFIG_RUNTIME_CPU_DETECT),yes)
$(call import-module,cpufeatures)
endif
-
diff --git a/libvpx/build/make/Makefile b/libvpx/build/make/Makefile
index 1088c84..7a25239 100644
--- a/libvpx/build/make/Makefile
+++ b/libvpx/build/make/Makefile
@@ -74,7 +74,7 @@ HOSTCC?=gcc
TGT_ISA:=$(word 1, $(subst -, ,$(TOOLCHAIN)))
TGT_OS:=$(word 2, $(subst -, ,$(TOOLCHAIN)))
TGT_CC:=$(word 3, $(subst -, ,$(TOOLCHAIN)))
-quiet:=$(if $(verbose),,yes)
+quiet:=$(if $(or $(verbose), $(V)),, yes)
qexec=$(if $(quiet),@)
# Cancel built-in implicit rules
@@ -103,6 +103,18 @@ test::
.PHONY: testdata
testdata::
+# Add compiler flags for intrinsic files
+$(BUILD_PFX)%_mmx.c.d: CFLAGS += -mmmx
+$(BUILD_PFX)%_mmx.c.o: CFLAGS += -mmmx
+$(BUILD_PFX)%_sse2.c.d: CFLAGS += -msse2
+$(BUILD_PFX)%_sse2.c.o: CFLAGS += -msse2
+$(BUILD_PFX)%_sse3.c.d: CFLAGS += -msse3
+$(BUILD_PFX)%_sse3.c.o: CFLAGS += -msse3
+$(BUILD_PFX)%_ssse3.c.d: CFLAGS += -mssse3
+$(BUILD_PFX)%_ssse3.c.o: CFLAGS += -mssse3
+$(BUILD_PFX)%_sse4.c.d: CFLAGS += -msse4.1
+$(BUILD_PFX)%_sse4.c.o: CFLAGS += -msse4.1
+
$(BUILD_PFX)%.c.d: %.c
$(if $(quiet),@echo " [DEP] $@")
$(qexec)mkdir -p $(dir $@)
@@ -253,10 +265,25 @@ $(1):
$(if $(quiet),@echo " [LD] $$@")
$(qexec)$$(LD) -shared $$(LDFLAGS) \
-Wl,--no-undefined -Wl,-soname,$$(SONAME) \
- -Wl,--version-script,$$(SO_VERSION_SCRIPT) -o $$@ \
- $$(filter %.o,$$?) $$(extralibs)
+ -Wl,--version-script,$$(EXPORTS_FILE) -o $$@ \
+ $$(filter %.o,$$^) $$(extralibs)
+endef
+
+define dl_template
+# Not using a pattern rule here because we don't want to generate empty
+# archives when they are listed as a dependency in files not responsible
+# for creating them.
+$(1):
+ $(if $(quiet),@echo " [LD] $$@")
+ $(qexec)$$(LD) -dynamiclib $$(LDFLAGS) \
+ -exported_symbols_list $$(EXPORTS_FILE) \
+ -Wl,-headerpad_max_install_names,-compatibility_version,1.0,-current_version,$$(VERSION_MAJOR) \
+ -o $$@ \
+ $$(filter %.o,$$^) $$(extralibs)
endef
+
+
define lipo_lib_template
$(1): $(addsuffix /$(1),$(FAT_ARCHS))
$(if $(quiet),@echo " [LIPO] $$@")
@@ -321,6 +348,7 @@ LIBS=$(call enabled,LIBS)
@touch $@
$(foreach lib,$(filter %_g.a,$(LIBS)),$(eval $(call archive_template,$(lib))))
$(foreach lib,$(filter %so.$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_PATCH),$(LIBS)),$(eval $(call so_template,$(lib))))
+$(foreach lib,$(filter %$(VERSION_MAJOR).dylib,$(LIBS)),$(eval $(call dl_template,$(lib))))
INSTALL-LIBS=$(call cond_enabled,CONFIG_INSTALL_LIBS,INSTALL-LIBS)
ifeq ($(MAKECMDGOALS),dist)
@@ -360,10 +388,14 @@ ifneq ($(call enabled,DIST-SRCS),)
DIST-SRCS-$(CONFIG_MSVS) += build/make/gen_msvs_sln.sh
DIST-SRCS-$(CONFIG_MSVS) += build/x86-msvs/yasm.rules
DIST-SRCS-$(CONFIG_MSVS) += build/x86-msvs/obj_int_extract.bat
+ DIST-SRCS-$(CONFIG_MSVS) += build/arm-msvs/obj_int_extract.bat
DIST-SRCS-$(CONFIG_RVCT) += build/make/armlink_adapter.sh
- # Include obj_int_extract if we use offsets from asm_*_offsets
+ # Include obj_int_extract if we use offsets from *_asm_*_offsets
DIST-SRCS-$(ARCH_ARM)$(ARCH_X86)$(ARCH_X86_64) += build/make/obj_int_extract.c
DIST-SRCS-$(ARCH_ARM) += build/make/ads2gas.pl
+ DIST-SRCS-$(ARCH_ARM) += build/make/ads2gas_apple.pl
+ DIST-SRCS-$(ARCH_ARM) += build/make/ads2armasm_ms.pl
+ DIST-SRCS-$(ARCH_ARM) += build/make/thumb.pm
DIST-SRCS-yes += $(target:-$(TOOLCHAIN)=).mk
endif
INSTALL-SRCS := $(call cond_enabled,CONFIG_INSTALL_SRCS,INSTALL-SRCS)
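
The per-suffix CFLAGS added above exist because intrinsic sources only compile when the matching ISA flag is on the command line; a minimal sketch of the kind of file the %_sse2.c patterns target (illustrative, not from the tree):

    /* example_sse2.c -- with gcc on 32-bit x86 this needs -msse2, which
     * the pattern rules above now add for any *_sse2.c source. */
    #include <emmintrin.h>

    void add4_sse2(int *dst, const int *a, const int *b) {
      const __m128i va = _mm_loadu_si128((const __m128i *)a);
      const __m128i vb = _mm_loadu_si128((const __m128i *)b);
      _mm_storeu_si128((__m128i *)dst, _mm_add_epi32(va, vb));
    }
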
diff --git a/libvpx/build/make/ads2armasm_ms.pl b/libvpx/build/make/ads2armasm_ms.pl
new file mode 100755
index 0000000..1def539
--- /dev/null
+++ b/libvpx/build/make/ads2armasm_ms.pl
@@ -0,0 +1,38 @@
+#!/usr/bin/perl
+##
+## Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+use FindBin;
+use lib $FindBin::Bin;
+use thumb;
+
+print "; This file was created from a .asm file\n";
+print "; using the ads2armasm_ms.pl script.\n";
+
+while (<STDIN>)
+{
+ undef $comment;
+ undef $line;
+
+ s/REQUIRE8//;
+ s/PRESERVE8//;
+ s/^\s*ARM\s*$//;
+ s/AREA\s+\|\|(.*)\|\|/AREA |$1|/;
+ s/qsubaddx/qsax/i;
+ s/qaddsubx/qasx/i;
+
+ thumb::FixThumbInstructions($_, 1);
+
+ s/ldrneb/ldrbne/i;
+ s/ldrneh/ldrhne/i;
+
+ print;
+}
+
diff --git a/libvpx/build/make/ads2gas.pl b/libvpx/build/make/ads2gas.pl
index 95be467..9c41901 100755
--- a/libvpx/build/make/ads2gas.pl
+++ b/libvpx/build/make/ads2gas.pl
@@ -17,9 +17,24 @@
#
# Usage: cat inputfile | perl ads2gas.pl > outputfile
#
+
+use FindBin;
+use lib $FindBin::Bin;
+use thumb;
+
+my $thumb = 0;
+
+foreach my $arg (@ARGV) {
+ $thumb = 1 if ($arg eq "-thumb");
+}
+
print "@ This file was created from a .asm file\n";
print "@ using the ads2gas.pl script.\n";
print "\t.equ DO1STROUNDING, 0\n";
+if ($thumb) {
+ print "\t.syntax unified\n";
+ print "\t.thumb\n";
+}
# Stack of procedure names.
@proc_stack = ();
@@ -151,8 +166,13 @@ while (<STDIN>)
# ALIGN directive
s/\bALIGN\b/.balign/g;
- # ARM code
- s/\sARM/.arm/g;
+ if ($thumb) {
+ # ARM code - we force everything to thumb with the declaration in the header
+ s/\sARM//g;
+ } else {
+ # ARM code
+ s/\sARM/.arm/g;
+ }
# push/pop
s/(push\s+)(r\d+)/stmdb sp\!, \{$2\}/g;
@@ -162,6 +182,10 @@ while (<STDIN>)
s/(vld1.\d+\s+)(q\d+)/$1\{$2\}/g;
s/(vtbl.\d+\s+[^,]+),([^,]+)/$1,\{$2\}/g;
+ if ($thumb) {
+ thumb::FixThumbInstructions($_, 0);
+ }
+
# eabi_attributes numerical equivalents can be found in the
# "ARM IHI 0045C" document.
diff --git a/libvpx/build/make/ads2gas_apple.pl b/libvpx/build/make/ads2gas_apple.pl
index 81280bf..51e6fbc 100755
--- a/libvpx/build/make/ads2gas_apple.pl
+++ b/libvpx/build/make/ads2gas_apple.pl
@@ -10,12 +10,12 @@
##
-# ads2gas.pl
+# ads2gas_apple.pl
# Author: Eric Fung (efung (at) acm.org)
#
# Convert ARM Developer Suite 1.0.1 syntax assembly source to GNU as format
#
-# Usage: cat inputfile | perl ads2gas.pl > outputfile
+# Usage: cat inputfile | perl ads2gas_apple.pl > outputfile
#
print "@ This file was created from a .asm file\n";
print "@ using the ads2gas_apple.pl script.\n\n";
diff --git a/libvpx/build/make/configure.sh b/libvpx/build/make/configure.sh
index 05bbabe..ee4493d 100755
--- a/libvpx/build/make/configure.sh
+++ b/libvpx/build/make/configure.sh
@@ -88,6 +88,7 @@ Build options:
${toggle_debug} enable/disable debug mode
${toggle_gprof} enable/disable gprof profiling instrumentation
${toggle_gcov} enable/disable gcov coverage instrumentation
+ ${toggle_thumb} enable/disable building arm assembly in thumb mode
Install options:
${toggle_install_docs} control whether docs are installed
@@ -265,18 +266,20 @@ else
fi
TMP_H="${TMPDIRx}/vpx-conf-$$-${RANDOM}.h"
TMP_C="${TMPDIRx}/vpx-conf-$$-${RANDOM}.c"
+TMP_CC="${TMPDIRx}/vpx-conf-$$-${RANDOM}.cc"
TMP_O="${TMPDIRx}/vpx-conf-$$-${RANDOM}.o"
TMP_X="${TMPDIRx}/vpx-conf-$$-${RANDOM}.x"
TMP_ASM="${TMPDIRx}/vpx-conf-$$-${RANDOM}.asm"
clean_temp_files() {
- rm -f ${TMP_C} ${TMP_H} ${TMP_O} ${TMP_X} ${TMP_ASM}
+ rm -f ${TMP_C} ${TMP_CC} ${TMP_H} ${TMP_O} ${TMP_X} ${TMP_ASM}
}
#
# Toolchain Check Functions
#
check_cmd() {
+ enabled external_build && return
log "$@"
"$@" >>${logfile} 2>&1
}
@@ -290,9 +293,9 @@ check_cc() {
check_cxx() {
log check_cxx "$@"
- cat >${TMP_C}
- log_file ${TMP_C}
- check_cmd ${CXX} ${CXXFLAGS} "$@" -c -o ${TMP_O} ${TMP_C}
+ cat >${TMP_CC}
+ log_file ${TMP_CC}
+ check_cmd ${CXX} ${CXXFLAGS} "$@" -c -o ${TMP_O} ${TMP_CC}
}
check_cpp() {
@@ -414,6 +417,8 @@ SRC_PATH_BARE=$source_path
BUILD_PFX=${BUILD_PFX}
TOOLCHAIN=${toolchain}
ASM_CONVERSION=${asm_conversion_cmd:-${source_path}/build/make/ads2gas.pl}
+GEN_VCPROJ=${gen_vcproj_cmd}
+MSVS_ARCH_DIR=${msvs_arch_dir}
CC=${CC}
CXX=${CXX}
@@ -431,14 +436,15 @@ ASFLAGS = ${ASFLAGS}
extralibs = ${extralibs}
AS_SFX = ${AS_SFX:-.asm}
EXE_SFX = ${EXE_SFX}
+VCPROJ_SFX = ${VCPROJ_SFX}
RTCD_OPTIONS = ${RTCD_OPTIONS}
EOF
if enabled rvct; then cat >> $1 << EOF
-fmt_deps = sed -e 's;^__image.axf;\$(dir \$@)\$(notdir \$<).o \$@;' #hide
+fmt_deps = sed -e 's;^__image.axf;\${@:.d=.o} \$@;' #hide
EOF
else cat >> $1 << EOF
-fmt_deps = sed -e 's;^\([a-zA-Z0-9_]*\)\.o;\$(dir \$@)\1\$(suffix \$<).o \$@;'
+fmt_deps = sed -e 's;^\([a-zA-Z0-9_]*\)\.o;\${@:.d=.o} \$@;'
EOF
fi
@@ -459,6 +465,7 @@ write_common_target_config_h() {
#ifndef VPX_CONFIG_H
#define VPX_CONFIG_H
#define RESTRICT ${RESTRICT}
+#define INLINE ${INLINE}
EOF
print_config_h ARCH "${TMP_H}" ${ARCH_LIST}
print_config_h HAVE "${TMP_H}" ${HAVE_LIST}
@@ -596,8 +603,13 @@ process_common_toolchain() {
armv6*)
tgt_isa=armv6
;;
+ armv7*-hardfloat*)
+ tgt_isa=armv7
+ float_abi=hard
+ ;;
armv7*)
tgt_isa=armv7
+ float_abi=softfp
;;
armv5te*)
tgt_isa=armv5te
@@ -641,6 +653,9 @@ process_common_toolchain() {
tgt_isa=x86_64
tgt_os=darwin12
;;
+ x86_64*mingw32*)
+ tgt_os=win64
+ ;;
*mingw32*|*cygwin*)
[ -z "$tgt_isa" ] && tgt_isa=x86
tgt_os=win32
@@ -767,6 +782,7 @@ process_common_toolchain() {
;;
armv5te)
soft_enable edsp
+ disable fast_unaligned
;;
esac
@@ -782,8 +798,15 @@ process_common_toolchain() {
check_add_asflags --defsym ARCHITECTURE=${arch_int}
tune_cflags="-mtune="
if [ ${tgt_isa} == "armv7" ]; then
- check_add_cflags -march=armv7-a -mfloat-abi=softfp
- check_add_asflags -march=armv7-a -mfloat-abi=softfp
+ if [ -z "${float_abi}" ]; then
+ check_cpp <<EOF && float_abi=hard || float_abi=softfp
+#ifndef __ARM_PCS_VFP
+#error "not hardfp"
+#endif
+EOF
+ fi
+ check_add_cflags -march=armv7-a -mfloat-abi=${float_abi}
+ check_add_asflags -march=armv7-a -mfloat-abi=${float_abi}
if enabled neon
then
@@ -801,6 +824,18 @@ process_common_toolchain() {
enabled debug && add_asflags -g
asm_conversion_cmd="${source_path}/build/make/ads2gas.pl"
+ if enabled thumb; then
+ asm_conversion_cmd="$asm_conversion_cmd -thumb"
+ check_add_cflags -mthumb
+ check_add_asflags -mthumb -mimplicit-it=always
+ fi
+ ;;
+ vs*)
+ asm_conversion_cmd="${source_path}/build/make/ads2armasm_ms.pl"
+ AS_SFX=.s
+ msvs_arch_dir=arm-msvs
+ disable multithread
+ disable unit_tests
;;
rvct)
CC=armcc
@@ -906,7 +941,7 @@ process_common_toolchain() {
add_ldflags -arch_only ${tgt_isa}
if [ -z "${alt_libc}" ]; then
- alt_libc=${SDK_PATH}/SDKs/iPhoneOS5.1.sdk
+ alt_libc=${SDK_PATH}/SDKs/iPhoneOS6.0.sdk
fi
add_cflags "-isysroot ${alt_libc}"
@@ -994,13 +1029,6 @@ process_common_toolchain() {
#error "not x32"
#endif
EOF
- soft_enable runtime_cpu_detect
- soft_enable mmx
- soft_enable sse
- soft_enable sse2
- soft_enable sse3
- soft_enable ssse3
- soft_enable sse4_1
case ${tgt_os} in
win*)
@@ -1042,18 +1070,33 @@ EOF
add_ldflags -m${bits}
link_with_cc=gcc
tune_cflags="-march="
- setup_gnu_toolchain
+ setup_gnu_toolchain
#for 32 bit x86 builds, -O3 did not turn on this flag
- enabled optimizations && check_add_cflags -fomit-frame-pointer
+ enabled optimizations && disabled gprof && check_add_cflags -fomit-frame-pointer
;;
vs*)
# When building with Microsoft Visual Studio the assembler is
# invoked directly. Checking at configure time is unnecessary.
# Skip the check by setting AS arbitrarily
AS=msvs
+ msvs_arch_dir=x86-msvs
;;
esac
+ soft_enable runtime_cpu_detect
+ soft_enable mmx
+ soft_enable sse
+ soft_enable sse2
+ soft_enable sse3
+ soft_enable ssse3
+ # We can't use 'check_cflags' until the compiler is configured and CC is
+ # populated.
+ if enabled gcc && ! disabled sse4_1 && ! check_cflags -msse4; then
+ RTCD_OPTIONS="${RTCD_OPTIONS}--disable-sse4_1 "
+ else
+ soft_enable sse4_1
+ fi
+
case "${AS}" in
auto|"")
which nasm >/dev/null 2>&1 && AS=nasm
@@ -1069,12 +1112,14 @@ EOF
win32)
add_asflags -f win32
enabled debug && add_asflags -g cv8
+ EXE_SFX=.exe
;;
win64)
add_asflags -f x64
enabled debug && add_asflags -g cv8
+ EXE_SFX=.exe
;;
- linux*|solaris*)
+ linux*|solaris*|android*)
add_asflags -f elf${bits}
enabled debug && [ "${AS}" = yasm ] && add_asflags -g dwarf2
enabled debug && [ "${AS}" = nasm ] && add_asflags -g
@@ -1154,6 +1199,14 @@ EOF
[ -f "${TMP_O}" ] && od -A n -t x1 "${TMP_O}" | tr -d '\n' |
grep '4f *32 *42 *45' >/dev/null 2>&1 && enable big_endian
+ # Try to find which inline keywords are supported
+ check_cc <<EOF && INLINE="inline"
+ static inline function() {}
+EOF
+ check_cc <<EOF && INLINE="__inline__ __attribute__((always_inline))"
+ static __attribute__((always_inline)) function() {}
+EOF
+
# Almost every platform uses pthreads.
if enabled multithread; then
case ${toolchain} in
@@ -1175,9 +1228,6 @@ EOF
;;
esac
- # for sysconf(3) and friends.
- check_header unistd.h
-
# glibc needs these
if enabled linux; then
add_cflags -D_LARGEFILE_SOURCE
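
The INLINE probe added above writes the strongest keyword the compiler accepts into vpx_config.h (see the new "#define INLINE ${INLINE}" line in write_common_target_config_h). A hedged sketch of how such a macro is then consumed; the clamp function is illustrative, but "static INLINE" is the pattern the library sources use:

    #include "vpx_config.h"  /* provides INLINE per the configure probe */

    static INLINE int clamp255(int v) {
      return v < 0 ? 0 : (v > 255 ? 255 : v);
    }
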
diff --git a/libvpx/build/make/gen_msvs_proj.sh b/libvpx/build/make/gen_msvs_proj.sh
index 6d42941..cff27c8 100755
--- a/libvpx/build/make/gen_msvs_proj.sh
+++ b/libvpx/build/make/gen_msvs_proj.sh
@@ -26,6 +26,7 @@ Options:
--help Print this message
--exe Generate a project for building an Application
--lib Generate a project for creating a static library
+ --dll Generate a project for creating a dll
--static-crt Use the static C runtime (/MT)
--target=isa-os-cc Target specifier (required)
--out=filename Write output to a file [stdout]
@@ -142,7 +143,9 @@ generate_filter() {
if [ "${f##*.}" == "$pat" ]; then
unset file_list[i]
+ objf=$(echo ${f%.*}.obj | sed -e 's/^[\./]\+//g' -e 's,/,_,g')
open_tag File RelativePath="./$f"
+
if [ "$pat" == "asm" ] && $asm_use_custom_step; then
for plat in "${platforms[@]}"; do
for cfg in Debug Release; do
@@ -152,14 +155,27 @@ generate_filter() {
tag Tool \
Name="VCCustomBuildTool" \
Description="Assembling \$(InputFileName)" \
- CommandLine="$(eval echo \$asm_${cfg}_cmdline)" \
- Outputs="\$(InputName).obj" \
+ CommandLine="$(eval echo \$asm_${cfg}_cmdline) -o \$(IntDir)$objf" \
+ Outputs="\$(IntDir)$objf" \
close_tag FileConfiguration
done
done
fi
+ if [ "$pat" == "c" ] || [ "$pat" == "cc" ] ; then
+ for plat in "${platforms[@]}"; do
+ for cfg in Debug Release; do
+ open_tag FileConfiguration \
+ Name="${cfg}|${plat}" \
+ tag Tool \
+ Name="VCCLCompilerTool" \
+ ObjectFile="\$(IntDir)$objf" \
+
+ close_tag FileConfiguration
+ done
+ done
+ fi
close_tag File
break
@@ -190,6 +206,8 @@ for opt in "$@"; do
;;
--exe) proj_kind="exe"
;;
+ --dll) proj_kind="dll"
+ ;;
--lib) proj_kind="lib"
;;
--src-path-bare=*) src_path_bare="$optval"
@@ -242,10 +260,15 @@ uses_asm=${uses_asm:-false}
case "${vs_ver:-8}" in
7) vs_ver_id="7.10"
asm_use_custom_step=$uses_asm
+ warn_64bit='Detect64BitPortabilityProblems=true'
;;
8) vs_ver_id="8.00"
+ asm_use_custom_step=$uses_asm
+ warn_64bit='Detect64BitPortabilityProblems=true'
;;
9) vs_ver_id="9.00"
+ asm_use_custom_step=$uses_asm
+ warn_64bit='Detect64BitPortabilityProblems=false'
;;
esac
@@ -284,10 +307,11 @@ esac
case "$target" in
x86_64*)
platforms[0]="x64"
+ asm_Debug_cmdline="yasm -Xvc -g cv8 -f \$(PlatformName) ${yasmincs} &quot;\$(InputPath)&quot;"
+ asm_Release_cmdline="yasm -Xvc -f \$(PlatformName) ${yasmincs} &quot;\$(InputPath)&quot;"
;;
x86*)
platforms[0]="Win32"
- # these are only used by vs7
asm_Debug_cmdline="yasm -Xvc -g cv8 -f \$(PlatformName) ${yasmincs} &quot;\$(InputPath)&quot;"
asm_Release_cmdline="yasm -Xvc -f \$(PlatformName) ${yasmincs} &quot;\$(InputPath)&quot;"
;;
@@ -299,6 +323,8 @@ generate_vcproj() {
case "$proj_kind" in
exe) vs_ConfigurationType=1
;;
+ dll) vs_ConfigurationType=2
+ ;;
*) vs_ConfigurationType=4
;;
esac
@@ -318,13 +344,6 @@ generate_vcproj() {
done
close_tag Platforms
- open_tag ToolFiles
- case "$target" in
- x86*) $uses_asm && tag ToolFile RelativePath="$self_dirname/../x86-msvs/yasm.rules"
- ;;
- esac
- close_tag ToolFiles
-
open_tag Configurations
for plat in "${platforms[@]}"; do
plat_no_ws=`echo $plat | sed 's/[^A-Za-z0-9_]/_/g'`
@@ -346,8 +365,8 @@ generate_vcproj() {
PreprocessorDefinitions="WIN32;DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;_CRT_SECURE_NO_DEPRECATE" \
RuntimeLibrary="$debug_runtime" \
WarningLevel="3" \
- Detect64BitPortabilityProblems="true" \
DebugInformationFormat="1" \
+ $warn_64bit \
;;
vpx)
tag Tool \
@@ -363,7 +382,7 @@ generate_vcproj() {
UsePrecompiledHeader="0" \
WarningLevel="3" \
DebugInformationFormat="1" \
- Detect64BitPortabilityProblems="true" \
+ $warn_64bit \
$uses_asm && tag Tool Name="YASM" IncludePaths="$incs" Debug="true"
;;
@@ -377,7 +396,7 @@ generate_vcproj() {
UsePrecompiledHeader="0" \
WarningLevel="3" \
DebugInformationFormat="1" \
- Detect64BitPortabilityProblems="true" \
+ $warn_64bit \
$uses_asm && tag Tool Name="YASM" IncludePaths="$incs" Debug="true"
;;
@@ -454,8 +473,8 @@ generate_vcproj() {
RuntimeLibrary="$release_runtime" \
UsePrecompiledHeader="0" \
WarningLevel="3" \
- Detect64BitPortabilityProblems="true" \
DebugInformationFormat="0" \
+ $warn_64bit \
;;
vpx)
tag Tool \
@@ -472,7 +491,7 @@ generate_vcproj() {
UsePrecompiledHeader="0" \
WarningLevel="3" \
DebugInformationFormat="0" \
- Detect64BitPortabilityProblems="true" \
+ $warn_64bit \
$uses_asm && tag Tool Name="YASM" IncludePaths="$incs"
;;
@@ -487,7 +506,7 @@ generate_vcproj() {
UsePrecompiledHeader="0" \
WarningLevel="3" \
DebugInformationFormat="0" \
- Detect64BitPortabilityProblems="true" \
+ $warn_64bit \
$uses_asm && tag Tool Name="YASM" IncludePaths="$incs"
;;
diff --git a/libvpx/build/make/gen_msvs_sln.sh b/libvpx/build/make/gen_msvs_sln.sh
index 240678b..5a8c793 100755
--- a/libvpx/build/make/gen_msvs_sln.sh
+++ b/libvpx/build/make/gen_msvs_sln.sh
@@ -25,7 +25,7 @@ files.
Options:
--help Print this message
--out=outfile Redirect output to a file
- --ver=version Version (7,8,9) of visual studio to generate for
+ --ver=version Version (7,8,9,10,11) of visual studio to generate for
--target=isa-os-cc Target specifier
EOF
exit 1
@@ -55,14 +55,19 @@ indent_pop() {
parse_project() {
local file=$1
- local name=`grep Name "$file" | awk 'BEGIN {FS="\""}{if (NR==1) print $2}'`
- local guid=`grep ProjectGUID "$file" | awk 'BEGIN {FS="\""}{if (NR==1) print $2}'`
+ if [ "$sfx" = "vcproj" ]; then
+ local name=`grep Name "$file" | awk 'BEGIN {FS="\""}{if (NR==1) print $2}'`
+ local guid=`grep ProjectGUID "$file" | awk 'BEGIN {FS="\""}{if (NR==1) print $2}'`
+ else
+ local name=`grep RootNamespace "$file" | sed 's,.*<.*>\(.*\)</.*>.*,\1,'`
+ local guid=`grep ProjectGuid "$file" | sed 's,.*<.*>\(.*\)</.*>.*,\1,'`
+ fi
# save the project GUID to a variable, normalizing to the basename of the
# vcproj file without the extension
local var
var=${file##*/}
- var=${var%%.vcproj}
+ var=${var%%.${sfx}}
eval "${var}_file=\"$1\""
eval "${var}_name=$name"
eval "${var}_guid=$guid"
@@ -83,14 +88,14 @@ process_project() {
# vcproj file without the extension
local var
var=${file##*/}
- var=${var%%.vcproj}
+ var=${var%%.${sfx}}
eval "${var}_guid=$guid"
echo "Project(\"{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}\") = \"$name\", \"$file\", \"$guid\""
indent_push
eval "local deps=\"\${${var}_deps}\""
- if [ -n "$deps" ]; then
+ if [ -n "$deps" ] && [ "$sfx" = "vcproj" ]; then
echo "${indent}ProjectSection(ProjectDependencies) = postProject"
indent_push
@@ -221,7 +226,7 @@ for opt in "$@"; do
;;
--ver=*) vs_ver="$optval"
case $optval in
- [789])
+ [789]|10|11)
;;
*) die Unrecognized Visual Studio Version in $opt
;;
@@ -257,6 +262,20 @@ case "${vs_ver:-8}" in
9) sln_vers="10.00"
sln_vers_str="Visual Studio 2008"
;;
+ 10) sln_vers="11.00"
+ sln_vers_str="Visual Studio 2010"
+ ;;
+ 11) sln_vers="12.00"
+ sln_vers_str="Visual Studio 2012"
+ ;;
+esac
+case "${vs_ver:-8}" in
+ [789])
+ sfx=vcproj
+ ;;
+ 10|11)
+ sfx=vcxproj
+ ;;
esac
for f in "${file_list[@]}"; do
diff --git a/libvpx/build/make/gen_msvs_vcxproj.sh b/libvpx/build/make/gen_msvs_vcxproj.sh
new file mode 100755
index 0000000..4875915
--- /dev/null
+++ b/libvpx/build/make/gen_msvs_vcxproj.sh
@@ -0,0 +1,530 @@
+#!/bin/bash
+##
+## Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+
+self=$0
+self_basename=${self##*/}
+self_dirname=$(dirname "$0")
+EOL=$'\n'
+
+show_help() {
+ cat <<EOF
+Usage: ${self_basename} --name=projname [options] file1 [file2 ...]
+
+This script generates a Visual Studio project file from a list of source
+code files.
+
+Options:
+ --help Print this message
+ --exe Generate a project for building an Application
+ --lib Generate a project for creating a static library
+ --dll Generate a project for creating a dll
+ --static-crt Use the static C runtime (/MT)
+ --target=isa-os-cc Target specifier (required)
+ --out=filename Write output to a file [stdout]
+ --name=project_name Name of the project (required)
+ --proj-guid=GUID GUID to use for the project
+ --module-def=filename File containing export definitions (for DLLs)
+ --ver=version Version (10,11) of visual studio to generate for
+ --src-path-bare=dir Path to root of source tree
+ -Ipath/to/include Additional include directories
+ -DFLAG[=value] Preprocessor macros to define
+ -Lpath/to/lib Additional library search paths
+ -llibname Library to link against
+EOF
+ exit 1
+}
+
+die() {
+ echo "${self_basename}: $@" >&2
+ exit 1
+}
+
+die_unknown(){
+ echo "Unknown option \"$1\"." >&2
+ echo "See ${self_basename} --help for available options." >&2
+ exit 1
+}
+
+generate_uuid() {
+ local hex="0123456789ABCDEF"
+ local i
+ local uuid=""
+ local j
+ #93995380-89BD-4b04-88EB-625FBE52EBFB
+ for ((i=0; i<32; i++)); do
+ (( j = $RANDOM % 16 ))
+ uuid="${uuid}${hex:$j:1}"
+ done
+ echo "${uuid:0:8}-${uuid:8:4}-${uuid:12:4}-${uuid:16:4}-${uuid:20:12}"
+}
+
+indent1=" "
+indent=""
+indent_push() {
+ indent="${indent}${indent1}"
+}
+indent_pop() {
+ indent="${indent%${indent1}}"
+}
+
+tag_attributes() {
+ for opt in "$@"; do
+ optval="${opt#*=}"
+ [ -n "${optval}" ] ||
+ die "Missing attribute value in '$opt' while generating $tag tag"
+ echo "${indent}${opt%%=*}=\"${optval}\""
+ done
+}
+
+open_tag() {
+ local tag=$1
+ shift
+ if [ $# -ne 0 ]; then
+ echo "${indent}<${tag}"
+ indent_push
+ tag_attributes "$@"
+ echo "${indent}>"
+ else
+ echo "${indent}<${tag}>"
+ indent_push
+ fi
+}
+
+close_tag() {
+ local tag=$1
+ indent_pop
+ echo "${indent}</${tag}>"
+}
+
+tag() {
+ local tag=$1
+ shift
+ if [ $# -ne 0 ]; then
+ echo "${indent}<${tag}"
+ indent_push
+ tag_attributes "$@"
+ indent_pop
+ echo "${indent}/>"
+ else
+ echo "${indent}<${tag}/>"
+ fi
+}
+
+tag_content() {
+ local tag=$1
+ local content=$2
+ shift
+ shift
+ if [ $# -ne 0 ]; then
+ echo "${indent}<${tag}"
+ indent_push
+ tag_attributes "$@"
+ echo "${indent}>${content}</${tag}>"
+ indent_pop
+ else
+ echo "${indent}<${tag}>${content}</${tag}>"
+ fi
+}
+
+generate_filter() {
+ local name=$1
+ local pats=$2
+ local file_list_sz
+ local i
+ local f
+ local saveIFS="$IFS"
+ local pack
+ echo "generating filter '$name' from ${#file_list[@]} files" >&2
+ IFS=*
+
+ file_list_sz=${#file_list[@]}
+ for i in ${!file_list[@]}; do
+ f=${file_list[i]}
+ for pat in ${pats//;/$IFS}; do
+ if [ "${f##*.}" == "$pat" ]; then
+ unset file_list[i]
+
+ objf=$(echo ${f%.*}.obj | sed -e 's/^[\./]\+//g' -e 's,/,_,g')
+
+ if ([ "$pat" == "asm" ] || [ "$pat" == "s" ]) && $asm_use_custom_step; then
+ open_tag CustomBuild \
+ Include=".\\$f"
+ for plat in "${platforms[@]}"; do
+ for cfg in Debug Release; do
+ tag_content Message "Assembling %(Filename)%(Extension)" \
+ Condition="'\$(Configuration)|\$(Platform)'=='$cfg|$plat'"
+ tag_content Command "$(eval echo \$asm_${cfg}_cmdline) -o \$(IntDir)$objf" \
+ Condition="'\$(Configuration)|\$(Platform)'=='$cfg|$plat'"
+ tag_content Outputs "\$(IntDir)$objf" \
+ Condition="'\$(Configuration)|\$(Platform)'=='$cfg|$plat'"
+ done
+ done
+ close_tag CustomBuild
+ elif [ "$pat" == "c" ] || [ "$pat" == "cc" ] ; then
+ open_tag ClCompile \
+ Include=".\\$f"
+ # Separate file names with Condition?
+ tag_content ObjectFileName "\$(IntDir)$objf"
+ close_tag ClCompile
+ elif [ "$pat" == "h" ] ; then
+ tag ClInclude \
+ Include=".\\$f"
+ elif [ "$pat" == "vcxproj" ] ; then
+ open_tag ProjectReference \
+ Include="$f"
+ depguid=`grep ProjectGuid "$f" | sed 's,.*<.*>\(.*\)</.*>.*,\1,'`
+ tag_content Project "$depguid"
+ tag_content ReferenceOutputAssembly false
+ close_tag ProjectReference
+ else
+ tag None \
+ Include=".\\$f"
+ fi
+
+ break
+ fi
+ done
+ done
+
+ IFS="$saveIFS"
+}
+
+# Process command line
+unset target
+for opt in "$@"; do
+ optval="${opt#*=}"
+ case "$opt" in
+ --help|-h) show_help
+ ;;
+ --target=*) target="${optval}"
+ ;;
+ --out=*) outfile="$optval"
+ ;;
+ --name=*) name="${optval}"
+ ;;
+ --proj-guid=*) guid="${optval}"
+ ;;
+ --module-def=*) module_def="${optval}"
+ ;;
+ --exe) proj_kind="exe"
+ ;;
+ --dll) proj_kind="dll"
+ ;;
+ --lib) proj_kind="lib"
+ ;;
+ --src-path-bare=*) src_path_bare="$optval"
+ ;;
+ --static-crt) use_static_runtime=true
+ ;;
+ --ver=*)
+ vs_ver="$optval"
+ case "$optval" in
+ 10|11)
+ ;;
+ *) die Unrecognized Visual Studio Version in $opt
+ ;;
+ esac
+ ;;
+ -I*)
+ opt="${opt%/}"
+ incs="${incs}${incs:+;}${opt##-I}"
+ yasmincs="${yasmincs} ${opt}"
+ ;;
+ -D*) defines="${defines}${defines:+;}${opt##-D}"
+ ;;
+ -L*) # fudge . to $(OutDir)
+ if [ "${opt##-L}" == "." ]; then
+ libdirs="${libdirs}${libdirs:+;}\$(OutDir)"
+ else
+ # Also try directories for this platform/configuration
+ libdirs="${libdirs}${libdirs:+;}${opt##-L}"
+ libdirs="${libdirs}${libdirs:+;}${opt##-L}/\$(PlatformName)/\$(Configuration)"
+ libdirs="${libdirs}${libdirs:+;}${opt##-L}/\$(PlatformName)"
+ fi
+ ;;
+ -l*) libs="${libs}${libs:+ }${opt##-l}.lib"
+ ;;
+ -*) die_unknown $opt
+ ;;
+ *)
+ file_list[${#file_list[@]}]="$opt"
+ case "$opt" in
+ *.asm|*.s) uses_asm=true
+ ;;
+ esac
+ ;;
+ esac
+done
+outfile=${outfile:-/dev/stdout}
+guid=${guid:-`generate_uuid`}
+asm_use_custom_step=false
+uses_asm=${uses_asm:-false}
+case "${vs_ver:-11}" in
+ 10|11)
+ asm_use_custom_step=$uses_asm
+ ;;
+esac
+
+[ -n "$name" ] || die "Project name (--name) must be specified!"
+[ -n "$target" ] || die "Target (--target) must be specified!"
+
+if ${use_static_runtime:-false}; then
+ release_runtime=MultiThreaded
+ debug_runtime=MultiThreadedDebug
+ lib_sfx=mt
+else
+ release_runtime=MultiThreadedDLL
+ debug_runtime=MultiThreadedDebugDLL
+ lib_sfx=md
+fi
+
+# Calculate debug lib names: If a lib ends in ${lib_sfx}.lib, then rename
+# it to ${lib_sfx}d.lib. This precludes linking to release libs from a
+# debug exe, so this may need to be refactored later.
+for lib in ${libs}; do
+ if [ "$lib" != "${lib%${lib_sfx}.lib}" ]; then
+ lib=${lib%.lib}d.lib
+ fi
+ debug_libs="${debug_libs}${debug_libs:+ }${lib}"
+done
+debug_libs=${debug_libs// /;}
+libs=${libs// /;}
+
+
+# List of all platforms supported for this target
+case "$target" in
+ x86_64*)
+ platforms[0]="x64"
+ asm_Debug_cmdline="yasm -Xvc -g cv8 -f \$(PlatformName) ${yasmincs} &quot;%(FullPath)&quot;"
+ asm_Release_cmdline="yasm -Xvc -f \$(PlatformName) ${yasmincs} &quot;%(FullPath)&quot;"
+ ;;
+ x86*)
+ platforms[0]="Win32"
+ asm_Debug_cmdline="yasm -Xvc -g cv8 -f \$(PlatformName) ${yasmincs} &quot;%(FullPath)&quot;"
+ asm_Release_cmdline="yasm -Xvc -f \$(PlatformName) ${yasmincs} &quot;%(FullPath)&quot;"
+ ;;
+ arm*)
+ asm_Debug_cmdline="armasm -nologo &quot;%(FullPath)&quot;"
+ asm_Release_cmdline="armasm -nologo &quot;%(FullPath)&quot;"
+ if [ "$name" = "obj_int_extract" ]; then
+ # We don't want to build this tool for the target architecture,
+ # but for an architecture we can run locally during the build.
+ platforms[0]="Win32"
+ else
+ platforms[0]="ARM"
+ fi
+ ;;
+ *) die "Unsupported target $target!"
+ ;;
+esac
+
+generate_vcxproj() {
+ echo "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
+ open_tag Project \
+ DefaultTargets="Build" \
+ ToolsVersion="4.0" \
+ xmlns="http://schemas.microsoft.com/developer/msbuild/2003" \
+
+ open_tag ItemGroup \
+ Label="ProjectConfigurations"
+ for plat in "${platforms[@]}"; do
+ for config in Debug Release; do
+ open_tag ProjectConfiguration \
+ Include="$config|$plat"
+ tag_content Configuration $config
+ tag_content Platform $plat
+ close_tag ProjectConfiguration
+ done
+ done
+ close_tag ItemGroup
+
+ open_tag PropertyGroup \
+ Label="Globals"
+ tag_content ProjectGuid "{${guid}}"
+ tag_content RootNamespace ${name}
+ tag_content Keyword ManagedCProj
+ close_tag PropertyGroup
+
+ tag Import \
+ Project="\$(VCTargetsPath)\\Microsoft.Cpp.Default.props"
+
+ for plat in "${platforms[@]}"; do
+ for config in Release Debug; do
+ open_tag PropertyGroup \
+ Condition="'\$(Configuration)|\$(Platform)'=='$config|$plat'" \
+ Label="Configuration"
+ if [ "$proj_kind" = "exe" ]; then
+ tag_content ConfigurationType Application
+ elif [ "$proj_kind" = "dll" ]; then
+ tag_content ConfigurationType DynamicLibrary
+ else
+ tag_content ConfigurationType StaticLibrary
+ fi
+ if [ "$vs_ver" = "11" ]; then
+ if [ "$plat" = "ARM" ]; then
+ # Setting the wp80 toolchain automatically sets the
+ # WINAPI_FAMILY define, which is required for building
+ # code for arm with the windows headers. Alternatively,
+ # one could add AppContainerApplication=true in the Globals
+ # section and add PrecompiledHeader=NotUsing and
+ # CompileAsWinRT=false in ClCompile and SubSystem=Console
+ # in Link.
+ tag_content PlatformToolset v110_wp80
+ else
+ tag_content PlatformToolset v110
+ fi
+ fi
+ tag_content CharacterSet Unicode
+ if [ "$config" = "Release" ]; then
+ tag_content WholeProgramOptimization true
+ fi
+ close_tag PropertyGroup
+ done
+ done
+
+ tag Import \
+ Project="\$(VCTargetsPath)\\Microsoft.Cpp.props"
+
+ open_tag ImportGroup \
+ Label="PropertySheets"
+ tag Import \
+ Project="\$(UserRootDir)\\Microsoft.Cpp.\$(Platform).user.props" \
+ Condition="exists('\$(UserRootDir)\\Microsoft.Cpp.\$(Platform).user.props')" \
+ Label="LocalAppDataPlatform"
+ close_tag ImportGroup
+
+ tag PropertyGroup \
+ Label="UserMacros"
+
+ for plat in "${platforms[@]}"; do
+ plat_no_ws=`echo $plat | sed 's/[^A-Za-z0-9_]/_/g'`
+ for config in Debug Release; do
+ open_tag PropertyGroup \
+ Condition="'\$(Configuration)|\$(Platform)'=='$config|$plat'"
+ tag_content OutDir "\$(SolutionDir)$plat_no_ws\\\$(Configuration)\\"
+ tag_content IntDir "$plat_no_ws\\\$(Configuration)\\${name}\\"
+ close_tag PropertyGroup
+ done
+ done
+
+ for plat in "${platforms[@]}"; do
+ for config in Debug Release; do
+ open_tag ItemDefinitionGroup \
+ Condition="'\$(Configuration)|\$(Platform)'=='$config|$plat'"
+ if [ "$name" = "vpx" ]; then
+ open_tag PreBuildEvent
+ tag_content Command "call obj_int_extract.bat $src_path_bare"
+ close_tag PreBuildEvent
+ fi
+ open_tag ClCompile
+ if [ "$config" = "Debug" ]; then
+ opt=Disabled
+ runtime=$debug_runtime
+ curlibs=$debug_libs
+ confsuffix=d
+ case "$name" in
+ obj_int_extract)
+ debug=DEBUG
+ ;;
+ *)
+ debug=_DEBUG
+ ;;
+ esac
+ else
+ opt=MaxSpeed
+ runtime=$release_runtime
+ curlibs=$libs
+ confsuffix=""
+ tag_content FavorSizeOrSpeed Speed
+ debug=NDEBUG
+ fi
+ case "$name" in
+ obj_int_extract)
+ extradefines=";_CONSOLE"
+ ;;
+ *)
+ extradefines=";$defines"
+ ;;
+ esac
+ tag_content Optimization $opt
+ tag_content AdditionalIncludeDirectories "$incs;%(AdditionalIncludeDirectories)"
+ tag_content PreprocessorDefinitions "WIN32;$debug;_CRT_SECURE_NO_WARNINGS;_CRT_SECURE_NO_DEPRECATE$extradefines;%(PreprocessorDefinitions)"
+ tag_content RuntimeLibrary $runtime
+ tag_content WarningLevel Level3
+ # DebugInformationFormat
+ close_tag ClCompile
+ case "$proj_kind" in
+ exe)
+ open_tag Link
+ if [ "$name" = "obj_int_extract" ]; then
+ tag_content OutputFile "${name}.exe"
+ else
+ tag_content AdditionalDependencies "$curlibs"
+ tag_content AdditionalLibraryDirectories "$libdirs;%(AdditionalLibraryDirectories)"
+ fi
+ tag_content GenerateDebugInformation true
+ close_tag Link
+ ;;
+ dll)
+ open_tag Link
+ tag_content GenerateDebugInformation true
+ tag_content ModuleDefinitionFile $module_def
+ close_tag Link
+ ;;
+ lib)
+ open_tag Lib
+ tag_content OutputFile "\$(OutDir)${name}${lib_sfx}${confsuffix}.lib"
+ close_tag Lib
+ ;;
+ esac
+ close_tag ItemDefinitionGroup
+ done
+
+ done
+
+ open_tag ItemGroup
+ generate_filter "Source Files" "c;cc;def;odl;idl;hpj;bat;asm;asmx;s"
+ close_tag ItemGroup
+ open_tag ItemGroup
+ generate_filter "Header Files" "h;hm;inl;inc;xsd"
+ close_tag ItemGroup
+ open_tag ItemGroup
+ generate_filter "Build Files" "mk"
+ close_tag ItemGroup
+ open_tag ItemGroup
+ generate_filter "References" "vcxproj"
+ close_tag ItemGroup
+
+ tag Import \
+ Project="\$(VCTargetsPath)\\Microsoft.Cpp.targets"
+
+ open_tag ImportGroup \
+ Label="ExtensionTargets"
+ close_tag ImportGroup
+
+ close_tag Project
+
+ # This must be done from within the {} subshell
+ echo "Ignored files list (${#file_list[@]} items) is:" >&2
+ for f in "${file_list[@]}"; do
+ echo " $f" >&2
+ done
+}
+
+# This regexp doesn't catch most of the strings in the vcxproj format,
+# since they're like <tag>path</tag> instead of <tag attr="path" />
+# as previously. It still seems to work ok despite this.
+generate_vcxproj |
+ sed -e '/"/s;\([^ "]\)/;\1\\;g' |
+ sed -e '/xmlns/s;\\;/;g' > ${outfile}
+
+exit
diff --git a/libvpx/build/make/obj_int_extract.c b/libvpx/build/make/obj_int_extract.c
index bf317bd..1604b5e 100644
--- a/libvpx/build/make/obj_int_extract.c
+++ b/libvpx/build/make/obj_int_extract.c
@@ -17,21 +17,19 @@
#include "vpx_config.h"
#include "vpx/vpx_integer.h"
-typedef enum
-{
- OUTPUT_FMT_PLAIN,
- OUTPUT_FMT_RVDS,
- OUTPUT_FMT_GAS,
+typedef enum {
+ OUTPUT_FMT_PLAIN,
+ OUTPUT_FMT_RVDS,
+ OUTPUT_FMT_GAS,
} output_fmt_t;
-int log_msg(const char *fmt, ...)
-{
- int res;
- va_list ap;
- va_start(ap, fmt);
- res = vfprintf(stderr, fmt, ap);
- va_end(ap);
- return res;
+int log_msg(const char *fmt, ...) {
+ int res;
+ va_list ap;
+ va_start(ap, fmt);
+ res = vfprintf(stderr, fmt, ap);
+ va_end(ap);
+ return res;
}
#if defined(__GNUC__) && __GNUC__
@@ -40,175 +38,148 @@ int log_msg(const char *fmt, ...)
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
-int parse_macho(uint8_t *base_buf, size_t sz)
-{
- int i, j;
- struct mach_header header;
- uint8_t *buf = base_buf;
- int base_data_section = 0;
- int bits = 0;
-
- /* We can read in mach_header for 32 and 64 bit architectures
- * because it's identical to mach_header_64 except for the last
- * element (uint32_t reserved), which we don't use. Then, when
- * we know which architecture we're looking at, increment buf
- * appropriately.
- */
- memcpy(&header, buf, sizeof(struct mach_header));
-
- if (header.magic == MH_MAGIC)
- {
- if (header.cputype == CPU_TYPE_ARM
- || header.cputype == CPU_TYPE_X86)
- {
- bits = 32;
- buf += sizeof(struct mach_header);
- }
- else
- {
- log_msg("Bad cputype for object file. Currently only tested for CPU_TYPE_[ARM|X86].\n");
- goto bail;
- }
+int parse_macho(uint8_t *base_buf, size_t sz) {
+ int i, j;
+ struct mach_header header;
+ uint8_t *buf = base_buf;
+ int base_data_section = 0;
+ int bits = 0;
+
+ /* We can read in mach_header for 32 and 64 bit architectures
+ * because it's identical to mach_header_64 except for the last
+ * element (uint32_t reserved), which we don't use. Then, when
+ * we know which architecture we're looking at, increment buf
+ * appropriately.
+ */
+ memcpy(&header, buf, sizeof(struct mach_header));
+
+ if (header.magic == MH_MAGIC) {
+ if (header.cputype == CPU_TYPE_ARM
+ || header.cputype == CPU_TYPE_X86) {
+ bits = 32;
+ buf += sizeof(struct mach_header);
+ } else {
+ log_msg("Bad cputype for object file. Currently only tested for CPU_TYPE_[ARM|X86].\n");
+ goto bail;
}
- else if (header.magic == MH_MAGIC_64)
- {
- if (header.cputype == CPU_TYPE_X86_64)
- {
- bits = 64;
- buf += sizeof(struct mach_header_64);
- }
- else
- {
- log_msg("Bad cputype for object file. Currently only tested for CPU_TYPE_X86_64.\n");
- goto bail;
- }
+ } else if (header.magic == MH_MAGIC_64) {
+ if (header.cputype == CPU_TYPE_X86_64) {
+ bits = 64;
+ buf += sizeof(struct mach_header_64);
+ } else {
+ log_msg("Bad cputype for object file. Currently only tested for CPU_TYPE_X86_64.\n");
+ goto bail;
}
- else
- {
- log_msg("Bad magic number for object file. 0x%x or 0x%x expected, 0x%x found.\n",
- MH_MAGIC, MH_MAGIC_64, header.magic);
+ } else {
+ log_msg("Bad magic number for object file. 0x%x or 0x%x expected, 0x%x found.\n",
+ MH_MAGIC, MH_MAGIC_64, header.magic);
+ goto bail;
+ }
+
+ if (header.filetype != MH_OBJECT) {
+ log_msg("Bad filetype for object file. Currently only tested for MH_OBJECT.\n");
+ goto bail;
+ }
+
+ for (i = 0; i < header.ncmds; i++) {
+ struct load_command lc;
+
+ memcpy(&lc, buf, sizeof(struct load_command));
+
+ if (lc.cmd == LC_SEGMENT) {
+ uint8_t *seg_buf = buf;
+ struct section s;
+ struct segment_command seg_c;
+
+ memcpy(&seg_c, seg_buf, sizeof(struct segment_command));
+ seg_buf += sizeof(struct segment_command);
+
+ /* Although each section is given its own offset, nlist.n_value
+ * references the offset of the first section. This isn't
+ * apparent without debug information because the offset of the
+ * data section is the same as the first section. However, with
+ * debug sections mixed in, the offset of the debug section
+ * increases but n_value still references the first section.
+ */
+ if (seg_c.nsects < 1) {
+ log_msg("Not enough sections\n");
goto bail;
- }
+ }
+
+ memcpy(&s, seg_buf, sizeof(struct section));
+ base_data_section = s.offset;
+ } else if (lc.cmd == LC_SEGMENT_64) {
+ uint8_t *seg_buf = buf;
+ struct section_64 s;
+ struct segment_command_64 seg_c;
+
+ memcpy(&seg_c, seg_buf, sizeof(struct segment_command_64));
+ seg_buf += sizeof(struct segment_command_64);
- if (header.filetype != MH_OBJECT)
- {
- log_msg("Bad filetype for object file. Currently only tested for MH_OBJECT.\n");
+ /* Explanation in LC_SEGMENT */
+ if (seg_c.nsects < 1) {
+ log_msg("Not enough sections\n");
goto bail;
- }
+ }
- for (i = 0; i < header.ncmds; i++)
- {
- struct load_command lc;
-
- memcpy(&lc, buf, sizeof(struct load_command));
-
- if (lc.cmd == LC_SEGMENT)
- {
- uint8_t *seg_buf = buf;
- struct section s;
- struct segment_command seg_c;
-
- memcpy(&seg_c, seg_buf, sizeof(struct segment_command));
- seg_buf += sizeof(struct segment_command);
-
- /* Although each section is given its own offset, nlist.n_value
- * references the offset of the first section. This isn't
- * apparent without debug information because the offset of the
- * data section is the same as the first section. However, with
- * debug sections mixed in, the offset of the debug section
- * increases but n_value still references the first section.
- */
- if (seg_c.nsects < 1)
- {
- log_msg("Not enough sections\n");
- goto bail;
- }
+ memcpy(&s, seg_buf, sizeof(struct section_64));
+ base_data_section = s.offset;
+ } else if (lc.cmd == LC_SYMTAB) {
+ if (base_data_section != 0) {
+ struct symtab_command sc;
+ uint8_t *sym_buf = base_buf;
+ uint8_t *str_buf = base_buf;
- memcpy(&s, seg_buf, sizeof(struct section));
- base_data_section = s.offset;
- }
- else if (lc.cmd == LC_SEGMENT_64)
- {
- uint8_t *seg_buf = buf;
- struct section_64 s;
- struct segment_command_64 seg_c;
-
- memcpy(&seg_c, seg_buf, sizeof(struct segment_command_64));
- seg_buf += sizeof(struct segment_command_64);
-
- /* Explanation in LC_SEGMENT */
- if (seg_c.nsects < 1)
- {
- log_msg("Not enough sections\n");
- goto bail;
- }
+ memcpy(&sc, buf, sizeof(struct symtab_command));
- memcpy(&s, seg_buf, sizeof(struct section_64));
- base_data_section = s.offset;
- }
- else if (lc.cmd == LC_SYMTAB)
- {
- if (base_data_section != 0)
- {
- struct symtab_command sc;
- uint8_t *sym_buf = base_buf;
- uint8_t *str_buf = base_buf;
-
- memcpy(&sc, buf, sizeof(struct symtab_command));
-
- if (sc.cmdsize != sizeof(struct symtab_command))
- {
- log_msg("Can't find symbol table!\n");
- goto bail;
- }
-
- sym_buf += sc.symoff;
- str_buf += sc.stroff;
-
- for (j = 0; j < sc.nsyms; j++)
- {
- /* Location of string is calculated each time from the
- * start of the string buffer. On darwin the symbols
- * are prefixed by "_", so we bump the pointer by 1.
- * The target value is defined as an int in asm_*_offsets.c,
- * which is 4 bytes on all targets we currently use.
- */
- if (bits == 32)
- {
- struct nlist nl;
- int val;
-
- memcpy(&nl, sym_buf, sizeof(struct nlist));
- sym_buf += sizeof(struct nlist);
-
- memcpy(&val, base_buf + base_data_section + nl.n_value,
- sizeof(val));
- printf("%-40s EQU %5d\n",
- str_buf + nl.n_un.n_strx + 1, val);
- }
- else /* if (bits == 64) */
- {
- struct nlist_64 nl;
- int val;
-
- memcpy(&nl, sym_buf, sizeof(struct nlist_64));
- sym_buf += sizeof(struct nlist_64);
-
- memcpy(&val, base_buf + base_data_section + nl.n_value,
- sizeof(val));
- printf("%-40s EQU %5d\n",
- str_buf + nl.n_un.n_strx + 1, val);
- }
- }
- }
+ if (sc.cmdsize != sizeof(struct symtab_command)) {
+ log_msg("Can't find symbol table!\n");
+ goto bail;
}
- buf += lc.cmdsize;
+ sym_buf += sc.symoff;
+ str_buf += sc.stroff;
+
+ for (j = 0; j < sc.nsyms; j++) {
+ /* Location of string is calculated each time from the
+ * start of the string buffer. On darwin the symbols
+ * are prefixed by "_", so we bump the pointer by 1.
+ * The target value is defined as an int in *_asm_*_offsets.c,
+ * which is 4 bytes on all targets we currently use.
+ */
+ if (bits == 32) {
+ struct nlist nl;
+ int val;
+
+ memcpy(&nl, sym_buf, sizeof(struct nlist));
+ sym_buf += sizeof(struct nlist);
+
+ memcpy(&val, base_buf + base_data_section + nl.n_value,
+ sizeof(val));
+ printf("%-40s EQU %5d\n",
+ str_buf + nl.n_un.n_strx + 1, val);
+ } else { /* if (bits == 64) */
+ struct nlist_64 nl;
+ int val;
+
+ memcpy(&nl, sym_buf, sizeof(struct nlist_64));
+ sym_buf += sizeof(struct nlist_64);
+
+ memcpy(&val, base_buf + base_data_section + nl.n_value,
+ sizeof(val));
+ printf("%-40s EQU %5d\n",
+ str_buf + nl.n_un.n_strx + 1, val);
+ }
+ }
+ }
}
- return 0;
+ buf += lc.cmdsize;
+ }
+
+ return 0;
bail:
- return 1;
+ return 1;
}
@@ -216,448 +187,400 @@ bail:
#include "elf.h"
#define COPY_STRUCT(dst, buf, ofst, sz) do {\
- if(ofst + sizeof((*(dst))) > sz) goto bail;\
- memcpy(dst, buf+ofst, sizeof((*(dst))));\
- } while(0)
+ if(ofst + sizeof((*(dst))) > sz) goto bail;\
+ memcpy(dst, buf+ofst, sizeof((*(dst))));\
+ } while(0)
#define ENDIAN_ASSIGN(val, memb) do {\
- if(!elf->le_data) {log_msg("Big Endian data not supported yet!\n");goto bail;}\
- (val) = (memb);\
- } while(0)
+ if(!elf->le_data) {log_msg("Big Endian data not supported yet!\n");goto bail;}\
+ (val) = (memb);\
+ } while(0)
#define ENDIAN_ASSIGN_IN_PLACE(memb) do {\
- ENDIAN_ASSIGN(memb, memb);\
- } while(0)
-
-typedef struct
-{
- uint8_t *buf; /* Buffer containing ELF data */
- size_t sz; /* Buffer size */
- int le_data; /* Data is little-endian */
- unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */
- int bits; /* 32 or 64 */
- Elf32_Ehdr hdr32;
- Elf64_Ehdr hdr64;
+ ENDIAN_ASSIGN(memb, memb);\
+ } while(0)
+
+typedef struct {
+ uint8_t *buf; /* Buffer containing ELF data */
+ size_t sz; /* Buffer size */
+ int le_data; /* Data is little-endian */
+ unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */
+ int bits; /* 32 or 64 */
+ Elf32_Ehdr hdr32;
+ Elf64_Ehdr hdr64;
} elf_obj_t;
-int parse_elf_header(elf_obj_t *elf)
-{
- int res;
- /* Verify ELF Magic numbers */
- COPY_STRUCT(&elf->e_ident, elf->buf, 0, elf->sz);
- res = elf->e_ident[EI_MAG0] == ELFMAG0;
- res &= elf->e_ident[EI_MAG1] == ELFMAG1;
- res &= elf->e_ident[EI_MAG2] == ELFMAG2;
- res &= elf->e_ident[EI_MAG3] == ELFMAG3;
- res &= elf->e_ident[EI_CLASS] == ELFCLASS32
- || elf->e_ident[EI_CLASS] == ELFCLASS64;
- res &= elf->e_ident[EI_DATA] == ELFDATA2LSB;
-
- if (!res) goto bail;
-
- elf->le_data = elf->e_ident[EI_DATA] == ELFDATA2LSB;
-
- /* Read in relevant values */
- if (elf->e_ident[EI_CLASS] == ELFCLASS32)
- {
- elf->bits = 32;
- COPY_STRUCT(&elf->hdr32, elf->buf, 0, elf->sz);
-
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_type);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_machine);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_version);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_entry);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_phoff);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shoff);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_flags);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_ehsize);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_phentsize);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_phnum);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shentsize);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shnum);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shstrndx);
- }
- else /* if (elf->e_ident[EI_CLASS] == ELFCLASS64) */
- {
- elf->bits = 64;
- COPY_STRUCT(&elf->hdr64, elf->buf, 0, elf->sz);
-
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_type);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_machine);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_version);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_entry);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_phoff);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shoff);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_flags);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_ehsize);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_phentsize);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_phnum);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shentsize);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shnum);
- ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shstrndx);
- }
-
- return 0;
+int parse_elf_header(elf_obj_t *elf) {
+ int res;
+ /* Verify ELF Magic numbers */
+ COPY_STRUCT(&elf->e_ident, elf->buf, 0, elf->sz);
+ res = elf->e_ident[EI_MAG0] == ELFMAG0;
+ res &= elf->e_ident[EI_MAG1] == ELFMAG1;
+ res &= elf->e_ident[EI_MAG2] == ELFMAG2;
+ res &= elf->e_ident[EI_MAG3] == ELFMAG3;
+ res &= elf->e_ident[EI_CLASS] == ELFCLASS32
+ || elf->e_ident[EI_CLASS] == ELFCLASS64;
+ res &= elf->e_ident[EI_DATA] == ELFDATA2LSB;
+
+ if (!res) goto bail;
+
+ elf->le_data = elf->e_ident[EI_DATA] == ELFDATA2LSB;
+
+ /* Read in relevant values */
+ if (elf->e_ident[EI_CLASS] == ELFCLASS32) {
+ elf->bits = 32;
+ COPY_STRUCT(&elf->hdr32, elf->buf, 0, elf->sz);
+
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_type);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_machine);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_version);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_entry);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_phoff);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shoff);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_flags);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_ehsize);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_phentsize);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_phnum);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shentsize);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shnum);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shstrndx);
+ } else { /* if (elf->e_ident[EI_CLASS] == ELFCLASS64) */
+ elf->bits = 64;
+ COPY_STRUCT(&elf->hdr64, elf->buf, 0, elf->sz);
+
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_type);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_machine);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_version);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_entry);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_phoff);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shoff);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_flags);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_ehsize);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_phentsize);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_phnum);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shentsize);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shnum);
+ ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shstrndx);
+ }
+
+ return 0;
bail:
- log_msg("Failed to parse ELF file header");
- return 1;
+ log_msg("Failed to parse ELF file header");
+ return 1;
}
-int parse_elf_section(elf_obj_t *elf, int idx, Elf32_Shdr *hdr32, Elf64_Shdr *hdr64)
-{
- if (hdr32)
- {
- if (idx >= elf->hdr32.e_shnum)
- goto bail;
-
- COPY_STRUCT(hdr32, elf->buf, elf->hdr32.e_shoff + idx * elf->hdr32.e_shentsize,
- elf->sz);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_name);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_type);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_flags);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_addr);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_offset);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_size);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_link);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_info);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_addralign);
- ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_entsize);
- }
- else /* if (hdr64) */
- {
- if (idx >= elf->hdr64.e_shnum)
- goto bail;
-
- COPY_STRUCT(hdr64, elf->buf, elf->hdr64.e_shoff + idx * elf->hdr64.e_shentsize,
- elf->sz);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_name);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_type);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_flags);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_addr);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_offset);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_size);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_link);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_info);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_addralign);
- ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_entsize);
- }
+int parse_elf_section(elf_obj_t *elf, int idx, Elf32_Shdr *hdr32, Elf64_Shdr *hdr64) {
+ if (hdr32) {
+ if (idx >= elf->hdr32.e_shnum)
+ goto bail;
+
+ COPY_STRUCT(hdr32, elf->buf, elf->hdr32.e_shoff + idx * elf->hdr32.e_shentsize,
+ elf->sz);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_name);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_type);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_flags);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_addr);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_offset);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_size);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_link);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_info);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_addralign);
+ ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_entsize);
+ } else { /* if (hdr64) */
+ if (idx >= elf->hdr64.e_shnum)
+ goto bail;
- return 0;
+ COPY_STRUCT(hdr64, elf->buf, elf->hdr64.e_shoff + idx * elf->hdr64.e_shentsize,
+ elf->sz);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_name);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_type);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_flags);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_addr);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_offset);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_size);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_link);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_info);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_addralign);
+ ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_entsize);
+ }
+
+ return 0;
bail:
- return 1;
+ return 1;
}
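
The indexing above is the standard ELF layout rule: the idx-th section
header starts e_shoff + idx * e_shentsize bytes into the file, and
COPY_STRUCT takes elf->sz so the read can be bounds-checked. A hypothetical
helper, shown only to make the arithmetic explicit (not part of the patch):

#include <stddef.h>
#include <stdint.h>

/* File offset of section header number idx in a 32-bit ELF image. */
static size_t shdr32_offset(uint32_t e_shoff, uint16_t e_shentsize, int idx) {
  return (size_t)e_shoff + (size_t)idx * e_shentsize;
}
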
-char *parse_elf_string_table(elf_obj_t *elf, int s_idx, int idx)
-{
- if (elf->bits == 32)
- {
- Elf32_Shdr shdr;
-
- if (parse_elf_section(elf, s_idx, &shdr, NULL))
- {
- log_msg("Failed to parse ELF string table: section %d, index %d\n",
- s_idx, idx);
- return "";
- }
+char *parse_elf_string_table(elf_obj_t *elf, int s_idx, int idx) {
+ if (elf->bits == 32) {
+ Elf32_Shdr shdr;
- return (char *)(elf->buf + shdr.sh_offset + idx);
+ if (parse_elf_section(elf, s_idx, &shdr, NULL)) {
+ log_msg("Failed to parse ELF string table: section %d, index %d\n",
+ s_idx, idx);
+ return "";
}
- else /* if (elf->bits == 64) */
- {
- Elf64_Shdr shdr;
-
- if (parse_elf_section(elf, s_idx, NULL, &shdr))
- {
- log_msg("Failed to parse ELF string table: section %d, index %d\n",
- s_idx, idx);
- return "";
- }
- return (char *)(elf->buf + shdr.sh_offset + idx);
+ return (char *)(elf->buf + shdr.sh_offset + idx);
+ } else { /* if (elf->bits == 64) */
+ Elf64_Shdr shdr;
+
+ if (parse_elf_section(elf, s_idx, NULL, &shdr)) {
+ log_msg("Failed to parse ELF string table: section %d, index %d\n",
+ s_idx, idx);
+ return "";
}
+
+ return (char *)(elf->buf + shdr.sh_offset + idx);
+ }
}
-int parse_elf_symbol(elf_obj_t *elf, unsigned int ofst, Elf32_Sym *sym32, Elf64_Sym *sym64)
-{
- if (sym32)
- {
- COPY_STRUCT(sym32, elf->buf, ofst, elf->sz);
- ENDIAN_ASSIGN_IN_PLACE(sym32->st_name);
- ENDIAN_ASSIGN_IN_PLACE(sym32->st_value);
- ENDIAN_ASSIGN_IN_PLACE(sym32->st_size);
- ENDIAN_ASSIGN_IN_PLACE(sym32->st_info);
- ENDIAN_ASSIGN_IN_PLACE(sym32->st_other);
- ENDIAN_ASSIGN_IN_PLACE(sym32->st_shndx);
- }
- else /* if (sym64) */
- {
- COPY_STRUCT(sym64, elf->buf, ofst, elf->sz);
- ENDIAN_ASSIGN_IN_PLACE(sym64->st_name);
- ENDIAN_ASSIGN_IN_PLACE(sym64->st_value);
- ENDIAN_ASSIGN_IN_PLACE(sym64->st_size);
- ENDIAN_ASSIGN_IN_PLACE(sym64->st_info);
- ENDIAN_ASSIGN_IN_PLACE(sym64->st_other);
- ENDIAN_ASSIGN_IN_PLACE(sym64->st_shndx);
- }
- return 0;
+int parse_elf_symbol(elf_obj_t *elf, unsigned int ofst, Elf32_Sym *sym32, Elf64_Sym *sym64) {
+ if (sym32) {
+ COPY_STRUCT(sym32, elf->buf, ofst, elf->sz);
+ ENDIAN_ASSIGN_IN_PLACE(sym32->st_name);
+ ENDIAN_ASSIGN_IN_PLACE(sym32->st_value);
+ ENDIAN_ASSIGN_IN_PLACE(sym32->st_size);
+ ENDIAN_ASSIGN_IN_PLACE(sym32->st_info);
+ ENDIAN_ASSIGN_IN_PLACE(sym32->st_other);
+ ENDIAN_ASSIGN_IN_PLACE(sym32->st_shndx);
+ } else { /* if (sym64) */
+ COPY_STRUCT(sym64, elf->buf, ofst, elf->sz);
+ ENDIAN_ASSIGN_IN_PLACE(sym64->st_name);
+ ENDIAN_ASSIGN_IN_PLACE(sym64->st_value);
+ ENDIAN_ASSIGN_IN_PLACE(sym64->st_size);
+ ENDIAN_ASSIGN_IN_PLACE(sym64->st_info);
+ ENDIAN_ASSIGN_IN_PLACE(sym64->st_other);
+ ENDIAN_ASSIGN_IN_PLACE(sym64->st_shndx);
+ }
+ return 0;
bail:
- return 1;
+ return 1;
}
-int parse_elf(uint8_t *buf, size_t sz, output_fmt_t mode)
-{
- elf_obj_t elf;
- unsigned int ofst;
- int i;
- Elf32_Off strtab_off32;
- Elf64_Off strtab_off64; /* save String Table offset for later use */
+int parse_elf(uint8_t *buf, size_t sz, output_fmt_t mode) {
+ elf_obj_t elf;
+ unsigned int ofst;
+ int i;
+ Elf32_Off strtab_off32;
+ Elf64_Off strtab_off64; /* save String Table offset for later use */
- memset(&elf, 0, sizeof(elf));
- elf.buf = buf;
- elf.sz = sz;
+ memset(&elf, 0, sizeof(elf));
+ elf.buf = buf;
+ elf.sz = sz;
- /* Parse Header */
- if (parse_elf_header(&elf))
- goto bail;
+ /* Parse Header */
+ if (parse_elf_header(&elf))
+ goto bail;
- if (elf.bits == 32)
- {
- Elf32_Shdr shdr;
- for (i = 0; i < elf.hdr32.e_shnum; i++)
- {
- parse_elf_section(&elf, i, &shdr, NULL);
-
- if (shdr.sh_type == SHT_STRTAB)
- {
- char strtsb_name[128];
-
- strcpy(strtsb_name, (char *)(elf.buf + shdr.sh_offset + shdr.sh_name));
-
- if (!(strcmp(strtsb_name, ".shstrtab")))
- {
- /* log_msg("found section: %s\n", strtsb_name); */
- strtab_off32 = shdr.sh_offset;
- break;
- }
- }
+ if (elf.bits == 32) {
+ Elf32_Shdr shdr;
+ for (i = 0; i < elf.hdr32.e_shnum; i++) {
+ parse_elf_section(&elf, i, &shdr, NULL);
+
+ if (shdr.sh_type == SHT_STRTAB) {
+ char strtsb_name[128];
+
+ strcpy(strtsb_name, (char *)(elf.buf + shdr.sh_offset + shdr.sh_name));
+
+ if (!(strcmp(strtsb_name, ".shstrtab"))) {
+ /* log_msg("found section: %s\n", strtsb_name); */
+ strtab_off32 = shdr.sh_offset;
+ break;
}
+ }
}
- else /* if (elf.bits == 64) */
- {
- Elf64_Shdr shdr;
- for (i = 0; i < elf.hdr64.e_shnum; i++)
- {
- parse_elf_section(&elf, i, NULL, &shdr);
-
- if (shdr.sh_type == SHT_STRTAB)
- {
- char strtsb_name[128];
-
- strcpy(strtsb_name, (char *)(elf.buf + shdr.sh_offset + shdr.sh_name));
-
- if (!(strcmp(strtsb_name, ".shstrtab")))
- {
- /* log_msg("found section: %s\n", strtsb_name); */
- strtab_off64 = shdr.sh_offset;
- break;
- }
- }
+ } else { /* if (elf.bits == 64) */
+ Elf64_Shdr shdr;
+ for (i = 0; i < elf.hdr64.e_shnum; i++) {
+ parse_elf_section(&elf, i, NULL, &shdr);
+
+ if (shdr.sh_type == SHT_STRTAB) {
+ char strtsb_name[128];
+
+ strcpy(strtsb_name, (char *)(elf.buf + shdr.sh_offset + shdr.sh_name));
+
+ if (!(strcmp(strtsb_name, ".shstrtab"))) {
+ /* log_msg("found section: %s\n", strtsb_name); */
+ strtab_off64 = shdr.sh_offset;
+ break;
}
+ }
}
+ }
+
+ /* Parse all Symbol Tables */
+ if (elf.bits == 32) {
+ Elf32_Shdr shdr;
+ for (i = 0; i < elf.hdr32.e_shnum; i++) {
+ parse_elf_section(&elf, i, &shdr, NULL);
+
+ if (shdr.sh_type == SHT_SYMTAB) {
+ for (ofst = shdr.sh_offset;
+ ofst < shdr.sh_offset + shdr.sh_size;
+ ofst += shdr.sh_entsize) {
+ Elf32_Sym sym;
+
+ parse_elf_symbol(&elf, ofst, &sym, NULL);
+
+ /* For all OBJECTS (data objects), extract the value from the
+ * proper data segment.
+ */
+ /* if (ELF32_ST_TYPE(sym.st_info) == STT_OBJECT && sym.st_name)
+ log_msg("found data object %s\n",
+ parse_elf_string_table(&elf,
+ shdr.sh_link,
+ sym.st_name));
+ */
+
+ if (ELF32_ST_TYPE(sym.st_info) == STT_OBJECT
+ && sym.st_size == 4) {
+ Elf32_Shdr dhdr;
+ int val = 0;
+ char section_name[128];
+
+ parse_elf_section(&elf, sym.st_shndx, &dhdr, NULL);
+
+            /* For an explanation, refer to the _MSC_VER version of the code */
+ strcpy(section_name, (char *)(elf.buf + strtab_off32 + dhdr.sh_name));
+ /* log_msg("Section_name: %s, Section_type: %d\n", section_name, dhdr.sh_type); */
+
+ if (strcmp(section_name, ".bss")) {
+ if (sizeof(val) != sym.st_size) {
+ /* The target value is declared as an int in
+ * *_asm_*_offsets.c, which is 4 bytes on all
+ * targets we currently use. Complain loudly if
+ * this is not true.
+ */
+ log_msg("Symbol size is wrong\n");
+ goto bail;
+ }
+
+ memcpy(&val,
+ elf.buf + dhdr.sh_offset + sym.st_value,
+ sym.st_size);
+ }
+
+ if (!elf.le_data) {
+ log_msg("Big Endian data not supported yet!\n");
+ goto bail;
+ }
- /* Parse all Symbol Tables */
- if (elf.bits == 32)
- {
- Elf32_Shdr shdr;
- for (i = 0; i < elf.hdr32.e_shnum; i++)
- {
- parse_elf_section(&elf, i, &shdr, NULL);
-
- if (shdr.sh_type == SHT_SYMTAB)
- {
- for (ofst = shdr.sh_offset;
- ofst < shdr.sh_offset + shdr.sh_size;
- ofst += shdr.sh_entsize)
- {
- Elf32_Sym sym;
-
- parse_elf_symbol(&elf, ofst, &sym, NULL);
-
- /* For all OBJECTS (data objects), extract the value from the
- * proper data segment.
- */
- /* if (ELF32_ST_TYPE(sym.st_info) == STT_OBJECT && sym.st_name)
- log_msg("found data object %s\n",
- parse_elf_string_table(&elf,
- shdr.sh_link,
- sym.st_name));
- */
-
- if (ELF32_ST_TYPE(sym.st_info) == STT_OBJECT
- && sym.st_size == 4)
- {
- Elf32_Shdr dhdr;
- int val = 0;
- char section_name[128];
-
- parse_elf_section(&elf, sym.st_shndx, &dhdr, NULL);
-
- /* For explanition - refer to _MSC_VER version of code */
- strcpy(section_name, (char *)(elf.buf + strtab_off32 + dhdr.sh_name));
- /* log_msg("Section_name: %s, Section_type: %d\n", section_name, dhdr.sh_type); */
-
- if (strcmp(section_name, ".bss"))
- {
- if (sizeof(val) != sym.st_size)
- {
- /* The target value is declared as an int in
- * asm_*_offsets.c, which is 4 bytes on all
- * targets we currently use. Complain loudly if
- * this is not true.
- */
- log_msg("Symbol size is wrong\n");
- goto bail;
- }
-
- memcpy(&val,
- elf.buf + dhdr.sh_offset + sym.st_value,
- sym.st_size);
- }
-
- if (!elf.le_data)
- {
- log_msg("Big Endian data not supported yet!\n");
- goto bail;
- }
-
- switch (mode)
- {
- case OUTPUT_FMT_RVDS:
- printf("%-40s EQU %5d\n",
- parse_elf_string_table(&elf,
- shdr.sh_link,
- sym.st_name),
- val);
- break;
- case OUTPUT_FMT_GAS:
- printf(".equ %-40s, %5d\n",
- parse_elf_string_table(&elf,
- shdr.sh_link,
- sym.st_name),
- val);
- break;
- default:
- printf("%s = %d\n",
- parse_elf_string_table(&elf,
- shdr.sh_link,
- sym.st_name),
- val);
- }
- }
- }
+ switch (mode) {
+ case OUTPUT_FMT_RVDS:
+ printf("%-40s EQU %5d\n",
+ parse_elf_string_table(&elf,
+ shdr.sh_link,
+ sym.st_name),
+ val);
+ break;
+ case OUTPUT_FMT_GAS:
+ printf(".equ %-40s, %5d\n",
+ parse_elf_string_table(&elf,
+ shdr.sh_link,
+ sym.st_name),
+ val);
+ break;
+ default:
+ printf("%s = %d\n",
+ parse_elf_string_table(&elf,
+ shdr.sh_link,
+ sym.st_name),
+ val);
}
+ }
}
+ }
}
- else /* if (elf.bits == 64) */
- {
- Elf64_Shdr shdr;
- for (i = 0; i < elf.hdr64.e_shnum; i++)
- {
- parse_elf_section(&elf, i, NULL, &shdr);
-
- if (shdr.sh_type == SHT_SYMTAB)
- {
- for (ofst = shdr.sh_offset;
- ofst < shdr.sh_offset + shdr.sh_size;
- ofst += shdr.sh_entsize)
- {
- Elf64_Sym sym;
-
- parse_elf_symbol(&elf, ofst, NULL, &sym);
-
- /* For all OBJECTS (data objects), extract the value from the
- * proper data segment.
- */
- /* if (ELF64_ST_TYPE(sym.st_info) == STT_OBJECT && sym.st_name)
- log_msg("found data object %s\n",
- parse_elf_string_table(&elf,
- shdr.sh_link,
- sym.st_name));
- */
-
- if (ELF64_ST_TYPE(sym.st_info) == STT_OBJECT
- && sym.st_size == 4)
- {
- Elf64_Shdr dhdr;
- int val = 0;
- char section_name[128];
-
- parse_elf_section(&elf, sym.st_shndx, NULL, &dhdr);
-
- /* For explanition - refer to _MSC_VER version of code */
- strcpy(section_name, (char *)(elf.buf + strtab_off64 + dhdr.sh_name));
- /* log_msg("Section_name: %s, Section_type: %d\n", section_name, dhdr.sh_type); */
-
- if ((strcmp(section_name, ".bss")))
- {
- if (sizeof(val) != sym.st_size)
- {
- /* The target value is declared as an int in
- * asm_*_offsets.c, which is 4 bytes on all
- * targets we currently use. Complain loudly if
- * this is not true.
- */
- log_msg("Symbol size is wrong\n");
- goto bail;
- }
-
- memcpy(&val,
- elf.buf + dhdr.sh_offset + sym.st_value,
- sym.st_size);
- }
-
- if (!elf.le_data)
- {
- log_msg("Big Endian data not supported yet!\n");
- goto bail;
- }
-
- switch (mode)
- {
- case OUTPUT_FMT_RVDS:
- printf("%-40s EQU %5d\n",
- parse_elf_string_table(&elf,
- shdr.sh_link,
- sym.st_name),
- val);
- break;
- case OUTPUT_FMT_GAS:
- printf(".equ %-40s, %5d\n",
- parse_elf_string_table(&elf,
- shdr.sh_link,
- sym.st_name),
- val);
- break;
- default:
- printf("%s = %d\n",
- parse_elf_string_table(&elf,
- shdr.sh_link,
- sym.st_name),
- val);
- }
- }
- }
+ } else { /* if (elf.bits == 64) */
+ Elf64_Shdr shdr;
+ for (i = 0; i < elf.hdr64.e_shnum; i++) {
+ parse_elf_section(&elf, i, NULL, &shdr);
+
+ if (shdr.sh_type == SHT_SYMTAB) {
+ for (ofst = shdr.sh_offset;
+ ofst < shdr.sh_offset + shdr.sh_size;
+ ofst += shdr.sh_entsize) {
+ Elf64_Sym sym;
+
+ parse_elf_symbol(&elf, ofst, NULL, &sym);
+
+ /* For all OBJECTS (data objects), extract the value from the
+ * proper data segment.
+ */
+ /* if (ELF64_ST_TYPE(sym.st_info) == STT_OBJECT && sym.st_name)
+ log_msg("found data object %s\n",
+ parse_elf_string_table(&elf,
+ shdr.sh_link,
+ sym.st_name));
+ */
+
+ if (ELF64_ST_TYPE(sym.st_info) == STT_OBJECT
+ && sym.st_size == 4) {
+ Elf64_Shdr dhdr;
+ int val = 0;
+ char section_name[128];
+
+ parse_elf_section(&elf, sym.st_shndx, NULL, &dhdr);
+
+            /* For an explanation, refer to the _MSC_VER version of the code */
+ strcpy(section_name, (char *)(elf.buf + strtab_off64 + dhdr.sh_name));
+ /* log_msg("Section_name: %s, Section_type: %d\n", section_name, dhdr.sh_type); */
+
+ if ((strcmp(section_name, ".bss"))) {
+ if (sizeof(val) != sym.st_size) {
+ /* The target value is declared as an int in
+ * *_asm_*_offsets.c, which is 4 bytes on all
+ * targets we currently use. Complain loudly if
+ * this is not true.
+ */
+ log_msg("Symbol size is wrong\n");
+ goto bail;
+ }
+
+ memcpy(&val,
+ elf.buf + dhdr.sh_offset + sym.st_value,
+ sym.st_size);
+ }
+
+ if (!elf.le_data) {
+ log_msg("Big Endian data not supported yet!\n");
+ goto bail;
+ }
+
+ switch (mode) {
+ case OUTPUT_FMT_RVDS:
+ printf("%-40s EQU %5d\n",
+ parse_elf_string_table(&elf,
+ shdr.sh_link,
+ sym.st_name),
+ val);
+ break;
+ case OUTPUT_FMT_GAS:
+ printf(".equ %-40s, %5d\n",
+ parse_elf_string_table(&elf,
+ shdr.sh_link,
+ sym.st_name),
+ val);
+ break;
+ default:
+ printf("%s = %d\n",
+ parse_elf_string_table(&elf,
+ shdr.sh_link,
+ sym.st_name),
+ val);
}
+ }
}
+ }
}
+ }
- if (mode == OUTPUT_FMT_RVDS)
- printf(" END\n");
+ if (mode == OUTPUT_FMT_RVDS)
+ printf(" END\n");
- return 0;
+ return 0;
bail:
- log_msg("Parse error: File does not appear to be valid ELF32 or ELF64\n");
- return 1;
+ log_msg("Parse error: File does not appear to be valid ELF32 or ELF64\n");
+ return 1;
}
#endif
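
Concretely, for a hypothetical 4-byte symbol named frame_size holding the
value 16, the three printf formats above would emit roughly the following,
per the %-40s/%5d conversion specifiers:

frame_size                               EQU    16      (OUTPUT_FMT_RVDS)
.equ frame_size                              ,    16    (OUTPUT_FMT_GAS)
frame_size = 16                                         (default)
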
@@ -671,244 +594,222 @@ bail:
#define get_le32(x) ((*(x)) | (*(x+1)) << 8 |(*(x+2)) << 16 | (*(x+3)) << 24 )
#define get_le16(x) ((*(x)) | (*(x+1)) << 8)
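
These accessors build little-endian values one byte at a time, which keeps
the tool independent of the host's byte order and of alignment. A
self-contained check (illustration only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define get_le32(x) ((*(x)) | (*(x+1)) << 8 | (*(x+2)) << 16 | (*(x+3)) << 24)

int main(void) {
  const uint8_t buf[4] = {0x78, 0x56, 0x34, 0x12};
  printf("%x\n", get_le32(buf));  /* prints 12345678 on any host */
  return 0;
}
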
-int parse_coff(uint8_t *buf, size_t sz)
-{
- unsigned int nsections, symtab_ptr, symtab_sz, strtab_ptr;
- unsigned int sectionrawdata_ptr;
- unsigned int i;
- uint8_t *ptr;
- uint32_t symoffset;
-
- char **sectionlist; //this array holds all section names in their correct order.
- //it is used to check if the symbol is in .bss or .rdata section.
-
- nsections = get_le16(buf + 2);
- symtab_ptr = get_le32(buf + 8);
- symtab_sz = get_le32(buf + 12);
- strtab_ptr = symtab_ptr + symtab_sz * 18;
-
- if (nsections > 96)
- {
- log_msg("Too many sections\n");
- return 1;
- }
+int parse_coff(uint8_t *buf, size_t sz) {
+ unsigned int nsections, symtab_ptr, symtab_sz, strtab_ptr;
+ unsigned int sectionrawdata_ptr;
+ unsigned int i;
+ uint8_t *ptr;
+ uint32_t symoffset;
- sectionlist = malloc(nsections * sizeof(sectionlist));
+  char **sectionlist; // This array holds all section names in their correct order;
+  // it is used to check whether a symbol is in the .bss or .rdata section.
- if (sectionlist == NULL)
- {
- log_msg("Allocating first level of section list failed\n");
- return 1;
- }
+ nsections = get_le16(buf + 2);
+ symtab_ptr = get_le32(buf + 8);
+ symtab_sz = get_le32(buf + 12);
+ strtab_ptr = symtab_ptr + symtab_sz * 18;
- //log_msg("COFF: Found %u symbols in %u sections.\n", symtab_sz, nsections);
+ if (nsections > 96) {
+ log_msg("Too many sections\n");
+ return 1;
+ }
- /*
- The size of optional header is always zero for an obj file. So, the section header
- follows the file header immediately.
- */
+ sectionlist = malloc(nsections * sizeof(sectionlist));
- ptr = buf + 20; //section header
+ if (sectionlist == NULL) {
+ log_msg("Allocating first level of section list failed\n");
+ return 1;
+ }
- for (i = 0; i < nsections; i++)
- {
- char sectionname[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
- strncpy(sectionname, ptr, 8);
- //log_msg("COFF: Parsing section %s\n",sectionname);
+ // log_msg("COFF: Found %u symbols in %u sections.\n", symtab_sz, nsections);
- sectionlist[i] = malloc(strlen(sectionname) + 1);
+ /*
+    The size of the optional header is always zero for an object file, so the
+    section header immediately follows the file header.
+ */
- if (sectionlist[i] == NULL)
- {
- log_msg("Allocating storage for %s failed\n", sectionname);
- goto bail;
- }
- strcpy(sectionlist[i], sectionname);
+ ptr = buf + 20; // section header
- if (!strcmp(sectionname, ".rdata")) sectionrawdata_ptr = get_le32(ptr + 20);
+ for (i = 0; i < nsections; i++) {
+ char sectionname[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+ strncpy(sectionname, ptr, 8);
+ // log_msg("COFF: Parsing section %s\n",sectionname);
- ptr += 40;
- }
+ sectionlist[i] = malloc(strlen(sectionname) + 1);
- //log_msg("COFF: Symbol table at offset %u\n", symtab_ptr);
- //log_msg("COFF: raw data pointer ofset for section .rdata is %u\n", sectionrawdata_ptr);
-
- /* The compiler puts the data with non-zero offset in .rdata section, but puts the data with
- zero offset in .bss section. So, if the data in in .bss section, set offset=0.
- Note from Wiki: In an object module compiled from C, the bss section contains
- the local variables (but not functions) that were declared with the static keyword,
- except for those with non-zero initial values. (In C, static variables are initialized
- to zero by default.) It also contains the non-local (both extern and static) variables
- that are also initialized to zero (either explicitly or by default).
- */
- //move to symbol table
- /* COFF symbol table:
- offset field
- 0 Name(*)
- 8 Value
- 12 SectionNumber
- 14 Type
- 16 StorageClass
- 17 NumberOfAuxSymbols
- */
- ptr = buf + symtab_ptr;
-
- for (i = 0; i < symtab_sz; i++)
- {
- int16_t section = get_le16(ptr + 12); //section number
-
- if (section > 0 && ptr[16] == 2)
- {
- //if(section > 0 && ptr[16] == 3 && get_le32(ptr+8)) {
-
- if (get_le32(ptr))
- {
- char name[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
- strncpy(name, ptr, 8);
- //log_msg("COFF: Parsing symbol %s\n",name);
- /* The 64bit Windows compiler doesn't prefix with an _.
- * Check what's there, and bump if necessary
- */
- if (name[0] == '_')
- printf("%-40s EQU ", name + 1);
- else
- printf("%-40s EQU ", name);
- }
- else
- {
- //log_msg("COFF: Parsing symbol %s\n",
- // buf + strtab_ptr + get_le32(ptr+4));
- if ((buf + strtab_ptr + get_le32(ptr + 4))[0] == '_')
- printf("%-40s EQU ",
- buf + strtab_ptr + get_le32(ptr + 4) + 1);
- else
- printf("%-40s EQU ", buf + strtab_ptr + get_le32(ptr + 4));
- }
-
- if (!(strcmp(sectionlist[section-1], ".bss")))
- {
- symoffset = 0;
- }
- else
- {
- symoffset = get_le32(buf + sectionrawdata_ptr + get_le32(ptr + 8));
- }
+ if (sectionlist[i] == NULL) {
+ log_msg("Allocating storage for %s failed\n", sectionname);
+ goto bail;
+ }
+ strcpy(sectionlist[i], sectionname);
+
+ if (!strcmp(sectionname, ".rdata")) sectionrawdata_ptr = get_le32(ptr + 20);
+
+ ptr += 40;
+ }
+
+ // log_msg("COFF: Symbol table at offset %u\n", symtab_ptr);
+ // log_msg("COFF: raw data pointer ofset for section .rdata is %u\n", sectionrawdata_ptr);
+
+  /* The compiler puts data with a non-zero offset in the .rdata section, but data
+     with a zero offset in the .bss section. So, if the data is in the .bss section, set offset=0.
+ Note from Wiki: In an object module compiled from C, the bss section contains
+ the local variables (but not functions) that were declared with the static keyword,
+ except for those with non-zero initial values. (In C, static variables are initialized
+ to zero by default.) It also contains the non-local (both extern and static) variables
+ that are also initialized to zero (either explicitly or by default).
+ */
+ // move to symbol table
+ /* COFF symbol table:
+ offset field
+ 0 Name(*)
+ 8 Value
+ 12 SectionNumber
+ 14 Type
+ 16 StorageClass
+ 17 NumberOfAuxSymbols
+ */
+ ptr = buf + symtab_ptr;
+
+ for (i = 0; i < symtab_sz; i++) {
+ int16_t section = get_le16(ptr + 12); // section number
+
+ if (section > 0 && ptr[16] == 2) {
+ // if(section > 0 && ptr[16] == 3 && get_le32(ptr+8)) {
+
+ if (get_le32(ptr)) {
+ char name[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+ strncpy(name, ptr, 8);
+ // log_msg("COFF: Parsing symbol %s\n",name);
+        /* The 64-bit Windows compiler doesn't prefix symbols with an underscore.
+         * Check what's there, and skip the leading '_' if necessary.
+         */
+ if (name[0] == '_')
+ printf("%-40s EQU ", name + 1);
+ else
+ printf("%-40s EQU ", name);
+ } else {
+ // log_msg("COFF: Parsing symbol %s\n",
+ // buf + strtab_ptr + get_le32(ptr+4));
+ if ((buf + strtab_ptr + get_le32(ptr + 4))[0] == '_')
+ printf("%-40s EQU ",
+ buf + strtab_ptr + get_le32(ptr + 4) + 1);
+ else
+ printf("%-40s EQU ", buf + strtab_ptr + get_le32(ptr + 4));
+ }
- //log_msg(" Section: %d\n",section);
- //log_msg(" Class: %d\n",ptr[16]);
- //log_msg(" Address: %u\n",get_le32(ptr+8));
- //log_msg(" Offset: %u\n", symoffset);
+ if (!(strcmp(sectionlist[section - 1], ".bss"))) {
+ symoffset = 0;
+ } else {
+ symoffset = get_le32(buf + sectionrawdata_ptr + get_le32(ptr + 8));
+ }
- printf("%5d\n", symoffset);
- }
+ // log_msg(" Section: %d\n",section);
+ // log_msg(" Class: %d\n",ptr[16]);
+ // log_msg(" Address: %u\n",get_le32(ptr+8));
+ // log_msg(" Offset: %u\n", symoffset);
- ptr += 18;
+ printf("%5d\n", symoffset);
}
- printf(" END\n");
+ ptr += 18;
+ }
- for (i = 0; i < nsections; i++)
- {
- free(sectionlist[i]);
- }
+ printf(" END\n");
+
+ for (i = 0; i < nsections; i++) {
+ free(sectionlist[i]);
+ }
- free(sectionlist);
+ free(sectionlist);
- return 0;
+ return 0;
bail:
- for (i = 0; i < nsections; i++)
- {
- free(sectionlist[i]);
- }
+ for (i = 0; i < nsections; i++) {
+ free(sectionlist[i]);
+ }
- free(sectionlist);
+ free(sectionlist);
- return 1;
+ return 1;
}
#endif /* defined(_MSC_VER) || defined(__MINGW32__) || defined(__CYGWIN__) */
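
The 18-byte records that parse_coff() walks match the offset table in its
comment block. As a reading aid only, the equivalent struct is sketched
below; the real code deliberately sticks to raw get_le*() reads, which
sidesteps padding (most compilers would pad this struct to 20 bytes):

#include <stdint.h>

/* Reading aid only -- mirrors the 18-byte on-disk COFF symbol record. */
struct coff_symbol {        /* byte offset within the record */
  uint8_t  name[8];         /*  0: short name, or string-table reference */
  uint32_t value;           /*  8 */
  int16_t  section_number;  /* 12 */
  uint16_t type;            /* 14 */
  uint8_t  storage_class;   /* 16 */
  uint8_t  num_aux_symbols; /* 17 */
};
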
-int main(int argc, char **argv)
-{
- output_fmt_t mode = OUTPUT_FMT_PLAIN;
- const char *f;
- uint8_t *file_buf;
- int res;
- FILE *fp;
- long int file_size;
-
- if (argc < 2 || argc > 3)
- {
- fprintf(stderr, "Usage: %s [output format] <obj file>\n\n", argv[0]);
- fprintf(stderr, " <obj file>\tobject file to parse\n");
- fprintf(stderr, "Output Formats:\n");
- fprintf(stderr, " gas - compatible with GNU assembler\n");
- fprintf(stderr, " rvds - compatible with armasm\n");
- goto bail;
- }
-
- f = argv[2];
-
- if (!strcmp(argv[1], "rvds"))
- mode = OUTPUT_FMT_RVDS;
- else if (!strcmp(argv[1], "gas"))
- mode = OUTPUT_FMT_GAS;
- else
- f = argv[1];
-
- fp = fopen(f, "rb");
-
- if (!fp)
- {
- perror("Unable to open file");
- goto bail;
- }
-
- if (fseek(fp, 0, SEEK_END))
- {
- perror("stat");
- goto bail;
- }
-
- file_size = ftell(fp);
- file_buf = malloc(file_size);
-
- if (!file_buf)
- {
- perror("malloc");
- goto bail;
- }
-
- rewind(fp);
-
- if (fread(file_buf, sizeof(char), file_size, fp) != file_size)
- {
- perror("read");
- goto bail;
- }
-
- if (fclose(fp))
- {
- perror("close");
- goto bail;
- }
+int main(int argc, char **argv) {
+ output_fmt_t mode = OUTPUT_FMT_PLAIN;
+ const char *f;
+ uint8_t *file_buf;
+ int res;
+ FILE *fp;
+ long int file_size;
+
+ if (argc < 2 || argc > 3) {
+ fprintf(stderr, "Usage: %s [output format] <obj file>\n\n", argv[0]);
+ fprintf(stderr, " <obj file>\tobject file to parse\n");
+ fprintf(stderr, "Output Formats:\n");
+ fprintf(stderr, " gas - compatible with GNU assembler\n");
+ fprintf(stderr, " rvds - compatible with armasm\n");
+ goto bail;
+ }
+
+ f = argv[2];
+
+ if (!strcmp(argv[1], "rvds"))
+ mode = OUTPUT_FMT_RVDS;
+ else if (!strcmp(argv[1], "gas"))
+ mode = OUTPUT_FMT_GAS;
+ else
+ f = argv[1];
+
+ fp = fopen(f, "rb");
+
+ if (!fp) {
+ perror("Unable to open file");
+ goto bail;
+ }
+
+ if (fseek(fp, 0, SEEK_END)) {
+ perror("stat");
+ goto bail;
+ }
+
+ file_size = ftell(fp);
+ file_buf = malloc(file_size);
+
+ if (!file_buf) {
+ perror("malloc");
+ goto bail;
+ }
+
+ rewind(fp);
+
+ if (fread(file_buf, sizeof(char), file_size, fp) != file_size) {
+ perror("read");
+ goto bail;
+ }
+
+ if (fclose(fp)) {
+ perror("close");
+ goto bail;
+ }
#if defined(__GNUC__) && __GNUC__
#if defined(__MACH__)
- res = parse_macho(file_buf, file_size);
+ res = parse_macho(file_buf, file_size);
#elif defined(__ELF__)
- res = parse_elf(file_buf, file_size, mode);
+ res = parse_elf(file_buf, file_size, mode);
#endif
#endif
#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__CYGWIN__)
- res = parse_coff(file_buf, file_size);
+ res = parse_coff(file_buf, file_size);
#endif
- free(file_buf);
+ free(file_buf);
- if (!res)
- return EXIT_SUCCESS;
+ if (!res)
+ return EXIT_SUCCESS;
bail:
- return EXIT_FAILURE;
+ return EXIT_FAILURE;
}
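
Putting it together: the tool takes an optional output format and an object
file, exactly as the usage text above describes. The rvds flavor is the one
the MSVS batch files in this patch depend on, e.g.:

obj_int_extract.exe rvds "vp8_asm_com_offsets.obj" > "vp8_asm_com_offsets.asm"
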
diff --git a/libvpx/build/make/rtcd.sh b/libvpx/build/make/rtcd.sh
index ddf9e09..6cc3684 100755
--- a/libvpx/build/make/rtcd.sh
+++ b/libvpx/build/make/rtcd.sh
@@ -59,13 +59,13 @@ for f in $defs_file; do [ -f "$f" ] || usage; done
# Routines for the RTCD DSL to call
#
prototype() {
- local rtyp
+ rtyp=""
case "$1" in
unsigned) rtyp="$1 "; shift;;
esac
rtyp="${rtyp}$1"
- local fn="$2"
- local args="$3"
+ fn="$2"
+ args="$3"
eval "${2}_rtyp='$rtyp'"
eval "${2}_args='$3'"
@@ -74,7 +74,7 @@ prototype() {
}
specialize() {
- local fn="$1"
+ fn="$1"
shift
for opt in "$@"; do
eval "${fn}_${opt}=${fn}_${opt}"
@@ -84,13 +84,13 @@ specialize() {
require() {
for fn in $ALL_FUNCS; do
for opt in "$@"; do
- local ofn=$(eval "echo \$${fn}_${opt}")
+ ofn=$(eval "echo \$${fn}_${opt}")
[ -z "$ofn" ] && continue
# if we already have a default, then we can disable it, as we know
# we can do better.
- local best=$(eval "echo \$${fn}_default")
- local best_ofn=$(eval "echo \$${best}")
+ best=$(eval "echo \$${fn}_default")
+ best_ofn=$(eval "echo \$${best}")
[ -n "$best" ] && [ "$best_ofn" != "$ofn" ] && eval "${best}_link=false"
eval "${fn}_default=${fn}_${opt}"
eval "${fn}_${opt}_link=true"
@@ -121,15 +121,15 @@ process_forward_decls() {
determine_indirection() {
[ "$CONFIG_RUNTIME_CPU_DETECT" = "yes" ] || require $ALL_ARCHS
for fn in $ALL_FUNCS; do
- local n=""
- local rtyp="$(eval "echo \$${fn}_rtyp")"
- local args="$(eval "echo \"\$${fn}_args\"")"
- local dfn="$(eval "echo \$${fn}_default")"
+ n=""
+ rtyp="$(eval "echo \$${fn}_rtyp")"
+ args="$(eval "echo \"\$${fn}_args\"")"
+ dfn="$(eval "echo \$${fn}_default")"
dfn=$(eval "echo \$${dfn}")
for opt in "$@"; do
- local ofn=$(eval "echo \$${fn}_${opt}")
+ ofn=$(eval "echo \$${fn}_${opt}")
[ -z "$ofn" ] && continue
- local link=$(eval "echo \$${fn}_${opt}_link")
+ link=$(eval "echo \$${fn}_${opt}_link")
[ "$link" = "false" ] && continue
n="${n}x"
done
@@ -143,12 +143,12 @@ determine_indirection() {
declare_function_pointers() {
for fn in $ALL_FUNCS; do
- local rtyp="$(eval "echo \$${fn}_rtyp")"
- local args="$(eval "echo \"\$${fn}_args\"")"
- local dfn="$(eval "echo \$${fn}_default")"
+ rtyp="$(eval "echo \$${fn}_rtyp")"
+ args="$(eval "echo \"\$${fn}_args\"")"
+ dfn="$(eval "echo \$${fn}_default")"
dfn=$(eval "echo \$${dfn}")
for opt in "$@"; do
- local ofn=$(eval "echo \$${fn}_${opt}")
+ ofn=$(eval "echo \$${fn}_${opt}")
[ -z "$ofn" ] && continue
echo "$rtyp ${ofn}($args);"
done
@@ -163,20 +163,20 @@ declare_function_pointers() {
set_function_pointers() {
for fn in $ALL_FUNCS; do
- local n=""
- local rtyp="$(eval "echo \$${fn}_rtyp")"
- local args="$(eval "echo \"\$${fn}_args\"")"
- local dfn="$(eval "echo \$${fn}_default")"
+ n=""
+ rtyp="$(eval "echo \$${fn}_rtyp")"
+ args="$(eval "echo \"\$${fn}_args\"")"
+ dfn="$(eval "echo \$${fn}_default")"
dfn=$(eval "echo \$${dfn}")
if $(eval "echo \$${fn}_indirect"); then
echo " $fn = $dfn;"
for opt in "$@"; do
- local ofn=$(eval "echo \$${fn}_${opt}")
+ ofn=$(eval "echo \$${fn}_${opt}")
[ -z "$ofn" ] && continue
[ "$ofn" = "$dfn" ] && continue;
- local link=$(eval "echo \$${fn}_${opt}_link")
+ link=$(eval "echo \$${fn}_${opt}_link")
[ "$link" = "false" ] && continue
- local cond="$(eval "echo \$have_${opt}")"
+ cond="$(eval "echo \$have_${opt}")"
echo " if (${cond}) $fn = $ofn;"
done
fi
@@ -185,7 +185,7 @@ set_function_pointers() {
}
filter() {
- local filtered
+ filtered=""
for opt in "$@"; do
[ -z $(eval "echo \$disable_${opt}") ] && filtered="$filtered $opt"
done
@@ -196,8 +196,9 @@ filter() {
# Helper functions for generating the arch specific RTCD files
#
common_top() {
- local outfile_basename=$(basename ${symbol:-rtcd.h})
- local include_guard=$(echo $outfile_basename | tr '[a-z]' '[A-Z]' | tr -c '[A-Z]' _)
+ outfile_basename=$(basename ${symbol:-rtcd})
+ include_guard=$(echo $outfile_basename | tr '[a-z]' '[A-Z]' | \
+ tr -c '[A-Z0-9]' _)H_
cat <<EOF
#ifndef ${include_guard}
#define ${include_guard}
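
The trailing )H_ above is deliberate: the newline emitted by echo is itself
folded to an underscore by tr -c, so the guard comes out well-formed. For
example, with symbol=vp8_rtcd:

  vp8_rtcd          (basename of the symbol)
  VP8_RTCD          (tr '[a-z]' '[A-Z]'; trailing newline still attached)
  VP8_RTCD_         (tr -c '[A-Z0-9]' _ converts the newline too)
  VP8_RTCD_H_       (literal H_ appended)
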
@@ -227,7 +228,7 @@ x86() {
# Assign the helper variable for each enabled extension
for opt in $ALL_ARCHS; do
- local uc=$(echo $opt | tr '[a-z]' '[A-Z]')
+ uc=$(echo $opt | tr '[a-z]' '[A-Z]')
eval "have_${opt}=\"flags & HAS_${uc}\""
done
@@ -254,7 +255,7 @@ arm() {
# Assign the helper variable for each enabled extension
for opt in $ALL_ARCHS; do
- local uc=$(echo $opt | tr '[a-z]' '[A-Z]')
+ uc=$(echo $opt | tr '[a-z]' '[A-Z]')
eval "have_${opt}=\"flags & HAS_${uc}\""
done
diff --git a/libvpx/build/make/thumb.pm b/libvpx/build/make/thumb.pm
new file mode 100644
index 0000000..f347287
--- /dev/null
+++ b/libvpx/build/make/thumb.pm
@@ -0,0 +1,70 @@
+#!/usr/bin/perl
+##
+## Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+package thumb;
+
+sub FixThumbInstructions($$)
+{
+ my $short_branches = $_[1];
+ my $branch_shift_offset = $short_branches ? 1 : 0;
+
+ # Write additions with shifts, such as "add r10, r11, lsl #8",
+ # in three operand form, "add r10, r10, r11, lsl #8".
+ s/(add\s+)(r\d+),\s*(r\d+),\s*(lsl #\d+)/$1$2, $2, $3, $4/g;
+
+ # Convert additions with a non-constant shift into a sequence
+ # with left shift, addition and a right shift (to restore the
+ # register to the original value). Currently the right shift
+ # isn't necessary in the code base since the values in these
+    # registers aren't used, but the shift is done for consistency.
+ # This converts instructions such as "add r12, r12, r5, lsl r4"
+ # into the sequence "lsl r5, r4", "add r12, r12, r5", "lsr r5, r4".
+ s/^(\s*)(add)(\s+)(r\d+),\s*(r\d+),\s*(r\d+),\s*lsl (r\d+)/$1lsl$3$6, $7\n$1$2$3$4, $5, $6\n$1lsr$3$6, $7/g;
+
+ # Convert loads with right shifts in the indexing into a
+ # sequence of an add, load and sub. This converts
+ # "ldrb r4, [r9, lr, asr #1]" into "add r9, r9, lr, asr #1",
+ # "ldrb r9, [r9]", "sub r9, r9, lr, asr #1".
+ s/^(\s*)(ldrb)(\s+)(r\d+),\s*\[(\w+),\s*(\w+),\s*(asr #\d+)\]/$1add $3$5, $5, $6, $7\n$1$2$3$4, [$5]\n$1sub $3$5, $5, $6, $7/g;
+
+ # Convert register indexing with writeback into a separate add
+ # instruction. This converts "ldrb r12, [r1, r2]!" into
+ # "ldrb r12, [r1, r2]", "add r1, r1, r2".
+ s/^(\s*)(ldrb)(\s+)(r\d+),\s*\[(\w+),\s*(\w+)\]!/$1$2$3$4, [$5, $6]\n$1add $3$5, $6/g;
+
+ # Convert negative register indexing into separate sub/add instructions.
+ # This converts "ldrne r4, [src, -pstep, lsl #1]" into
+ # "subne src, src, pstep, lsl #1", "ldrne r4, [src]",
+ # "addne src, src, pstep, lsl #1". In a couple of cases where
+ # this is used, it's used for two subsequent load instructions,
+ # where a hand-written version of it could merge two subsequent
+ # add and sub instructions.
+ s/^(\s*)((ldr|str)(ne)?)(\s+)(r\d+),\s*\[(\w+), -([^\]]+)\]/$1sub$4$5$7, $7, $8\n$1$2$5$6, [$7]\n$1add$4$5$7, $7, $8/g;
+
+ # Convert register post indexing to a separate add instruction.
+ # This converts "ldrneb r9, [r0], r2" into "ldrneb r9, [r0]",
+ # "add r0, r2".
+ s/^(\s*)((ldr|str)(ne)?[bhd]?)(\s+)(\w+),(\s*\w+,)?\s*\[(\w+)\],\s*(\w+)/$1$2$5$6,$7 [$8]\n$1add$4$5$8, $8, $9/g;
+
+ # Convert a conditional addition to the pc register into a series of
+ # instructions. This converts "addlt pc, pc, r3, lsl #2" into
+ # "itttt lt", "movlt.n r12, pc", "addlt.w r12, #12",
+ # "addlt.w r12, r12, r3, lsl #2", "movlt.n pc, r12".
+ # This assumes that r12 is free at this point.
+ s/^(\s*)addlt(\s+)pc,\s*pc,\s*(\w+),\s*lsl\s*#(\d+)/$1itttt$2lt\n$1movlt.n$2r12, pc\n$1addlt.w$2r12, #12\n$1addlt.w$2r12, r12, $3, lsl #($4-$branch_shift_offset)\n$1movlt.n$2pc, r12/g;
+
+ # Convert "mov pc, lr" into "bx lr", since the former only works
+ # for switching from arm to thumb (and only in armv7), but not
+ # from thumb to arm.
+ s/mov(\s*)pc\s*,\s*lr/bx$1lr/g;
+}
+
+1;
diff --git a/libvpx/build/x86-msvs/obj_int_extract.bat b/libvpx/build/x86-msvs/obj_int_extract.bat
index 1bb8653..47fef97 100644
--- a/libvpx/build/x86-msvs/obj_int_extract.bat
+++ b/libvpx/build/x86-msvs/obj_int_extract.bat
@@ -7,9 +7,17 @@ REM in the file PATENTS. All contributing project authors may
REM be found in the AUTHORS file in the root of the source tree.
echo on
-cl /I "./" /I "%1" /nologo /c "%1/vp8/common/asm_com_offsets.c"
-cl /I "./" /I "%1" /nologo /c "%1/vp8/decoder/asm_dec_offsets.c"
-cl /I "./" /I "%1" /nologo /c "%1/vp8/encoder/asm_enc_offsets.c"
-obj_int_extract.exe rvds "asm_com_offsets.obj" > "asm_com_offsets.asm"
-obj_int_extract.exe rvds "asm_dec_offsets.obj" > "asm_dec_offsets.asm"
-obj_int_extract.exe rvds "asm_enc_offsets.obj" > "asm_enc_offsets.asm"
+cl /I "./" /I "%1" /nologo /c "%1/vp9/common/vp9_asm_com_offsets.c"
+cl /I "./" /I "%1" /nologo /c "%1/vp9/decoder/vp9_asm_dec_offsets.c"
+cl /I "./" /I "%1" /nologo /c "%1/vp9/encoder/vp9_asm_enc_offsets.c"
+obj_int_extract.exe rvds "vp9_asm_com_offsets.obj" > "vp9_asm_com_offsets.asm"
+obj_int_extract.exe rvds "vp9_asm_dec_offsets.obj" > "vp9_asm_dec_offsets.asm"
+obj_int_extract.exe rvds "vp9_asm_enc_offsets.obj" > "vp9_asm_enc_offsets.asm"
+
+cl /I "./" /I "%1" /nologo /c "%1/vp8/common/vp8_asm_com_offsets.c"
+cl /I "./" /I "%1" /nologo /c "%1/vp8/decoder/vp8_asm_dec_offsets.c"
+cl /I "./" /I "%1" /nologo /c "%1/vp8/encoder/vp8_asm_enc_offsets.c"
+obj_int_extract.exe rvds "vp8_asm_com_offsets.obj" > "vp8_asm_com_offsets.asm"
+obj_int_extract.exe rvds "vp8_asm_dec_offsets.obj" > "vp8_asm_dec_offsets.asm"
+obj_int_extract.exe rvds "vp8_asm_enc_offsets.obj" > "vp8_asm_enc_offsets.asm"
+
diff --git a/libvpx/build/x86-msvs/yasm.rules b/libvpx/build/x86-msvs/yasm.rules
deleted file mode 100644
index ee1fefb..0000000
--- a/libvpx/build/x86-msvs/yasm.rules
+++ /dev/null
@@ -1,115 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<VisualStudioToolFile
- Name="Yasm"
- Version="8.00"
- >
- <Rules>
- <CustomBuildRule
- Name="YASM"
- DisplayName="Yasm Assembler"
- CommandLine="yasm -Xvc -f $(PlatformName) [AllOptions] [AdditionalOptions] [Inputs]"
- Outputs="[$ObjectFileName]"
- FileExtensions="*.asm"
- ExecutionDescription="Assembling $(InputFileName)"
- ShowOnlyRuleProperties="false"
- >
- <Properties>
- <StringProperty
- Name="Defines"
- DisplayName="Definitions"
- Category="Pre-Defined Symbols"
- Description="Specify pre-defined symbols (&apos;symbol&apos; or &apos;symbol = value&apos;) "
- Switch="-D [value]"
- Delimited="true"
- Inheritable="true"
- />
- <StringProperty
- Name="IncludePaths"
- DisplayName="Include Paths"
- Category="Configuration"
- Description="Set the paths for any additional include files"
- Switch="-I [value]"
- Delimited="true"
- Inheritable="true"
- />
- <StringProperty
- Name="UnDefines"
- DisplayName="Remove Definitions"
- Category="Pre-Defined Symbols"
- Description="Remove pre-defined symbols "
- Switch="-U [value]"
- Delimited="true"
- Inheritable="true"
- />
- <StringProperty
- Name="ObjectFileName"
- DisplayName="Object File Name"
- Category="Output"
- Description="Select the output file name"
- Switch="-o [value]"
- DefaultValue="$(IntDir)\$(InputName).obj"
- />
- <StringProperty
- Name="ListFileName"
- DisplayName="List File Name"
- Category="Output"
- Description="Select an output listing by setting its file name"
- Switch="-l [value]"
- />
- <StringProperty
- Name="PreIncludeFile"
- DisplayName="Pre Include File"
- Category="Configuration"
- Description="Select a pre-included file by setting its name"
- Switch="-P [value]"
- />
- <BooleanProperty
- Name="Debug"
- DisplayName="Debug Information"
- Category="Output"
- Description="Generate debugging information"
- Switch="-g cv8"
- />
- <EnumProperty
- Name="PreProc"
- DisplayName="Pre-Processor"
- Category="Configuration"
- Description="Select the pre-processor (&apos;nasm&apos; or &apos;raw&apos;)"
- >
- <Values>
- <EnumValue
- Value="0"
- Switch="-rnasm"
- DisplayName="Nasm "
- />
- <EnumValue
- Value="1"
- Switch="-rraw"
- DisplayName="Raw"
- />
- </Values>
- </EnumProperty>
- <EnumProperty
- Name="Parser"
- DisplayName="Parser"
- Category="Configuration"
- Description="Select the parser for Intel (&apos;nasm&apos;) or AT&amp;T ( &apos;gas&apos;) syntax"
- >
- <Values>
- <EnumValue
- Value="0"
- Switch="-pnasm"
- DisplayName="Nasm"
- />
- <EnumValue
- Value="1"
- Switch="-pgas"
- DisplayName="Gas"
- />
- </Values>
- </EnumProperty>
- </Properties>
- </CustomBuildRule>
- </Rules>
-</VisualStudioToolFile>
-
diff --git a/libvpx/configure b/libvpx/configure
index b3c5fe9..28676fb 100755
--- a/libvpx/configure
+++ b/libvpx/configure
@@ -34,6 +34,7 @@ Advanced options:
${toggle_md5} support for output of checksum data
${toggle_static_msvcrt} use static MSVCRT (VS builds only)
${toggle_vp8} VP8 codec support
+ ${toggle_vp9} VP9 codec support
${toggle_internal_stats} output of encoder internal stats for debug, if supported (encoders)
${toggle_mem_tracker} track memory usage
${toggle_postproc} postprocessing
@@ -97,6 +98,7 @@ all_platforms="${all_platforms} armv7-darwin-gcc" #neon Cortex-A8
all_platforms="${all_platforms} armv7-linux-rvct" #neon Cortex-A8
all_platforms="${all_platforms} armv7-linux-gcc" #neon Cortex-A8
all_platforms="${all_platforms} armv7-none-rvct" #neon Cortex-A8
+all_platforms="${all_platforms} armv7-win32-vs11"
all_platforms="${all_platforms} mips32-linux-gcc"
all_platforms="${all_platforms} ppc32-darwin8-gcc"
all_platforms="${all_platforms} ppc32-darwin9-gcc"
@@ -105,6 +107,7 @@ all_platforms="${all_platforms} ppc64-darwin8-gcc"
all_platforms="${all_platforms} ppc64-darwin9-gcc"
all_platforms="${all_platforms} ppc64-linux-gcc"
all_platforms="${all_platforms} sparc-solaris-gcc"
+all_platforms="${all_platforms} x86-android-gcc"
all_platforms="${all_platforms} x86-darwin8-gcc"
all_platforms="${all_platforms} x86-darwin8-icc"
all_platforms="${all_platforms} x86-darwin9-gcc"
@@ -120,6 +123,8 @@ all_platforms="${all_platforms} x86-win32-gcc"
all_platforms="${all_platforms} x86-win32-vs7"
all_platforms="${all_platforms} x86-win32-vs8"
all_platforms="${all_platforms} x86-win32-vs9"
+all_platforms="${all_platforms} x86-win32-vs10"
+all_platforms="${all_platforms} x86-win32-vs11"
all_platforms="${all_platforms} x86_64-darwin9-gcc"
all_platforms="${all_platforms} x86_64-darwin10-gcc"
all_platforms="${all_platforms} x86_64-darwin11-gcc"
@@ -130,6 +135,8 @@ all_platforms="${all_platforms} x86_64-solaris-gcc"
all_platforms="${all_platforms} x86_64-win64-gcc"
all_platforms="${all_platforms} x86_64-win64-vs8"
all_platforms="${all_platforms} x86_64-win64-vs9"
+all_platforms="${all_platforms} x86_64-win64-vs10"
+all_platforms="${all_platforms} x86_64-win64-vs11"
all_platforms="${all_platforms} universal-darwin8-gcc"
all_platforms="${all_platforms} universal-darwin9-gcc"
all_platforms="${all_platforms} universal-darwin10-gcc"
@@ -176,19 +183,24 @@ enable os_support
enable temporal_denoising
[ -d ${source_path}/../include ] && enable alt_tree_layout
-for d in vp8; do
+for d in vp8 vp9; do
[ -d ${source_path}/${d} ] && disable alt_tree_layout;
done
if ! enabled alt_tree_layout; then
# development environment
[ -d ${source_path}/vp8 ] && CODECS="${CODECS} vp8_encoder vp8_decoder"
+[ -d ${source_path}/vp9 ] && CODECS="${CODECS} vp9_encoder vp9_decoder"
else
# customer environment
[ -f ${source_path}/../include/vpx/vp8cx.h ] && CODECS="${CODECS} vp8_encoder"
[ -f ${source_path}/../include/vpx/vp8dx.h ] && CODECS="${CODECS} vp8_decoder"
+[ -f ${source_path}/../include/vpx/vp9cx.h ] && CODECS="${CODECS} vp9_encoder"
+[ -f ${source_path}/../include/vpx/vp9dx.h ] && CODECS="${CODECS} vp9_decoder"
[ -f ${source_path}/../include/vpx/vp8cx.h ] || disable vp8_encoder
[ -f ${source_path}/../include/vpx/vp8dx.h ] || disable vp8_decoder
+[ -f ${source_path}/../include/vpx/vp9cx.h ] || disable vp9_encoder
+[ -f ${source_path}/../include/vpx/vp9dx.h ] || disable vp9_decoder
[ -f ${source_path}/../lib/*/*mt.lib ] && soft_enable static_msvcrt
fi
@@ -230,6 +242,13 @@ HAVE_LIST="
sys_mman_h
unistd_h
"
+EXPERIMENT_LIST="
+ oneshotq
+ multiple_arf
+ non420
+ alpha
+ balanced_coeftree
+"
CONFIG_LIST="
external_build
install_docs
@@ -276,8 +295,12 @@ CONFIG_LIST="
unit_tests
multi_res_encoding
temporal_denoising
+ experimental
+ decrypt
+ ${EXPERIMENT_LIST}
"
CMDLINE_SELECT="
+ external_build
extra_warnings
werror
install_docs
@@ -291,6 +314,7 @@ CMDLINE_SELECT="
optimizations
ccache
runtime_cpu_detect
+ thumb
libs
examples
@@ -322,6 +346,8 @@ CMDLINE_SELECT="
unit_tests
multi_res_encoding
temporal_denoising
+ experimental
+ decrypt
"
process_cmdline() {
@@ -329,6 +355,18 @@ process_cmdline() {
optval="${opt#*=}"
case "$opt" in
--disable-codecs) for c in ${CODECS}; do disable $c; done ;;
+ --enable-?*|--disable-?*)
+ eval `echo "$opt" | sed 's/--/action=/;s/-/ option=/;s/-/_/g'`
+ if echo "${EXPERIMENT_LIST}" | grep "^ *$option\$" >/dev/null; then
+ if enabled experimental; then
+ $action $option
+ else
+ log_echo "Ignoring $opt -- not in experimental mode."
+ fi
+ else
+ process_common_cmdline $opt
+ fi
+ ;;
*) process_common_cmdline "$opt"
;;
esac
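
Traced on a concrete flag such as --enable-multiple-arf, the sed/eval line
above produces:

  action=enable option=multiple_arf

so the branch runs "enable multiple_arf" only when experimental mode is on
and multiple_arf appears in EXPERIMENT_LIST; otherwise the option is logged
and ignored.
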
@@ -464,7 +502,7 @@ process_detect() {
fi
fi
fi
- if [ -z "$CC" ]; then
+ if [ -z "$CC" ] || enabled external_build; then
echo "Bypassing toolchain for environment detection."
enable external_build
check_header() {
@@ -473,6 +511,7 @@ process_detect() {
shift
var=`echo $header | sed 's/[^A-Za-z0-9_]/_/g'`
disable $var
+ # Headers common to all environments
case $header in
stdio.h)
true;
@@ -484,6 +523,25 @@ process_detect() {
done
${result:-true}
esac && enable $var
+
+ # Specialize windows and POSIX environments.
+ case $toolchain in
+ *-win*-*)
+ case $header-$toolchain in
+ stdint*-gcc) true;;
+ *) false;;
+ esac && enable $var
+ ;;
+ *)
+ case $header in
+ stdint.h) true;;
+ pthread.h) true;;
+ sys/mman.h) true;;
+ unistd.h) true;;
+ *) false;;
+ esac && enable $var
+ esac
+ enabled $var
}
check_ld() {
true
@@ -497,6 +555,7 @@ EOF
check_header stdint.h
check_header pthread.h
check_header sys/mman.h
+ check_header unistd.h # for sysconf(3) and friends.
check_header vpx/vpx_integer.h -I${source_path} && enable vpx_ports
}
@@ -537,10 +596,14 @@ process_toolchain() {
check_add_cflags -Wpointer-arith
check_add_cflags -Wtype-limits
check_add_cflags -Wcast-qual
+ check_add_cflags -Wvla
check_add_cflags -Wimplicit-function-declaration
check_add_cflags -Wuninitialized
check_add_cflags -Wunused-variable
- check_add_cflags -Wunused-but-set-variable
+ case ${CC} in
+ *clang*) ;;
+ *) check_add_cflags -Wunused-but-set-variable ;;
+ esac
enabled extra_warnings || check_add_cflags -Wno-unused-function
fi
@@ -585,7 +648,18 @@ process_toolchain() {
vs*) enable msvs
enable solution
vs_version=${tgt_cc##vs}
+ case $vs_version in
+ [789])
+ VCPROJ_SFX=vcproj
+ gen_vcproj_cmd=${source_path}/build/make/gen_msvs_proj.sh
+ ;;
+ 10|11)
+ VCPROJ_SFX=vcxproj
+ gen_vcproj_cmd=${source_path}/build/make/gen_msvs_vcxproj.sh
+ ;;
+ esac
all_targets="${all_targets} solution"
+ INLINE="__forceinline"
;;
esac
@@ -596,7 +670,7 @@ process_toolchain() {
enabled postproc || die "postproc_visualizer requires postproc to be enabled"
fi
- # Enable unit tests if we have a working C++ compiler
+ # Enable unit tests by default if we have a working C++ compiler.
case "$toolchain" in
*-vs*)
soft_enable unit_tests
@@ -604,8 +678,12 @@ process_toolchain() {
*-android-*)
# GTestLog must be modified to use Android logging utilities.
;;
+ *-darwin-*)
+ # iOS/ARM builds do not work with gtest. This does not match
+ # x86 targets.
+ ;;
*)
- check_cxx "$@" <<EOF && soft_enable unit_tests
+ enabled pthread_h && check_cxx "$@" <<EOF && soft_enable unit_tests
int z;
EOF
;;
diff --git a/libvpx/example_xma.c b/libvpx/example_xma.c
index 72eb470..7aa8798 100644
--- a/libvpx/example_xma.c
+++ b/libvpx/example_xma.c
@@ -18,197 +18,174 @@
#include "vpx_config.h"
#include "vpx/vpx_decoder.h"
#include "vpx/vpx_integer.h"
-#if CONFIG_VP8_DECODER
+#if CONFIG_VP9_DECODER
#include "vpx/vp8dx.h"
#endif
static char *exec_name;
static int verbose = 0;
-static const struct
-{
- const char *name;
- const vpx_codec_iface_t *iface;
-} ifaces[] =
-{
-#if CONFIG_VP8_DECODER
- {"vp8", &vpx_codec_vp8_dx_algo},
+static const struct {
+ const char *name;
+ const vpx_codec_iface_t *iface;
+} ifaces[] = {
+#if CONFIG_VP9_DECODER
+ {"vp9", &vpx_codec_vp8_dx_algo},
#endif
};
-static void usage_exit(void)
-{
- int i;
-
- printf("Usage: %s <options>\n\n"
- "Options:\n"
- "\t--codec <name>\tCodec to use (default=%s)\n"
- "\t-h <height>\tHeight of the simulated video frame, in pixels\n"
- "\t-w <width> \tWidth of the simulated video frame, in pixels\n"
- "\t-v \tVerbose mode (show individual segment sizes)\n"
- "\t--help \tShow this message\n"
- "\n"
- "Included decoders:\n"
- "\n",
- exec_name,
- ifaces[0].name);
-
- for (i = 0; i < sizeof(ifaces) / sizeof(ifaces[0]); i++)
- printf(" %-6s - %s\n",
- ifaces[i].name,
- vpx_codec_iface_name(ifaces[i].iface));
-
- exit(EXIT_FAILURE);
+static void usage_exit(void) {
+ int i;
+
+ printf("Usage: %s <options>\n\n"
+ "Options:\n"
+ "\t--codec <name>\tCodec to use (default=%s)\n"
+ "\t-h <height>\tHeight of the simulated video frame, in pixels\n"
+ "\t-w <width> \tWidth of the simulated video frame, in pixels\n"
+ "\t-v \tVerbose mode (show individual segment sizes)\n"
+ "\t--help \tShow this message\n"
+ "\n"
+ "Included decoders:\n"
+ "\n",
+ exec_name,
+ ifaces[0].name);
+
+ for (i = 0; i < sizeof(ifaces) / sizeof(ifaces[0]); i++)
+ printf(" %-6s - %s\n",
+ ifaces[i].name,
+ vpx_codec_iface_name(ifaces[i].iface));
+
+ exit(EXIT_FAILURE);
}
-static void usage_error(const char *fmt, ...)
-{
- va_list ap;
- va_start(ap, fmt);
- vprintf(fmt, ap);
- printf("\n");
- usage_exit();
+static void usage_error(const char *fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ printf("\n");
+ usage_exit();
}
-void my_mem_dtor(vpx_codec_mmap_t *mmap)
-{
- if (verbose)
- printf("freeing segment %d\n", mmap->id);
+void my_mem_dtor(vpx_codec_mmap_t *mmap) {
+ if (verbose)
+ printf("freeing segment %d\n", mmap->id);
- free(mmap->priv);
+ free(mmap->priv);
}
-int main(int argc, char **argv)
-{
- vpx_codec_ctx_t decoder;
- vpx_codec_iface_t *iface = ifaces[0].iface;
- vpx_codec_iter_t iter;
- vpx_codec_dec_cfg_t cfg;
- vpx_codec_err_t res = VPX_CODEC_OK;
- unsigned int alloc_sz = 0;
- unsigned int w = 352;
- unsigned int h = 288;
- int i;
-
- exec_name = argv[0];
-
- for (i = 1; i < argc; i++)
- {
- if (!strcmp(argv[i], "--codec"))
- {
- if (i + 1 < argc)
- {
- int j, k = -1;
-
- i++;
-
- for (j = 0; j < sizeof(ifaces) / sizeof(ifaces[0]); j++)
- if (!strcmp(ifaces[j].name, argv[i]))
- k = j;
-
- if (k >= 0)
- iface = ifaces[k].iface;
- else
- usage_error("Error: Unrecognized argument (%s) to --codec\n",
- argv[i]);
- }
- else
- usage_error("Error: Option --codec requires argument.\n");
- }
- else if (!strcmp(argv[i], "-v"))
- verbose = 1;
- else if (!strcmp(argv[i], "-h"))
- if (i + 1 < argc)
- {
- h = atoi(argv[++i]);
- }
- else
- usage_error("Error: Option -h requires argument.\n");
- else if (!strcmp(argv[i], "-w"))
- if (i + 1 < argc)
- {
- w = atoi(argv[++i]);
- }
- else
- usage_error("Error: Option -w requires argument.\n");
- else if (!strcmp(argv[i], "--help"))
- usage_exit();
- else
- usage_error("Error: Unrecognized option %s\n\n", argv[i]);
- }
+int main(int argc, char **argv) {
+ vpx_codec_ctx_t decoder;
+ vpx_codec_iface_t *iface = ifaces[0].iface;
+ vpx_codec_iter_t iter;
+ vpx_codec_dec_cfg_t cfg;
+ vpx_codec_err_t res = VPX_CODEC_OK;
+ unsigned int alloc_sz = 0;
+ unsigned int w = 352;
+ unsigned int h = 288;
+ int i;
- if (argc == 1)
- printf("Using built-in defaults. For options, rerun with --help\n\n");
+ exec_name = argv[0];
- /* XMA mode is not supported on all decoders! */
- if (!(vpx_codec_get_caps(iface) & VPX_CODEC_CAP_XMA))
- {
- printf("%s does not support XMA mode!\n", vpx_codec_iface_name(iface));
- return EXIT_FAILURE;
- }
+ for (i = 1; i < argc; i++) {
+ if (!strcmp(argv[i], "--codec")) {
+ if (i + 1 < argc) {
+ int j, k = -1;
- /* The codec knows how much memory to allocate based on the size of the
- * encoded frames. This data can be parsed from the bitstream with
- * vpx_codec_peek_stream_info() if a bitstream is available. Otherwise,
- * a fixed size can be used that will be the upper limit on the frame
- * size the decoder can decode.
- */
- cfg.w = w;
- cfg.h = h;
-
- /* Initialize the decoder in XMA mode. */
- if (vpx_codec_dec_init(&decoder, iface, &cfg, VPX_CODEC_USE_XMA))
- {
- printf("Failed to initialize decoder in XMA mode: %s\n", vpx_codec_error(&decoder));
- return EXIT_FAILURE;
- }
+ i++;
+
+ for (j = 0; j < sizeof(ifaces) / sizeof(ifaces[0]); j++)
+ if (!strcmp(ifaces[j].name, argv[i]))
+ k = j;
- /* Iterate through the list of memory maps, allocating them with the
- * requested alignment.
- */
- iter = NULL;
-
- do
- {
- vpx_codec_mmap_t mmap;
- unsigned int align;
-
- res = vpx_codec_get_mem_map(&decoder, &mmap, &iter);
- align = mmap.align ? mmap.align - 1 : 0;
-
- if (!res)
- {
- if (verbose)
- printf("Allocating segment %u, size %lu, align %u %s\n",
- mmap.id, mmap.sz, mmap.align,
- mmap.flags & VPX_CODEC_MEM_ZERO ? "(ZEROED)" : "");
-
- if (mmap.flags & VPX_CODEC_MEM_ZERO)
- mmap.priv = calloc(1, mmap.sz + align);
- else
- mmap.priv = malloc(mmap.sz + align);
-
- mmap.base = (void *)((((uintptr_t)mmap.priv) + align) & ~(uintptr_t)align);
- mmap.dtor = my_mem_dtor;
- alloc_sz += mmap.sz + align;
-
- if (vpx_codec_set_mem_map(&decoder, &mmap, 1))
- {
- printf("Failed to set mmap: %s\n", vpx_codec_error(&decoder));
- return EXIT_FAILURE;
- }
- }
- else if (res != VPX_CODEC_LIST_END)
- {
- printf("Failed to get mmap: %s\n", vpx_codec_error(&decoder));
- return EXIT_FAILURE;
- }
+ if (k >= 0)
+ iface = ifaces[k].iface;
+ else
+ usage_error("Error: Unrecognized argument (%s) to --codec\n",
+ argv[i]);
+ } else
+ usage_error("Error: Option --codec requires argument.\n");
+ } else if (!strcmp(argv[i], "-v"))
+ verbose = 1;
+ else if (!strcmp(argv[i], "-h"))
+ if (i + 1 < argc) {
+ h = atoi(argv[++i]);
+ } else
+ usage_error("Error: Option -h requires argument.\n");
+ else if (!strcmp(argv[i], "-w"))
+ if (i + 1 < argc) {
+ w = atoi(argv[++i]);
+ } else
+ usage_error("Error: Option -w requires argument.\n");
+ else if (!strcmp(argv[i], "--help"))
+ usage_exit();
+ else
+ usage_error("Error: Unrecognized option %s\n\n", argv[i]);
+ }
+
+ if (argc == 1)
+ printf("Using built-in defaults. For options, rerun with --help\n\n");
+
+ /* XMA mode is not supported on all decoders! */
+ if (!(vpx_codec_get_caps(iface) & VPX_CODEC_CAP_XMA)) {
+ printf("%s does not support XMA mode!\n", vpx_codec_iface_name(iface));
+ return EXIT_FAILURE;
+ }
+
+ /* The codec knows how much memory to allocate based on the size of the
+ * encoded frames. This data can be parsed from the bitstream with
+ * vpx_codec_peek_stream_info() if a bitstream is available. Otherwise,
+ * a fixed size can be used that will be the upper limit on the frame
+ * size the decoder can decode.
+ */
+ cfg.w = w;
+ cfg.h = h;
+
+ /* Initialize the decoder in XMA mode. */
+ if (vpx_codec_dec_init(&decoder, iface, &cfg, VPX_CODEC_USE_XMA)) {
+ printf("Failed to initialize decoder in XMA mode: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
+ }
+
+ /* Iterate through the list of memory maps, allocating them with the
+ * requested alignment.
+ */
+ iter = NULL;
+
+ do {
+ vpx_codec_mmap_t mmap;
+ unsigned int align;
+
+ res = vpx_codec_get_mem_map(&decoder, &mmap, &iter);
+ align = mmap.align ? mmap.align - 1 : 0;
+
+ if (!res) {
+ if (verbose)
+ printf("Allocating segment %u, size %lu, align %u %s\n",
+ mmap.id, mmap.sz, mmap.align,
+ mmap.flags & VPX_CODEC_MEM_ZERO ? "(ZEROED)" : "");
+
+ if (mmap.flags & VPX_CODEC_MEM_ZERO)
+ mmap.priv = calloc(1, mmap.sz + align);
+ else
+ mmap.priv = malloc(mmap.sz + align);
+
+ mmap.base = (void *)((((uintptr_t)mmap.priv) + align) & ~(uintptr_t)align);
+ mmap.dtor = my_mem_dtor;
+ alloc_sz += mmap.sz + align;
+
+ if (vpx_codec_set_mem_map(&decoder, &mmap, 1)) {
+ printf("Failed to set mmap: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
+ }
+ } else if (res != VPX_CODEC_LIST_END) {
+ printf("Failed to get mmap: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
}
- while (res != VPX_CODEC_LIST_END);
+ } while (res != VPX_CODEC_LIST_END);
- printf("%s\n %d bytes external memory required for %dx%d.\n",
- decoder.name, alloc_sz, cfg.w, cfg.h);
- vpx_codec_destroy(&decoder);
- return EXIT_SUCCESS;
+ printf("%s\n %d bytes external memory required for %dx%d.\n",
+ decoder.name, alloc_sz, cfg.w, cfg.h);
+ vpx_codec_destroy(&decoder);
+ return EXIT_SUCCESS;
}
diff --git a/libvpx/examples.mk b/libvpx/examples.mk
index 90913e6..5b5ca23 100644
--- a/libvpx/examples.mk
+++ b/libvpx/examples.mk
@@ -8,6 +8,12 @@
## be found in the AUTHORS file in the root of the source tree.
##
+LIBYUV_SRCS += third_party/libyuv/include/libyuv/basic_types.h \
+ third_party/libyuv/include/libyuv/cpu_id.h \
+ third_party/libyuv/include/libyuv/scale.h \
+ third_party/libyuv/source/row.h \
+ third_party/libyuv/source/scale.c \
+ third_party/libyuv/source/cpu_id.c
# List of examples to build. UTILS are files that are taken from the source
# tree directly, and GEN_EXAMPLES are files that are created from the
@@ -25,6 +31,7 @@ vpxdec.SRCS += nestegg/halloc/src/hlist.h
vpxdec.SRCS += nestegg/halloc/src/macros.h
vpxdec.SRCS += nestegg/include/nestegg/nestegg.h
vpxdec.SRCS += nestegg/src/nestegg.c
+vpxdec.SRCS += $(LIBYUV_SRCS)
vpxdec.GUID = BA5FE66F-38DD-E034-F542-B1578C5FB950
vpxdec.DESCRIPTION = Full featured decoder
UTILS-$(CONFIG_ENCODERS) += vpxenc.c
@@ -36,9 +43,10 @@ vpxenc.SRCS += vpx_ports/vpx_timer.h
vpxenc.SRCS += libmkv/EbmlIDs.h
vpxenc.SRCS += libmkv/EbmlWriter.c
vpxenc.SRCS += libmkv/EbmlWriter.h
+vpxenc.SRCS += $(LIBYUV_SRCS)
vpxenc.GUID = 548DEC74-7A15-4B2B-AFC3-AA102E7C25C1
vpxenc.DESCRIPTION = Full featured encoder
-UTILS-$(CONFIG_ENCODERS) += vp8_scalable_patterns.c
+UTILS-$(CONFIG_VP8_ENCODER) += vp8_scalable_patterns.c
vp8_scalable_patterns.GUID = 0D6A210B-F482-4D6F-8570-4A9C01ACC88C
vp8_scalable_patterns.DESCRIPTION = Temporal Scalability Encoder
@@ -56,37 +64,37 @@ endif
#example_xma.GUID = A955FC4A-73F1-44F7-135E-30D84D32F022
#example_xma.DESCRIPTION = External Memory Allocation mode usage
-GEN_EXAMPLES-$(CONFIG_DECODERS) += simple_decoder.c
+GEN_EXAMPLES-$(CONFIG_VP8_DECODER) += simple_decoder.c
simple_decoder.GUID = D3BBF1E9-2427-450D-BBFF-B2843C1D44CC
simple_decoder.DESCRIPTION = Simplified decoder loop
-GEN_EXAMPLES-$(CONFIG_DECODERS) += postproc.c
+GEN_EXAMPLES-$(CONFIG_VP8_DECODER) += postproc.c
postproc.GUID = 65E33355-F35E-4088-884D-3FD4905881D7
postproc.DESCRIPTION = Decoder postprocessor control
-GEN_EXAMPLES-$(CONFIG_DECODERS) += decode_to_md5.c
+GEN_EXAMPLES-$(CONFIG_VP8_DECODER) += decode_to_md5.c
decode_to_md5.SRCS += md5_utils.h md5_utils.c
decode_to_md5.GUID = 59120B9B-2735-4BFE-B022-146CA340FE42
decode_to_md5.DESCRIPTION = Frame by frame MD5 checksum
-GEN_EXAMPLES-$(CONFIG_ENCODERS) += simple_encoder.c
+GEN_EXAMPLES-$(CONFIG_VP8_ENCODER) += simple_encoder.c
simple_encoder.GUID = 4607D299-8A71-4D2C-9B1D-071899B6FBFD
simple_encoder.DESCRIPTION = Simplified encoder loop
-GEN_EXAMPLES-$(CONFIG_ENCODERS) += twopass_encoder.c
+GEN_EXAMPLES-$(CONFIG_VP8_ENCODER) += twopass_encoder.c
twopass_encoder.GUID = 73494FA6-4AF9-4763-8FBB-265C92402FD8
twopass_encoder.DESCRIPTION = Two-pass encoder loop
-GEN_EXAMPLES-$(CONFIG_ENCODERS) += force_keyframe.c
+GEN_EXAMPLES-$(CONFIG_VP8_ENCODER) += force_keyframe.c
force_keyframe.GUID = 3C67CADF-029F-4C86-81F5-D6D4F51177F0
force_keyframe.DESCRIPTION = Force generation of keyframes
ifeq ($(CONFIG_DECODERS),yes)
-GEN_EXAMPLES-$(CONFIG_ENCODERS) += decode_with_drops.c
+GEN_EXAMPLES-$(CONFIG_VP8_ENCODER) += decode_with_drops.c
endif
decode_with_drops.GUID = CE5C53C4-8DDA-438A-86ED-0DDD3CDB8D26
decode_with_drops.DESCRIPTION = Drops frames while decoding
-ifeq ($(CONFIG_DECODERS),yes)
+ifeq ($(CONFIG_VP8_DECODER),yes)
GEN_EXAMPLES-$(CONFIG_ERROR_CONCEALMENT) += decode_with_partial_drops.c
endif
decode_with_partial_drops.GUID = 61C2D026-5754-46AC-916F-1343ECC5537E
decode_with_partial_drops.DESCRIPTION = Drops parts of frames while decoding
-GEN_EXAMPLES-$(CONFIG_ENCODERS) += error_resilient.c
+GEN_EXAMPLES-$(CONFIG_VP8_ENCODER) += error_resilient.c
error_resilient.GUID = DF5837B9-4145-4F92-A031-44E4F832E00C
error_resilient.DESCRIPTION = Error Resiliency Feature
@@ -99,13 +107,7 @@ vp8cx_set_ref.DESCRIPTION = VP8 set encoder reference frame
# C file is provided, not generated automatically.
UTILS-$(CONFIG_MULTI_RES_ENCODING) += vp8_multi_resolution_encoder.c
-vp8_multi_resolution_encoder.SRCS \
- += third_party/libyuv/include/libyuv/basic_types.h \
- third_party/libyuv/include/libyuv/cpu_id.h \
- third_party/libyuv/include/libyuv/scale.h \
- third_party/libyuv/source/row.h \
- third_party/libyuv/source/scale.c \
- third_party/libyuv/source/cpu_id.c
+vp8_multi_resolution_encoder.SRCS += $(LIBYUV_SRCS)
vp8_multi_resolution_encoder.GUID = 04f8738e-63c8-423b-90fa-7c2703a374de
vp8_multi_resolution_encoder.DESCRIPTION = VP8 Multiple-resolution Encoding
@@ -115,9 +117,11 @@ vp8_multi_resolution_encoder.DESCRIPTION = VP8 Multiple-resolution Encoding
# when building for bare-metal targets
ifeq ($(CONFIG_OS_SUPPORT), yes)
CODEC_EXTRA_LIBS-$(CONFIG_VP8) += m
+CODEC_EXTRA_LIBS-$(CONFIG_VP9) += m
else
ifeq ($(CONFIG_GCC), yes)
CODEC_EXTRA_LIBS-$(CONFIG_VP8) += m
+ CODEC_EXTRA_LIBS-$(CONFIG_VP9) += m
endif
endif
#
@@ -136,6 +140,8 @@ else
LIB_PATH-yes += $(if $(BUILD_PFX),$(BUILD_PFX),.)
INC_PATH-$(CONFIG_VP8_DECODER) += $(SRC_PATH_BARE)/vp8
INC_PATH-$(CONFIG_VP8_ENCODER) += $(SRC_PATH_BARE)/vp8
+ INC_PATH-$(CONFIG_VP9_DECODER) += $(SRC_PATH_BARE)/vp9
+ INC_PATH-$(CONFIG_VP9_ENCODER) += $(SRC_PATH_BARE)/vp9
LIB_PATH := $(call enabled,LIB_PATH)
INC_PATH := $(call enabled,INC_PATH)
endif
@@ -179,7 +185,8 @@ BINS-$(NOT_MSVS) += $(addprefix $(BUILD_PFX),$(ALL_EXAMPLES:.c=$(EXE_S
# Instantiate linker template for all examples.
CODEC_LIB=$(if $(CONFIG_DEBUG_LIBS),vpx_g,vpx)
-CODEC_LIB_SUF=$(if $(CONFIG_SHARED),.so,.a)
+SHARED_LIB_SUF=$(if $(filter darwin%,$(TGT_OS)),.dylib,.so)
+CODEC_LIB_SUF=$(if $(CONFIG_SHARED),$(SHARED_LIB_SUF),.a)
$(foreach bin,$(BINS-yes),\
$(if $(BUILD_OBJS),$(eval $(bin):\
$(LIB_PATH)/lib$(CODEC_LIB)$(CODEC_LIB_SUF)))\
@@ -209,7 +216,7 @@ INSTALL_MAPS += % %
# Set up additional MSVS environment
ifeq ($(CONFIG_MSVS),yes)
-CODEC_LIB=$(if $(CONFIG_STATIC_MSVCRT),vpxmt,vpxmd)
+CODEC_LIB=$(if $(CONFIG_SHARED),vpx,$(if $(CONFIG_STATIC_MSVCRT),vpxmt,vpxmd))
# This variable uses deferred expansion intentionally, since the results of
# $(wildcard) may change during the course of the Make.
VS_PLATFORMS = $(foreach d,$(wildcard */Release/$(CODEC_LIB).lib),$(word 1,$(subst /, ,$(d))))
@@ -224,19 +231,19 @@ endif
# even though there is no real dependency there (the dependency is on
# the makefiles). We may want to revisit this.
define vcproj_template
-$(1): $($(1:.vcproj=).SRCS)
+$(1): $($(1:.$(VCPROJ_SFX)=).SRCS) vpx.$(VCPROJ_SFX)
@echo " [vcproj] $$@"
- $$(SRC_PATH_BARE)/build/make/gen_msvs_proj.sh\
+ $$(GEN_VCPROJ)\
--exe\
--target=$$(TOOLCHAIN)\
- --name=$$(@:.vcproj=)\
+ --name=$$(@:.$(VCPROJ_SFX)=)\
--ver=$$(CONFIG_VS_VERSION)\
- --proj-guid=$$($$(@:.vcproj=).GUID)\
+ --proj-guid=$$($$(@:.$(VCPROJ_SFX)=).GUID)\
$$(if $$(CONFIG_STATIC_MSVCRT),--static-crt) \
--out=$$@ $$(INTERNAL_CFLAGS) $$(CFLAGS) \
- $$(INTERNAL_LDFLAGS) $$(LDFLAGS) -l$$(CODEC_LIB) -lwinmm $$^
+ $$(INTERNAL_LDFLAGS) $$(LDFLAGS) -l$$(CODEC_LIB) $$^
endef
-PROJECTS-$(CONFIG_MSVS) += $(ALL_EXAMPLES:.c=.vcproj)
+PROJECTS-$(CONFIG_MSVS) += $(ALL_EXAMPLES:.c=.$(VCPROJ_SFX))
INSTALL-BINS-$(CONFIG_MSVS) += $(foreach p,$(VS_PLATFORMS),\
$(addprefix bin/$(p)/,$(ALL_EXAMPLES:.c=.exe)))
$(foreach proj,$(call enabled,PROJECTS),\
diff --git a/libvpx/examples/decoder_tmpl.c b/libvpx/examples/decoder_tmpl.c
index 8194f0a..597fea2 100644
--- a/libvpx/examples/decoder_tmpl.c
+++ b/libvpx/examples/decoder_tmpl.c
@@ -12,6 +12,7 @@
/*
@*INTRODUCTION
*/
+#include "vpx_config.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
diff --git a/libvpx/examples/decoder_tmpl.txt b/libvpx/examples/decoder_tmpl.txt
index e652a63..3d230a5 100644
--- a/libvpx/examples/decoder_tmpl.txt
+++ b/libvpx/examples/decoder_tmpl.txt
@@ -1,7 +1,7 @@
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEC_INCLUDES
#define VPX_CODEC_DISABLE_COMPAT 1
#include "vpx/vpx_decoder.h"
-#include "vpx/vp8dx.h"
+#include "vpx/vp9dx.h"
#define interface (vpx_codec_vp8_dx())
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEC_INCLUDES
diff --git a/libvpx/examples/encoder_tmpl.txt b/libvpx/examples/encoder_tmpl.txt
index 1afbd8b..9f8f4af 100644
--- a/libvpx/examples/encoder_tmpl.txt
+++ b/libvpx/examples/encoder_tmpl.txt
@@ -1,7 +1,7 @@
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ENC_INCLUDES
#define VPX_CODEC_DISABLE_COMPAT 1
#include "vpx/vpx_encoder.h"
-#include "vpx/vp8cx.h"
+#include "vpx/vp9cx.h"
#define interface (vpx_codec_vp8_cx())
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ENC_INCLUDES
diff --git a/libvpx/examples/postproc.txt b/libvpx/examples/postproc.txt
index 51b251a..e00bf59 100644
--- a/libvpx/examples/postproc.txt
+++ b/libvpx/examples/postproc.txt
@@ -51,7 +51,7 @@ Some codecs provide fine grained controls over their built-in
postprocessors. VP8 is one example. The following sample code toggles
postprocessing on and off every 15 frames.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PRE_DECODE
-#if CONFIG_VP8_DECODER
+#if CONFIG_VP9_DECODER
if(frame_cnt%30 == 1) {
vp8_postproc_cfg_t pp = {0, 0, 0};
diff --git a/libvpx/libmkv/EbmlBufferWriter.c b/libvpx/libmkv/EbmlBufferWriter.c
index d9b04a8..574e478 100644
--- a/libvpx/libmkv/EbmlBufferWriter.c
+++ b/libvpx/libmkv/EbmlBufferWriter.c
@@ -1,60 +1,54 @@
-//#include <strmif.h>
+// #include <strmif.h>
#include "EbmlBufferWriter.h"
#include "EbmlWriter.h"
-//#include <cassert>
-//#include <limits>
-//#include <malloc.h> //_alloca
+// #include <cassert>
+// #include <limits>
+// #include <malloc.h> //_alloca
#include <stdlib.h>
#include <wchar.h>
#include <string.h>
-void Ebml_Write(EbmlGlobal *glob, const void *buffer_in, unsigned long len)
-{
- unsigned char *src = glob->buf;
- src += glob->offset;
- memcpy(src, buffer_in, len);
- glob->offset += len;
+void Ebml_Write(EbmlGlobal *glob, const void *buffer_in, unsigned long len) {
+ unsigned char *src = glob->buf;
+ src += glob->offset;
+ memcpy(src, buffer_in, len);
+ glob->offset += len;
}
-static void _Serialize(EbmlGlobal *glob, const unsigned char *p, const unsigned char *q)
-{
- while (q != p)
- {
- --q;
+static void _Serialize(EbmlGlobal *glob, const unsigned char *p, const unsigned char *q) {
+ while (q != p) {
+ --q;
- unsigned long cbWritten;
- memcpy(&(glob->buf[glob->offset]), q, 1);
- glob->offset ++;
- }
+ unsigned long cbWritten;
+ memcpy(&(glob->buf[glob->offset]), q, 1);
+ glob->offset++;
+ }
}
-void Ebml_Serialize(EbmlGlobal *glob, const void *buffer_in, unsigned long len)
-{
- //assert(buf);
+void Ebml_Serialize(EbmlGlobal *glob, const void *buffer_in, unsigned long len) {
+ // assert(buf);
- const unsigned char *const p = (const unsigned char *)(buffer_in);
- const unsigned char *const q = p + len;
+ const unsigned char *const p = (const unsigned char *)(buffer_in);
+ const unsigned char *const q = p + len;
- _Serialize(glob, p, q);
+ _Serialize(glob, p, q);
}
-void Ebml_StartSubElement(EbmlGlobal *glob, EbmlLoc *ebmlLoc, unsigned long class_id)
-{
- Ebml_WriteID(glob, class_id);
- ebmlLoc->offset = glob->offset;
- //todo this is always taking 8 bytes, this may need later optimization
- unsigned long long unknownLen = 0x01FFFFFFFFFFFFFFLLU;
- Ebml_Serialize(glob, (void *)&unknownLen, 8); //this is a key that says lenght unknown
+void Ebml_StartSubElement(EbmlGlobal *glob, EbmlLoc *ebmlLoc, unsigned long class_id) {
+ Ebml_WriteID(glob, class_id);
+ ebmlLoc->offset = glob->offset;
+ // todo this is always taking 8 bytes, this may need later optimization
+ unsigned long long unknownLen = 0x01FFFFFFFFFFFFFFLLU;
+ Ebml_Serialize(glob, (void *)&unknownLen, 8); // this is a key that says length unknown
}
-void Ebml_EndSubElement(EbmlGlobal *glob, EbmlLoc *ebmlLoc)
-{
- unsigned long long size = glob->offset - ebmlLoc->offset - 8;
- unsigned long long curOffset = glob->offset;
- glob->offset = ebmlLoc->offset;
- size |= 0x0100000000000000LLU;
- Ebml_Serialize(glob, &size, 8);
- glob->offset = curOffset;
+void Ebml_EndSubElement(EbmlGlobal *glob, EbmlLoc *ebmlLoc) {
+ unsigned long long size = glob->offset - ebmlLoc->offset - 8;
+ unsigned long long curOffset = glob->offset;
+ glob->offset = ebmlLoc->offset;
+ size |= 0x0100000000000000LLU;
+ Ebml_Serialize(glob, &size, 8);
+ glob->offset = curOffset;
}
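
Ebml_StartSubElement reserves an 8-byte size field holding the EBML "length unknown" pattern, and Ebml_EndSubElement backpatches it with the real payload size OR'ed with the 8-byte length descriptor. A worked sketch of that final encoding (the payload value is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint64_t payload = 1234;  /* hypothetical element payload size */
      /* A 0x01 top octet marks an 8-byte EBML size field, which is what
       * Ebml_EndSubElement sets with size |= 0x0100000000000000LLU. */
      uint64_t coded = payload | 0x0100000000000000ULL;
      printf("0x%016llx\n", (unsigned long long)coded);  /* 0x01000000000004d2 */
      return 0;
    }
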
diff --git a/libvpx/libmkv/EbmlBufferWriter.h b/libvpx/libmkv/EbmlBufferWriter.h
index ba0a9b3..acd5c2a 100644
--- a/libvpx/libmkv/EbmlBufferWriter.h
+++ b/libvpx/libmkv/EbmlBufferWriter.h
@@ -1,16 +1,14 @@
#ifndef EBMLBUFFERWRITER_HPP
#define EBMLBUFFERWRITER_HPP
-typedef struct
-{
- unsigned long long offset;
+typedef struct {
+ unsigned long long offset;
} EbmlLoc;
-typedef struct
-{
- unsigned char *buf;
- unsigned int length;
- unsigned int offset;
+typedef struct {
+ unsigned char *buf;
+ unsigned int length;
+ unsigned int offset;
} EbmlGlobal;
diff --git a/libvpx/libmkv/EbmlIDs.h b/libvpx/libmkv/EbmlIDs.h
index e3ce585..44d4385 100644
--- a/libvpx/libmkv/EbmlIDs.h
+++ b/libvpx/libmkv/EbmlIDs.h
@@ -12,35 +12,34 @@
/* Commenting out values not available in webm, but available in matroska */
-enum mkv
-{
- EBML = 0x1A45DFA3,
- EBMLVersion = 0x4286,
- EBMLReadVersion = 0x42F7,
- EBMLMaxIDLength = 0x42F2,
- EBMLMaxSizeLength = 0x42F3,
- DocType = 0x4282,
- DocTypeVersion = 0x4287,
- DocTypeReadVersion = 0x4285,
+enum mkv {
+ EBML = 0x1A45DFA3,
+ EBMLVersion = 0x4286,
+ EBMLReadVersion = 0x42F7,
+ EBMLMaxIDLength = 0x42F2,
+ EBMLMaxSizeLength = 0x42F3,
+ DocType = 0x4282,
+ DocTypeVersion = 0x4287,
+ DocTypeReadVersion = 0x4285,
/* CRC_32 = 0xBF, */
- Void = 0xEC,
- SignatureSlot = 0x1B538667,
- SignatureAlgo = 0x7E8A,
- SignatureHash = 0x7E9A,
- SignaturePublicKey = 0x7EA5,
- Signature = 0x7EB5,
- SignatureElements = 0x7E5B,
- SignatureElementList = 0x7E7B,
- SignedElement = 0x6532,
- /* segment */
- Segment = 0x18538067,
- /* Meta Seek Information */
- SeekHead = 0x114D9B74,
- Seek = 0x4DBB,
- SeekID = 0x53AB,
- SeekPosition = 0x53AC,
- /* Segment Information */
- Info = 0x1549A966,
+ Void = 0xEC,
+ SignatureSlot = 0x1B538667,
+ SignatureAlgo = 0x7E8A,
+ SignatureHash = 0x7E9A,
+ SignaturePublicKey = 0x7EA5,
+ Signature = 0x7EB5,
+ SignatureElements = 0x7E5B,
+ SignatureElementList = 0x7E7B,
+ SignedElement = 0x6532,
+ /* segment */
+ Segment = 0x18538067,
+ /* Meta Seek Information */
+ SeekHead = 0x114D9B74,
+ Seek = 0x4DBB,
+ SeekID = 0x53AB,
+ SeekPosition = 0x53AC,
+ /* Segment Information */
+ Info = 0x1549A966,
/* SegmentUID = 0x73A4, */
/* SegmentFilename = 0x7384, */
/* PrevUID = 0x3CB923, */
@@ -52,61 +51,61 @@ enum mkv
/* ChapterTranslateEditionUID = 0x69FC, */
/* ChapterTranslateCodec = 0x69BF, */
/* ChapterTranslateID = 0x69A5, */
- TimecodeScale = 0x2AD7B1,
- Segment_Duration = 0x4489,
- DateUTC = 0x4461,
+ TimecodeScale = 0x2AD7B1,
+ Segment_Duration = 0x4489,
+ DateUTC = 0x4461,
/* Title = 0x7BA9, */
- MuxingApp = 0x4D80,
- WritingApp = 0x5741,
- /* Cluster */
- Cluster = 0x1F43B675,
- Timecode = 0xE7,
+ MuxingApp = 0x4D80,
+ WritingApp = 0x5741,
+ /* Cluster */
+ Cluster = 0x1F43B675,
+ Timecode = 0xE7,
/* SilentTracks = 0x5854, */
/* SilentTrackNumber = 0x58D7, */
/* Position = 0xA7, */
- PrevSize = 0xAB,
- BlockGroup = 0xA0,
- Block = 0xA1,
+ PrevSize = 0xAB,
+ BlockGroup = 0xA0,
+ Block = 0xA1,
/* BlockVirtual = 0xA2, */
-/* BlockAdditions = 0x75A1, */
-/* BlockMore = 0xA6, */
-/* BlockAddID = 0xEE, */
-/* BlockAdditional = 0xA5, */
- BlockDuration = 0x9B,
+ BlockAdditions = 0x75A1,
+ BlockMore = 0xA6,
+ BlockAddID = 0xEE,
+ BlockAdditional = 0xA5,
+ BlockDuration = 0x9B,
/* ReferencePriority = 0xFA, */
- ReferenceBlock = 0xFB,
+ ReferenceBlock = 0xFB,
/* ReferenceVirtual = 0xFD, */
/* CodecState = 0xA4, */
/* Slices = 0x8E, */
/* TimeSlice = 0xE8, */
- LaceNumber = 0xCC,
+ LaceNumber = 0xCC,
/* FrameNumber = 0xCD, */
/* BlockAdditionID = 0xCB, */
/* MkvDelay = 0xCE, */
/* Cluster_Duration = 0xCF, */
- SimpleBlock = 0xA3,
+ SimpleBlock = 0xA3,
/* EncryptedBlock = 0xAF, */
- /* Track */
- Tracks = 0x1654AE6B,
- TrackEntry = 0xAE,
- TrackNumber = 0xD7,
- TrackUID = 0x73C5,
- TrackType = 0x83,
- FlagEnabled = 0xB9,
- FlagDefault = 0x88,
- FlagForced = 0x55AA,
- FlagLacing = 0x9C,
+ /* Track */
+ Tracks = 0x1654AE6B,
+ TrackEntry = 0xAE,
+ TrackNumber = 0xD7,
+ TrackUID = 0x73C5,
+ TrackType = 0x83,
+ FlagEnabled = 0xB9,
+ FlagDefault = 0x88,
+ FlagForced = 0x55AA,
+ FlagLacing = 0x9C,
/* MinCache = 0x6DE7, */
/* MaxCache = 0x6DF8, */
- DefaultDuration = 0x23E383,
+ DefaultDuration = 0x23E383,
/* TrackTimecodeScale = 0x23314F, */
/* TrackOffset = 0x537F, */
-/* MaxBlockAdditionID = 0x55EE, */
- Name = 0x536E,
- Language = 0x22B59C,
- CodecID = 0x86,
- CodecPrivate = 0x63A2,
- CodecName = 0x258688,
+ MaxBlockAdditionID = 0x55EE,
+ Name = 0x536E,
+ Language = 0x22B59C,
+ CodecID = 0x86,
+ CodecPrivate = 0x63A2,
+ CodecName = 0x258688,
/* AttachmentLink = 0x7446, */
/* CodecSettings = 0x3A9697, */
/* CodecInfoURL = 0x3B4040, */
@@ -117,33 +116,34 @@ enum mkv
/* TrackTranslateEditionUID = 0x66FC, */
/* TrackTranslateCodec = 0x66BF, */
/* TrackTranslateTrackID = 0x66A5, */
- /* video */
- Video = 0xE0,
- FlagInterlaced = 0x9A,
- StereoMode = 0x53B8,
- PixelWidth = 0xB0,
- PixelHeight = 0xBA,
- PixelCropBottom = 0x54AA,
- PixelCropTop = 0x54BB,
- PixelCropLeft = 0x54CC,
- PixelCropRight = 0x54DD,
- DisplayWidth = 0x54B0,
- DisplayHeight = 0x54BA,
- DisplayUnit = 0x54B2,
- AspectRatioType = 0x54B3,
+ /* video */
+ Video = 0xE0,
+ FlagInterlaced = 0x9A,
+ StereoMode = 0x53B8,
+ AlphaMode = 0x53C0,
+ PixelWidth = 0xB0,
+ PixelHeight = 0xBA,
+ PixelCropBottom = 0x54AA,
+ PixelCropTop = 0x54BB,
+ PixelCropLeft = 0x54CC,
+ PixelCropRight = 0x54DD,
+ DisplayWidth = 0x54B0,
+ DisplayHeight = 0x54BA,
+ DisplayUnit = 0x54B2,
+ AspectRatioType = 0x54B3,
/* ColourSpace = 0x2EB524, */
/* GammaValue = 0x2FB523, */
- FrameRate = 0x2383E3,
- /* end video */
- /* audio */
- Audio = 0xE1,
- SamplingFrequency = 0xB5,
- OutputSamplingFrequency = 0x78B5,
- Channels = 0x9F,
+ FrameRate = 0x2383E3,
+ /* end video */
+ /* audio */
+ Audio = 0xE1,
+ SamplingFrequency = 0xB5,
+ OutputSamplingFrequency = 0x78B5,
+ Channels = 0x9F,
/* ChannelPositions = 0x7D7B, */
- BitDepth = 0x6264,
- /* end audio */
- /* content encoding */
+ BitDepth = 0x6264,
+ /* end audio */
+ /* content encoding */
/* ContentEncodings = 0x6d80, */
/* ContentEncoding = 0x6240, */
/* ContentEncodingOrder = 0x5031, */
@@ -159,22 +159,22 @@ enum mkv
/* ContentSigKeyID = 0x47e4, */
/* ContentSigAlgo = 0x47e5, */
/* ContentSigHashAlgo = 0x47e6, */
- /* end content encoding */
- /* Cueing Data */
- Cues = 0x1C53BB6B,
- CuePoint = 0xBB,
- CueTime = 0xB3,
- CueTrackPositions = 0xB7,
- CueTrack = 0xF7,
- CueClusterPosition = 0xF1,
- CueBlockNumber = 0x5378
+ /* end content encoding */
+ /* Cueing Data */
+ Cues = 0x1C53BB6B,
+ CuePoint = 0xBB,
+ CueTime = 0xB3,
+ CueTrackPositions = 0xB7,
+ CueTrack = 0xF7,
+ CueClusterPosition = 0xF1,
+ CueBlockNumber = 0x5378
/* CueCodecState = 0xEA, */
/* CueReference = 0xDB, */
/* CueRefTime = 0x96, */
/* CueRefCluster = 0x97, */
/* CueRefNumber = 0x535F, */
/* CueRefCodecState = 0xEB, */
- /* Attachment */
+ /* Attachment */
/* Attachments = 0x1941A469, */
/* AttachedFile = 0x61A7, */
/* FileDescription = 0x467E, */
@@ -183,7 +183,7 @@ enum mkv
/* FileData = 0x465C, */
/* FileUID = 0x46AE, */
/* FileReferral = 0x4675, */
- /* Chapters */
+ /* Chapters */
/* Chapters = 0x1043A770, */
/* EditionEntry = 0x45B9, */
/* EditionUID = 0x45BC, */
@@ -211,7 +211,7 @@ enum mkv
/* ChapProcessCommand = 0x6911, */
/* ChapProcessTime = 0x6922, */
/* ChapProcessData = 0x6933, */
- /* Tagging */
+ /* Tagging */
/* Tags = 0x1254C367, */
/* Tag = 0x7373, */
/* Targets = 0x63C0, */
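
These class IDs carry their own byte width in their leading bits, which is how Ebml_WriteID (in EbmlWriter.c below) recovers the ID length from the value alone. A sketch of that width rule, using IDs from the enum above:

    #include <stdio.h>

    static int id_width(unsigned long class_id) {
      if (class_id >= 0x01000000) return 4;  /* e.g. EBML    = 0x1A45DFA3 */
      if (class_id >= 0x00010000) return 3;  /* e.g. TimecodeScale        */
      if (class_id >= 0x00000100) return 2;  /* e.g. DocType = 0x4282     */
      return 1;                              /* e.g. Void    = 0xEC       */
    }

    int main(void) {
      printf("%d %d %d %d\n", id_width(0x1A45DFA3), id_width(0x2AD7B1),
             id_width(0x4282), id_width(0xEC));  /* prints: 4 3 2 1 */
      return 0;
    }
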
diff --git a/libvpx/libmkv/EbmlWriter.c b/libvpx/libmkv/EbmlWriter.c
index d70f06e..5fc5ed2 100644
--- a/libvpx/libmkv/EbmlWriter.c
+++ b/libvpx/libmkv/EbmlWriter.c
@@ -18,158 +18,140 @@
#define LITERALU64(n) n##LLU
#endif
-void Ebml_WriteLen(EbmlGlobal *glob, int64_t val)
-{
- /* TODO check and make sure we are not > than 0x0100000000000000LLU */
- unsigned char size = 8; /* size in bytes to output */
+void Ebml_WriteLen(EbmlGlobal *glob, int64_t val) {
+ /* TODO check and make sure we are not greater than 0x0100000000000000LLU */
+ unsigned char size = 8; /* size in bytes to output */
- /* mask to compare for byte size */
- int64_t minVal = 0xff;
+ /* mask to compare for byte size */
+ int64_t minVal = 0xff;
- for (size = 1; size < 8; size ++)
- {
- if (val < minVal)
- break;
+ for (size = 1; size < 8; size++) {
+ if (val < minVal)
+ break;
- minVal = (minVal << 7);
- }
+ minVal = (minVal << 7);
+ }
- val |= (((uint64_t)0x80) << ((size - 1) * 7));
+ val |= (((uint64_t)0x80) << ((size - 1) * 7));
- Ebml_Serialize(glob, (void *) &val, sizeof(val), size);
+ Ebml_Serialize(glob, (void *) &val, sizeof(val), size);
}
-void Ebml_WriteString(EbmlGlobal *glob, const char *str)
-{
- const size_t size_ = strlen(str);
- const uint64_t size = size_;
- Ebml_WriteLen(glob, size);
- /* TODO: it's not clear from the spec whether the nul terminator
- * should be serialized too. For now we omit the null terminator.
- */
- Ebml_Write(glob, str, (unsigned long)size);
+void Ebml_WriteString(EbmlGlobal *glob, const char *str) {
+ const size_t size_ = strlen(str);
+ const uint64_t size = size_;
+ Ebml_WriteLen(glob, size);
+ /* TODO: it's not clear from the spec whether the nul terminator
+ * should be serialized too. For now we omit the null terminator.
+ */
+ Ebml_Write(glob, str, (unsigned long)size);
}
-void Ebml_WriteUTF8(EbmlGlobal *glob, const wchar_t *wstr)
-{
- const size_t strlen = wcslen(wstr);
+void Ebml_WriteUTF8(EbmlGlobal *glob, const wchar_t *wstr) {
+ const size_t strlen = wcslen(wstr);
- /* TODO: it's not clear from the spec whether the nul terminator
- * should be serialized too. For now we include it.
- */
- const uint64_t size = strlen;
+ /* TODO: it's not clear from the spec whether the nul terminator
+ * should be serialized too. For now we include it.
+ */
+ const uint64_t size = strlen;
- Ebml_WriteLen(glob, size);
- Ebml_Write(glob, wstr, (unsigned long)size);
+ Ebml_WriteLen(glob, size);
+ Ebml_Write(glob, wstr, (unsigned long)size);
}
-void Ebml_WriteID(EbmlGlobal *glob, unsigned long class_id)
-{
- int len;
+void Ebml_WriteID(EbmlGlobal *glob, unsigned long class_id) {
+ int len;
- if (class_id >= 0x01000000)
- len = 4;
- else if (class_id >= 0x00010000)
- len = 3;
- else if (class_id >= 0x00000100)
- len = 2;
- else
- len = 1;
+ if (class_id >= 0x01000000)
+ len = 4;
+ else if (class_id >= 0x00010000)
+ len = 3;
+ else if (class_id >= 0x00000100)
+ len = 2;
+ else
+ len = 1;
- Ebml_Serialize(glob, (void *)&class_id, sizeof(class_id), len);
+ Ebml_Serialize(glob, (void *)&class_id, sizeof(class_id), len);
}
-void Ebml_SerializeUnsigned64(EbmlGlobal *glob, unsigned long class_id, uint64_t ui)
-{
- unsigned char sizeSerialized = 8 | 0x80;
- Ebml_WriteID(glob, class_id);
- Ebml_Serialize(glob, &sizeSerialized, sizeof(sizeSerialized), 1);
- Ebml_Serialize(glob, &ui, sizeof(ui), 8);
+void Ebml_SerializeUnsigned64(EbmlGlobal *glob, unsigned long class_id, uint64_t ui) {
+ unsigned char sizeSerialized = 8 | 0x80;
+ Ebml_WriteID(glob, class_id);
+ Ebml_Serialize(glob, &sizeSerialized, sizeof(sizeSerialized), 1);
+ Ebml_Serialize(glob, &ui, sizeof(ui), 8);
}
-void Ebml_SerializeUnsigned(EbmlGlobal *glob, unsigned long class_id, unsigned long ui)
-{
- unsigned char size = 8; /* size in bytes to output */
- unsigned char sizeSerialized = 0;
- unsigned long minVal;
-
- Ebml_WriteID(glob, class_id);
- minVal = 0x7fLU; /* mask to compare for byte size */
+void Ebml_SerializeUnsigned(EbmlGlobal *glob, unsigned long class_id, unsigned long ui) {
+ unsigned char size = 8; /* size in bytes to output */
+ unsigned char sizeSerialized = 0;
+ unsigned long minVal;
- for (size = 1; size < 4; size ++)
- {
- if (ui < minVal)
- {
- break;
- }
+ Ebml_WriteID(glob, class_id);
+ minVal = 0x7fLU; /* mask to compare for byte size */
- minVal <<= 7;
+ for (size = 1; size < 4; size++) {
+ if (ui < minVal) {
+ break;
}
- sizeSerialized = 0x80 | size;
- Ebml_Serialize(glob, &sizeSerialized, sizeof(sizeSerialized), 1);
- Ebml_Serialize(glob, &ui, sizeof(ui), size);
+ minVal <<= 7;
+ }
+
+ sizeSerialized = 0x80 | size;
+ Ebml_Serialize(glob, &sizeSerialized, sizeof(sizeSerialized), 1);
+ Ebml_Serialize(glob, &ui, sizeof(ui), size);
}
/* TODO: perhaps this is a poor name for this id serializer helper function */
-void Ebml_SerializeBinary(EbmlGlobal *glob, unsigned long class_id, unsigned long bin)
-{
- int size;
- for (size=4; size > 1; size--)
- {
- if (bin & 0x000000ff << ((size-1) * 8))
- break;
- }
- Ebml_WriteID(glob, class_id);
- Ebml_WriteLen(glob, size);
- Ebml_WriteID(glob, bin);
+void Ebml_SerializeBinary(EbmlGlobal *glob, unsigned long class_id, unsigned long bin) {
+ int size;
+ for (size = 4; size > 1; size--) {
+ if (bin & 0x000000ff << ((size - 1) * 8))
+ break;
+ }
+ Ebml_WriteID(glob, class_id);
+ Ebml_WriteLen(glob, size);
+ Ebml_WriteID(glob, bin);
}
-void Ebml_SerializeFloat(EbmlGlobal *glob, unsigned long class_id, double d)
-{
- unsigned char len = 0x88;
+void Ebml_SerializeFloat(EbmlGlobal *glob, unsigned long class_id, double d) {
+ unsigned char len = 0x88;
- Ebml_WriteID(glob, class_id);
- Ebml_Serialize(glob, &len, sizeof(len), 1);
- Ebml_Serialize(glob, &d, sizeof(d), 8);
+ Ebml_WriteID(glob, class_id);
+ Ebml_Serialize(glob, &len, sizeof(len), 1);
+ Ebml_Serialize(glob, &d, sizeof(d), 8);
}
-void Ebml_WriteSigned16(EbmlGlobal *glob, short val)
-{
- signed long out = ((val & 0x003FFFFF) | 0x00200000) << 8;
- Ebml_Serialize(glob, &out, sizeof(out), 3);
+void Ebml_WriteSigned16(EbmlGlobal *glob, short val) {
+ signed long out = ((val & 0x003FFFFF) | 0x00200000) << 8;
+ Ebml_Serialize(glob, &out, sizeof(out), 3);
}
-void Ebml_SerializeString(EbmlGlobal *glob, unsigned long class_id, const char *s)
-{
- Ebml_WriteID(glob, class_id);
- Ebml_WriteString(glob, s);
+void Ebml_SerializeString(EbmlGlobal *glob, unsigned long class_id, const char *s) {
+ Ebml_WriteID(glob, class_id);
+ Ebml_WriteString(glob, s);
}
-void Ebml_SerializeUTF8(EbmlGlobal *glob, unsigned long class_id, wchar_t *s)
-{
- Ebml_WriteID(glob, class_id);
- Ebml_WriteUTF8(glob, s);
+void Ebml_SerializeUTF8(EbmlGlobal *glob, unsigned long class_id, wchar_t *s) {
+ Ebml_WriteID(glob, class_id);
+ Ebml_WriteUTF8(glob, s);
}
-void Ebml_SerializeData(EbmlGlobal *glob, unsigned long class_id, unsigned char *data, unsigned long data_length)
-{
- Ebml_WriteID(glob, class_id);
- Ebml_WriteLen(glob, data_length);
- Ebml_Write(glob, data, data_length);
+void Ebml_SerializeData(EbmlGlobal *glob, unsigned long class_id, unsigned char *data, unsigned long data_length) {
+ Ebml_WriteID(glob, class_id);
+ Ebml_WriteLen(glob, data_length);
+ Ebml_Write(glob, data, data_length);
}
-void Ebml_WriteVoid(EbmlGlobal *glob, unsigned long vSize)
-{
- unsigned char tmp = 0;
- unsigned long i = 0;
+void Ebml_WriteVoid(EbmlGlobal *glob, unsigned long vSize) {
+ unsigned char tmp = 0;
+ unsigned long i = 0;
- Ebml_WriteID(glob, 0xEC);
- Ebml_WriteLen(glob, vSize);
+ Ebml_WriteID(glob, 0xEC);
+ Ebml_WriteLen(glob, vSize);
- for (i = 0; i < vSize; i++)
- {
- Ebml_Write(glob, &tmp, 1);
- }
+ for (i = 0; i < vSize; i++) {
+ Ebml_Write(glob, &tmp, 1);
+ }
}
/* TODO Serialize Date */
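
Ebml_WriteLen above picks the smallest width whose seven-bits-per-byte budget fits the value, then sets the descriptor bit in the top byte. A worked example with a hypothetical value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      int64_t val = 300;      /* too big for one byte, fits in two */
      unsigned size;
      int64_t minVal = 0xff;  /* same seed as the loop above */
      for (size = 1; size < 8; size++) {
        if (val < minVal)
          break;
        minVal <<= 7;
      }
      val |= ((uint64_t)0x80) << ((size - 1) * 7);  /* descriptor bit */
      printf("width=%u coded=0x%llx\n", size, (unsigned long long)val);
      return 0;  /* prints: width=2 coded=0x412c, i.e. bytes 0x41 0x2C */
    }
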
diff --git a/libvpx/libmkv/WebMElement.c b/libvpx/libmkv/WebMElement.c
index 0ef5100..2f79a3c 100644
--- a/libvpx/libmkv/WebMElement.c
+++ b/libvpx/libmkv/WebMElement.c
@@ -14,106 +14,100 @@
#define kVorbisPrivateMaxSize 4000
-void writeHeader(EbmlGlobal *glob)
-{
- EbmlLoc start;
- Ebml_StartSubElement(glob, &start, EBML);
- Ebml_SerializeUnsigned(glob, EBMLVersion, 1);
- Ebml_SerializeUnsigned(glob, EBMLReadVersion, 1); //EBML Read Version
- Ebml_SerializeUnsigned(glob, EBMLMaxIDLength, 4); //EBML Max ID Length
- Ebml_SerializeUnsigned(glob, EBMLMaxSizeLength, 8); //EBML Max Size Length
- Ebml_SerializeString(glob, DocType, "webm"); //Doc Type
- Ebml_SerializeUnsigned(glob, DocTypeVersion, 2); //Doc Type Version
- Ebml_SerializeUnsigned(glob, DocTypeReadVersion, 2); //Doc Type Read Version
- Ebml_EndSubElement(glob, &start);
+void writeHeader(EbmlGlobal *glob) {
+ EbmlLoc start;
+ Ebml_StartSubElement(glob, &start, EBML);
+ Ebml_SerializeUnsigned(glob, EBMLVersion, 1);
+ Ebml_SerializeUnsigned(glob, EBMLReadVersion, 1); // EBML Read Version
+ Ebml_SerializeUnsigned(glob, EBMLMaxIDLength, 4); // EBML Max ID Length
+ Ebml_SerializeUnsigned(glob, EBMLMaxSizeLength, 8); // EBML Max Size Length
+ Ebml_SerializeString(glob, DocType, "webm"); // Doc Type
+ Ebml_SerializeUnsigned(glob, DocTypeVersion, 2); // Doc Type Version
+ Ebml_SerializeUnsigned(glob, DocTypeReadVersion, 2); // Doc Type Read Version
+ Ebml_EndSubElement(glob, &start);
}
void writeSimpleBlock(EbmlGlobal *glob, unsigned char trackNumber, short timeCode,
int isKeyframe, unsigned char lacingFlag, int discardable,
- unsigned char *data, unsigned long dataLength)
-{
- Ebml_WriteID(glob, SimpleBlock);
- unsigned long blockLength = 4 + dataLength;
- blockLength |= 0x10000000; //TODO check length < 0x0FFFFFFFF
- Ebml_Serialize(glob, &blockLength, sizeof(blockLength), 4);
- trackNumber |= 0x80; //TODO check track nubmer < 128
- Ebml_Write(glob, &trackNumber, 1);
- //Ebml_WriteSigned16(glob, timeCode,2); //this is 3 bytes
- Ebml_Serialize(glob, &timeCode, sizeof(timeCode), 2);
- unsigned char flags = 0x00 | (isKeyframe ? 0x80 : 0x00) | (lacingFlag << 1) | discardable;
- Ebml_Write(glob, &flags, 1);
- Ebml_Write(glob, data, dataLength);
-}
-
-static UInt64 generateTrackID(unsigned int trackNumber)
-{
- UInt64 t = time(NULL) * trackNumber;
- UInt64 r = rand();
- r = r << 32;
- r += rand();
- UInt64 rval = t ^ r;
- return rval;
+ unsigned char *data, unsigned long dataLength) {
+ Ebml_WriteID(glob, SimpleBlock);
+ unsigned long blockLength = 4 + dataLength;
+ blockLength |= 0x10000000; // TODO check length < 0x0FFFFFFFF
+ Ebml_Serialize(glob, &blockLength, sizeof(blockLength), 4);
+ trackNumber |= 0x80; // TODO check track number < 128
+ Ebml_Write(glob, &trackNumber, 1);
+ // Ebml_WriteSigned16(glob, timeCode,2); //this is 3 bytes
+ Ebml_Serialize(glob, &timeCode, sizeof(timeCode), 2);
+ unsigned char flags = 0x00 | (isKeyframe ? 0x80 : 0x00) | (lacingFlag << 1) | discardable;
+ Ebml_Write(glob, &flags, 1);
+ Ebml_Write(glob, data, dataLength);
+}
+
+static UInt64 generateTrackID(unsigned int trackNumber) {
+ UInt64 t = time(NULL) * trackNumber;
+ UInt64 r = rand();
+ r = r << 32;
+ r += rand();
+ UInt64 rval = t ^ r;
+ return rval;
}
void writeVideoTrack(EbmlGlobal *glob, unsigned int trackNumber, int flagLacing,
char *codecId, unsigned int pixelWidth, unsigned int pixelHeight,
- double frameRate)
-{
- EbmlLoc start;
- Ebml_StartSubElement(glob, &start, TrackEntry);
- Ebml_SerializeUnsigned(glob, TrackNumber, trackNumber);
- UInt64 trackID = generateTrackID(trackNumber);
- Ebml_SerializeUnsigned(glob, TrackUID, trackID);
- Ebml_SerializeString(glob, CodecName, "VP8"); //TODO shouldn't be fixed
-
- Ebml_SerializeUnsigned(glob, TrackType, 1); //video is always 1
- Ebml_SerializeString(glob, CodecID, codecId);
- {
- EbmlLoc videoStart;
- Ebml_StartSubElement(glob, &videoStart, Video);
- Ebml_SerializeUnsigned(glob, PixelWidth, pixelWidth);
- Ebml_SerializeUnsigned(glob, PixelHeight, pixelHeight);
- Ebml_SerializeFloat(glob, FrameRate, frameRate);
- Ebml_EndSubElement(glob, &videoStart); //Video
- }
- Ebml_EndSubElement(glob, &start); //Track Entry
+ double frameRate) {
+ EbmlLoc start;
+ Ebml_StartSubElement(glob, &start, TrackEntry);
+ Ebml_SerializeUnsigned(glob, TrackNumber, trackNumber);
+ UInt64 trackID = generateTrackID(trackNumber);
+ Ebml_SerializeUnsigned(glob, TrackUID, trackID);
+ Ebml_SerializeString(glob, CodecName, "VP8"); // TODO shouldn't be fixed
+
+ Ebml_SerializeUnsigned(glob, TrackType, 1); // video is always 1
+ Ebml_SerializeString(glob, CodecID, codecId);
+ {
+ EbmlLoc videoStart;
+ Ebml_StartSubElement(glob, &videoStart, Video);
+ Ebml_SerializeUnsigned(glob, PixelWidth, pixelWidth);
+ Ebml_SerializeUnsigned(glob, PixelHeight, pixelHeight);
+ Ebml_SerializeFloat(glob, FrameRate, frameRate);
+ Ebml_EndSubElement(glob, &videoStart); // Video
+ }
+ Ebml_EndSubElement(glob, &start); // Track Entry
}
void writeAudioTrack(EbmlGlobal *glob, unsigned int trackNumber, int flagLacing,
char *codecId, double samplingFrequency, unsigned int channels,
- unsigned char *private, unsigned long privateSize)
-{
- EbmlLoc start;
- Ebml_StartSubElement(glob, &start, TrackEntry);
- Ebml_SerializeUnsigned(glob, TrackNumber, trackNumber);
- UInt64 trackID = generateTrackID(trackNumber);
- Ebml_SerializeUnsigned(glob, TrackUID, trackID);
- Ebml_SerializeUnsigned(glob, TrackType, 2); //audio is always 2
- //I am using defaults for thesed required fields
- /* Ebml_SerializeUnsigned(glob, FlagEnabled, 1);
- Ebml_SerializeUnsigned(glob, FlagDefault, 1);
- Ebml_SerializeUnsigned(glob, FlagForced, 1);
- Ebml_SerializeUnsigned(glob, FlagLacing, flagLacing);*/
- Ebml_SerializeString(glob, CodecID, codecId);
- Ebml_SerializeData(glob, CodecPrivate, private, privateSize);
-
- Ebml_SerializeString(glob, CodecName, "VORBIS"); //fixed for now
- {
- EbmlLoc AudioStart;
- Ebml_StartSubElement(glob, &AudioStart, Audio);
- Ebml_SerializeFloat(glob, SamplingFrequency, samplingFrequency);
- Ebml_SerializeUnsigned(glob, Channels, channels);
- Ebml_EndSubElement(glob, &AudioStart);
- }
- Ebml_EndSubElement(glob, &start);
-}
-void writeSegmentInformation(EbmlGlobal *ebml, EbmlLoc* startInfo, unsigned long timeCodeScale, double duration)
-{
- Ebml_StartSubElement(ebml, startInfo, Info);
- Ebml_SerializeUnsigned(ebml, TimecodeScale, timeCodeScale);
- Ebml_SerializeFloat(ebml, Segment_Duration, duration * 1000.0); //Currently fixed to using milliseconds
- Ebml_SerializeString(ebml, 0x4D80, "QTmuxingAppLibWebM-0.0.1");
- Ebml_SerializeString(ebml, 0x5741, "QTwritingAppLibWebM-0.0.1");
- Ebml_EndSubElement(ebml, startInfo);
+ unsigned char *private, unsigned long privateSize) {
+ EbmlLoc start;
+ Ebml_StartSubElement(glob, &start, TrackEntry);
+ Ebml_SerializeUnsigned(glob, TrackNumber, trackNumber);
+ UInt64 trackID = generateTrackID(trackNumber);
+ Ebml_SerializeUnsigned(glob, TrackUID, trackID);
+ Ebml_SerializeUnsigned(glob, TrackType, 2); // audio is always 2
+ // I am using defaults for these required fields
+ /* Ebml_SerializeUnsigned(glob, FlagEnabled, 1);
+ Ebml_SerializeUnsigned(glob, FlagDefault, 1);
+ Ebml_SerializeUnsigned(glob, FlagForced, 1);
+ Ebml_SerializeUnsigned(glob, FlagLacing, flagLacing);*/
+ Ebml_SerializeString(glob, CodecID, codecId);
+ Ebml_SerializeData(glob, CodecPrivate, private, privateSize);
+
+ Ebml_SerializeString(glob, CodecName, "VORBIS"); // fixed for now
+ {
+ EbmlLoc AudioStart;
+ Ebml_StartSubElement(glob, &AudioStart, Audio);
+ Ebml_SerializeFloat(glob, SamplingFrequency, samplingFrequency);
+ Ebml_SerializeUnsigned(glob, Channels, channels);
+ Ebml_EndSubElement(glob, &AudioStart);
+ }
+ Ebml_EndSubElement(glob, &start);
+}
+void writeSegmentInformation(EbmlGlobal *ebml, EbmlLoc *startInfo, unsigned long timeCodeScale, double duration) {
+ Ebml_StartSubElement(ebml, startInfo, Info);
+ Ebml_SerializeUnsigned(ebml, TimecodeScale, timeCodeScale);
+ Ebml_SerializeFloat(ebml, Segment_Duration, duration * 1000.0); // Currently fixed to using milliseconds
+ Ebml_SerializeString(ebml, 0x4D80, "QTmuxingAppLibWebM-0.0.1");
+ Ebml_SerializeString(ebml, 0x5741, "QTwritingAppLibWebM-0.0.1");
+ Ebml_EndSubElement(ebml, startInfo);
}
/*
@@ -142,7 +136,7 @@ void Mkv_WriteSegmentInformation(Ebml& ebml_out, SegmentInformationStruct& segme
Ebml_SerializeString(ebml_out, 0x7384, segmentInformation.filename);
Ebml_SerializeUnsigned(ebml_out, 0x2AD7B1, segmentInformation.TimecodeScale);
Ebml_SerializeUnsigned(ebml_out, 0x4489, segmentInformation.Duration);
- //TODO date
+ // TODO date
Ebml_SerializeWString(ebml_out, 0x4D80, L"MKVMUX");
Ebml_SerializeWString(ebml_out, 0x5741, segmentInformation.WritingApp);
}
@@ -173,9 +167,9 @@ static void Mkv_WriteGenericTrackData(Ebml& ebml_out, TrackStruct& track)
void Mkv_WriteVideoTrack(Ebml& ebml_out, TrackStruct & track, VideoTrackStruct& video)
{
EbmlLoc trackHeadLoc, videoHeadLoc;
- Ebml_StartSubElement(ebml_out, trackHeadLoc, 0xAE); //start Track
+ Ebml_StartSubElement(ebml_out, trackHeadLoc, 0xAE); // start Track
Mkv_WriteGenericTrackData(ebml_out, track);
- Ebml_StartSubElement(ebml_out, videoHeadLoc, 0xE0); //start Video
+ Ebml_StartSubElement(ebml_out, videoHeadLoc, 0xE0); // start Video
Ebml_SerializeUnsigned(ebml_out, 0x9A, video.FlagInterlaced ? 1 :0);
Ebml_SerializeUnsigned(ebml_out, 0xB0, video.PixelWidth);
Ebml_SerializeUnsigned(ebml_out, 0xBA, video.PixelHeight);
@@ -193,7 +187,7 @@ void Mkv_WriteAudioTrack(Ebml& ebml_out, TrackStruct & track, AudioTrackStruct&
EbmlLoc trackHeadLoc, audioHeadLoc;
Ebml_StartSubElement(ebml_out, trackHeadLoc, 0xAE);
Mkv_WriteGenericTrackData(ebml_out, track);
- Ebml_StartSubElement(ebml_out, audioHeadLoc, 0xE0); //start Audio
+ Ebml_StartSubElement(ebml_out, audioHeadLoc, 0xE0); // start Audio
Ebml_SerializeFloat(ebml_out, 0xB5, video.SamplingFrequency);
Ebml_SerializeUnsigned(ebml_out, 0x9F, video.Channels);
Ebml_SerializeUnsigned(ebml_out, 0x6264, video.BitDepth);
@@ -213,7 +207,7 @@ void Mkv_WriteSimpleBlockHead(Ebml& ebml_out, EbmlLoc& ebmlLoc, SimpleBlockStru
Ebml_Write1UInt(ebml_out, block.TrackNumber);
Ebml_WriteSigned16(ebml_out,block.TimeCode);
unsigned char flags = 0x00 | (block.iskey ? 0x80:0x00) | (block.lacing << 1) | block.discardable;
- Ebml_Write1UInt(ebml_out, flags); //TODO this may be the wrong function
+ Ebml_Write1UInt(ebml_out, flags); // TODO this may be the wrong function
Ebml_Serialize(ebml_out, block.data, block.dataLength);
Ebml_EndSubElement(ebml_out,ebmlLoc);
}
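
The SimpleBlock body emitted by writeSimpleBlock above is: a 4-byte size with the 0x10 descriptor bits, the track number with its high bit set, a 2-byte relative timecode, then one flags byte. A sketch of the flags computation for a hypothetical keyframe without lacing:

    #include <stdio.h>

    int main(void) {
      int isKeyframe = 1, discardable = 0;  /* hypothetical frame */
      unsigned char lacingFlag = 0;
      unsigned char flags = 0x00 | (isKeyframe ? 0x80 : 0x00) |
                            (lacingFlag << 1) | discardable;
      printf("flags=0x%02x\n", (unsigned)flags);  /* 0x80: key, no lacing */
      return 0;
    }
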
diff --git a/libvpx/libmkv/WebMElement.h b/libvpx/libmkv/WebMElement.h
index b4208f2..d9ad0a0 100644
--- a/libvpx/libmkv/WebMElement.h
+++ b/libvpx/libmkv/WebMElement.h
@@ -17,8 +17,8 @@ void writeSimpleBock(EbmlGlobal *ebml, unsigned char trackNumber, unsigned short
// these are helper functions
void writeHeader(EbmlGlobal *ebml);
-void writeSegmentInformation(EbmlGlobal *ebml, EbmlLoc* startInfo , unsigned long timeCodeScale, double duration);
-//this function is a helper only, it assumes a lot of defaults
+void writeSegmentInformation(EbmlGlobal *ebml, EbmlLoc *startInfo, unsigned long timeCodeScale, double duration);
+// this function is a helper only, it assumes a lot of defaults
void writeVideoTrack(EbmlGlobal *ebml, unsigned int trackNumber, int flagLacing,
char *codecId, unsigned int pixelWidth, unsigned int pixelHeight,
double frameRate);
diff --git a/libvpx/libmkv/testlibmkv.c b/libvpx/libmkv/testlibmkv.c
index 7edfc43..97bcf95 100644
--- a/libvpx/libmkv/testlibmkv.c
+++ b/libvpx/libmkv/testlibmkv.c
@@ -13,51 +13,50 @@
#include "WebMElement.h"
#include <stdio.h>
-int main(int argc, char *argv[])
-{
- //init the datatype we're using for ebml output
- unsigned char data[8192];
- EbmlGlobal ebml;
- ebml.buf = data;
- ebml.offset = 0;
- ebml.length = 8192;
-
- writeHeader(&ebml);
+int main(int argc, char *argv[]) {
+ // init the datatype we're using for ebml output
+ unsigned char data[8192];
+ EbmlGlobal ebml;
+ ebml.buf = data;
+ ebml.offset = 0;
+ ebml.length = 8192;
+
+ writeHeader(&ebml);
+ {
+ EbmlLoc startSegment;
+ Ebml_StartSubElement(&ebml, &startSegment, Segment); // segment
{
- EbmlLoc startSegment;
- Ebml_StartSubElement(&ebml, &startSegment, Segment); //segment
- {
- //segment info
- EbmlLoc startInfo;
- Ebml_StartSubElement(&ebml, &startInfo, Info);
- Ebml_SerializeString(&ebml, 0x4D80, "muxingAppLibMkv");
- Ebml_SerializeString(&ebml, 0x5741, "writingAppLibMkv");
- Ebml_EndSubElement(&ebml, &startInfo);
- }
-
- {
- EbmlLoc trackStart;
- Ebml_StartSubElement(&ebml, &trackStart, Tracks);
- writeVideoTrack(&ebml, 1, 1, "V_MS/VFW/FOURCC", 320, 240, 29.97);
- //writeAudioTrack(&ebml,2,1, "A_VORBIS", 32000, 1, NULL, 0);
- Ebml_EndSubElement(&ebml, &trackStart);
- }
-
- {
- EbmlLoc clusterStart;
- Ebml_StartSubElement(&ebml, &clusterStart, Cluster); //cluster
- Ebml_SerializeUnsigned(&ebml, Timecode, 0);
-
- unsigned char someData[4] = {1, 2, 3, 4};
- writeSimpleBlock(&ebml, 1, 0, 1, 0, 0, someData, 4);
- Ebml_EndSubElement(&ebml, &clusterStart);
- } //end cluster
- Ebml_EndSubElement(&ebml, &startSegment);
+ // segment info
+ EbmlLoc startInfo;
+ Ebml_StartSubElement(&ebml, &startInfo, Info);
+ Ebml_SerializeString(&ebml, 0x4D80, "muxingAppLibMkv");
+ Ebml_SerializeString(&ebml, 0x5741, "writingAppLibMkv");
+ Ebml_EndSubElement(&ebml, &startInfo);
}
- //dump ebml stuff to the file
- FILE *file_out = fopen("test.mkv", "wb");
- size_t bytesWritten = fwrite(data, 1, ebml.offset, file_out);
- fclose(file_out);
- return 0;
+ {
+ EbmlLoc trackStart;
+ Ebml_StartSubElement(&ebml, &trackStart, Tracks);
+ writeVideoTrack(&ebml, 1, 1, "V_MS/VFW/FOURCC", 320, 240, 29.97);
+ // writeAudioTrack(&ebml,2,1, "A_VORBIS", 32000, 1, NULL, 0);
+ Ebml_EndSubElement(&ebml, &trackStart);
+ }
+
+ {
+ EbmlLoc clusterStart;
+ Ebml_StartSubElement(&ebml, &clusterStart, Cluster); // cluster
+ Ebml_SerializeUnsigned(&ebml, Timecode, 0);
+
+ unsigned char someData[4] = {1, 2, 3, 4};
+ writeSimpleBlock(&ebml, 1, 0, 1, 0, 0, someData, 4);
+ Ebml_EndSubElement(&ebml, &clusterStart);
+ } // end cluster
+ Ebml_EndSubElement(&ebml, &startSegment);
+ }
+
+ // dump ebml stuff to the file
+ FILE *file_out = fopen("test.mkv", "wb");
+ size_t bytesWritten = fwrite(data, 1, ebml.offset, file_out);
+ fclose(file_out);
+ return 0;
}
\ No newline at end of file
diff --git a/libvpx/libs.mk b/libvpx/libs.mk
index 4115dd8..f7ed95b 100644
--- a/libvpx/libs.mk
+++ b/libvpx/libs.mk
@@ -12,11 +12,51 @@
# ARM assembly files are written in RVCT-style. We use some make magic to
# filter those files to allow GCC compilation
ifeq ($(ARCH_ARM),yes)
- ASM:=$(if $(filter yes,$(CONFIG_GCC)),.asm.s,.asm)
+ ASM:=$(if $(filter yes,$(CONFIG_GCC)$(CONFIG_MSVS)),.asm.s,.asm)
else
ASM:=.asm
endif
+#
+# Calculate platform- and compiler-specific offsets for hand coded assembly
+#
+ifeq ($(filter icc gcc,$(TGT_CC)), $(TGT_CC))
+OFFSET_PATTERN:='^[a-zA-Z0-9_]* EQU'
+define asm_offsets_template
+$$(BUILD_PFX)$(1): $$(BUILD_PFX)$(2).S
+ @echo " [CREATE] $$@"
+ $$(qexec)LC_ALL=C grep $$(OFFSET_PATTERN) $$< | tr -d '$$$$\#' $$(ADS2GAS) > $$@
+$$(BUILD_PFX)$(2).S: $(2)
+CLEAN-OBJS += $$(BUILD_PFX)$(1) $(2).S
+endef
+else
+ ifeq ($(filter rvct,$(TGT_CC)), $(TGT_CC))
+define asm_offsets_template
+$$(BUILD_PFX)$(1): obj_int_extract
+$$(BUILD_PFX)$(1): $$(BUILD_PFX)$(2).o
+ @echo " [CREATE] $$@"
+ $$(qexec)./obj_int_extract rvds $$< $$(ADS2GAS) > $$@
+OBJS-yes += $$(BUILD_PFX)$(2).o
+CLEAN-OBJS += $$(BUILD_PFX)$(1)
+$$(filter %$$(ASM).o,$$(OBJS-yes)): $$(BUILD_PFX)$(1)
+endef
+endif # rvct
+endif # !gcc
+
+#
+# Rule to generate runtime cpu detection files
+#
+define rtcd_h_template
+$$(BUILD_PFX)$(1).h: $$(SRC_PATH_BARE)/$(2)
+ @echo " [CREATE] $$@"
+ $$(qexec)$$(SRC_PATH_BARE)/build/make/rtcd.sh --arch=$$(TGT_ISA) \
+ --sym=$(1) \
+ --config=$$(CONFIG_DIR)$$(target)$$(if $$(FAT_ARCHS),,-$$(TOOLCHAIN)).mk \
+ $$(RTCD_OPTIONS) $$^ > $$@
+CLEAN-OBJS += $$(BUILD_PFX)$(1).h
+RTCD += $$(BUILD_PFX)$(1).h
+endef
+
CODEC_SRCS-yes += CHANGELOG
CODEC_SRCS-yes += libs.mk
@@ -40,9 +80,12 @@ CODEC_SRCS-yes += $(addprefix vpx_scale/,$(call enabled,SCALE_SRCS))
include $(SRC_PATH_BARE)/vpx_ports/vpx_ports.mk
CODEC_SRCS-yes += $(addprefix vpx_ports/,$(call enabled,PORTS_SRCS))
+ifneq ($(CONFIG_VP8_ENCODER)$(CONFIG_VP8_DECODER),)
+ VP8_PREFIX=vp8/
+ include $(SRC_PATH_BARE)/$(VP8_PREFIX)vp8_common.mk
+endif
ifeq ($(CONFIG_VP8_ENCODER),yes)
- VP8_PREFIX=vp8/
include $(SRC_PATH_BARE)/$(VP8_PREFIX)vp8cx.mk
CODEC_SRCS-yes += $(addprefix $(VP8_PREFIX),$(call enabled,VP8_CX_SRCS))
CODEC_EXPORTS-yes += $(addprefix $(VP8_PREFIX),$(VP8_CX_EXPORTS))
@@ -52,7 +95,6 @@ ifeq ($(CONFIG_VP8_ENCODER),yes)
endif
ifeq ($(CONFIG_VP8_DECODER),yes)
- VP8_PREFIX=vp8/
include $(SRC_PATH_BARE)/$(VP8_PREFIX)vp8dx.mk
CODEC_SRCS-yes += $(addprefix $(VP8_PREFIX),$(call enabled,VP8_DX_SRCS))
CODEC_EXPORTS-yes += $(addprefix $(VP8_PREFIX),$(VP8_DX_EXPORTS))
@@ -61,6 +103,35 @@ ifeq ($(CONFIG_VP8_DECODER),yes)
CODEC_DOC_SECTIONS += vp8 vp8_decoder
endif
+ifneq ($(CONFIG_VP9_ENCODER)$(CONFIG_VP9_DECODER),)
+ VP9_PREFIX=vp9/
+ include $(SRC_PATH_BARE)/$(VP9_PREFIX)vp9_common.mk
+endif
+
+ifeq ($(CONFIG_VP9_ENCODER),yes)
+ VP9_PREFIX=vp9/
+ include $(SRC_PATH_BARE)/$(VP9_PREFIX)vp9cx.mk
+ CODEC_SRCS-yes += $(addprefix $(VP9_PREFIX),$(call enabled,VP9_CX_SRCS))
+ CODEC_EXPORTS-yes += $(addprefix $(VP9_PREFIX),$(VP9_CX_EXPORTS))
+ CODEC_SRCS-yes += $(VP9_PREFIX)vp9cx.mk vpx/vp8.h vpx/vp8cx.h
+ INSTALL-LIBS-yes += include/vpx/vp8.h include/vpx/vp8cx.h
+ INSTALL_MAPS += include/vpx/% $(SRC_PATH_BARE)/$(VP9_PREFIX)/%
+ CODEC_DOC_SRCS += vpx/vp8.h vpx/vp8cx.h
+ CODEC_DOC_SECTIONS += vp9 vp9_encoder
+endif
+
+ifeq ($(CONFIG_VP9_DECODER),yes)
+ VP9_PREFIX=vp9/
+ include $(SRC_PATH_BARE)/$(VP9_PREFIX)vp9dx.mk
+ CODEC_SRCS-yes += $(addprefix $(VP9_PREFIX),$(call enabled,VP9_DX_SRCS))
+ CODEC_EXPORTS-yes += $(addprefix $(VP9_PREFIX),$(VP9_DX_EXPORTS))
+ CODEC_SRCS-yes += $(VP9_PREFIX)vp9dx.mk vpx/vp8.h vpx/vp8dx.h
+ INSTALL-LIBS-yes += include/vpx/vp8.h include/vpx/vp8dx.h
+ INSTALL_MAPS += include/vpx/% $(SRC_PATH_BARE)/$(VP9_PREFIX)/%
+ CODEC_DOC_SRCS += vpx/vp8.h vpx/vp8dx.h
+ CODEC_DOC_SECTIONS += vp9 vp9_decoder
+endif
+
ifeq ($(CONFIG_ENCODERS),yes)
CODEC_DOC_SECTIONS += encoder
@@ -91,8 +162,13 @@ endif
CODEC_SRCS-$(BUILD_LIBVPX) += build/make/version.sh
CODEC_SRCS-$(BUILD_LIBVPX) += build/make/rtcd.sh
+CODEC_SRCS-$(BUILD_LIBVPX) += vpx_ports/emmintrin_compat.h
+CODEC_SRCS-$(BUILD_LIBVPX) += vpx_ports/vpx_once.h
CODEC_SRCS-$(BUILD_LIBVPX) += $(BUILD_PFX)vpx_config.c
INSTALL-SRCS-no += $(BUILD_PFX)vpx_config.c
+ifeq ($(ARCH_X86)$(ARCH_X86_64),yes)
+CODEC_SRCS-$(BUILD_LIBVPX) += third_party/x86inc/x86inc.asm
+endif
CODEC_EXPORTS-$(BUILD_LIBVPX) += vpx/exports_com
CODEC_EXPORTS-$(CONFIG_ENCODERS) += vpx/exports_enc
CODEC_EXPORTS-$(CONFIG_DECODERS) += vpx/exports_dec
@@ -116,7 +192,7 @@ INSTALL-LIBS-$(CONFIG_STATIC) += $(LIBSUBDIR)/libvpx.a
INSTALL-LIBS-$(CONFIG_DEBUG_LIBS) += $(LIBSUBDIR)/libvpx_g.a
endif
-CODEC_SRCS=$(filter-out %_test.cc,$(call enabled,CODEC_SRCS))
+CODEC_SRCS=$(call enabled,CODEC_SRCS)
INSTALL-SRCS-$(CONFIG_CODEC_SRCS) += $(CODEC_SRCS)
INSTALL-SRCS-$(CONFIG_CODEC_SRCS) += $(call enabled,CODEC_EXPORTS)
@@ -131,10 +207,10 @@ libvpx_srcs.txt:
ifeq ($(CONFIG_EXTERNAL_BUILD),yes)
ifeq ($(CONFIG_MSVS),yes)
-obj_int_extract.vcproj: $(SRC_PATH_BARE)/build/make/obj_int_extract.c
- @cp $(SRC_PATH_BARE)/build/x86-msvs/obj_int_extract.bat .
+obj_int_extract.$(VCPROJ_SFX): $(SRC_PATH_BARE)/build/make/obj_int_extract.c
+ @cp $(SRC_PATH_BARE)/build/$(MSVS_ARCH_DIR)/obj_int_extract.bat .
@echo " [CREATE] $@"
- $(qexec)$(SRC_PATH_BARE)/build/make/gen_msvs_proj.sh \
+ $(qexec)$(GEN_VCPROJ) \
--exe \
--target=$(TOOLCHAIN) \
--name=obj_int_extract \
@@ -145,8 +221,7 @@ obj_int_extract.vcproj: $(SRC_PATH_BARE)/build/make/obj_int_extract.c
-I. \
-I"$(SRC_PATH_BARE)" \
-PROJECTS-$(BUILD_LIBVPX) += obj_int_extract.vcproj
-PROJECTS-$(BUILD_LIBVPX) += obj_int_extract.bat
+PROJECTS-$(BUILD_LIBVPX) += obj_int_extract.$(VCPROJ_SFX)
vpx.def: $(call enabled,CODEC_EXPORTS)
@echo " [CREATE] $@"
@@ -155,11 +230,11 @@ vpx.def: $(call enabled,CODEC_EXPORTS)
--out=$@ $^
CLEAN-OBJS += vpx.def
-vpx.vcproj: $(CODEC_SRCS) vpx.def
+vpx.$(VCPROJ_SFX): $(CODEC_SRCS) vpx.def obj_int_extract.$(VCPROJ_SFX)
@echo " [CREATE] $@"
- $(qexec)$(SRC_PATH_BARE)/build/make/gen_msvs_proj.sh \
- --lib \
- --target=$(TOOLCHAIN) \
+ $(qexec)$(GEN_VCPROJ) \
+ $(if $(CONFIG_SHARED),--dll,--lib) \
+ --target=$(TOOLCHAIN) \
$(if $(CONFIG_STATIC_MSVCRT),--static-crt) \
--name=vpx \
--proj-guid=DCE19DAF-69AC-46DB-B14A-39F0FAA5DB74 \
@@ -168,10 +243,10 @@ vpx.vcproj: $(CODEC_SRCS) vpx.def
--out=$@ $(CFLAGS) $^ \
--src-path-bare="$(SRC_PATH_BARE)" \
-PROJECTS-$(BUILD_LIBVPX) += vpx.vcproj
+PROJECTS-$(BUILD_LIBVPX) += vpx.$(VCPROJ_SFX)
-vpx.vcproj: vpx_config.asm
-vpx.vcproj: vpx_rtcd.h
+vpx.$(VCPROJ_SFX): vpx_config.asm
+vpx.$(VCPROJ_SFX): $(RTCD)
endif
else
@@ -180,17 +255,29 @@ OBJS-$(BUILD_LIBVPX) += $(LIBVPX_OBJS)
LIBS-$(if $(BUILD_LIBVPX),$(CONFIG_STATIC)) += $(BUILD_PFX)libvpx.a $(BUILD_PFX)libvpx_g.a
$(BUILD_PFX)libvpx_g.a: $(LIBVPX_OBJS)
+
BUILD_LIBVPX_SO := $(if $(BUILD_LIBVPX),$(CONFIG_SHARED))
+
+ifeq ($(filter darwin%,$(TGT_OS)),$(TGT_OS))
+LIBVPX_SO := libvpx.$(VERSION_MAJOR).dylib
+EXPORT_FILE := libvpx.syms
+LIBVPX_SO_SYMLINKS := $(addprefix $(LIBSUBDIR)/, \
+ libvpx.dylib )
+else
LIBVPX_SO := libvpx.so.$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_PATCH)
+EXPORT_FILE := libvpx.ver
+SYM_LINK := libvpx.so
+LIBVPX_SO_SYMLINKS := $(addprefix $(LIBSUBDIR)/, \
+ libvpx.so libvpx.so.$(VERSION_MAJOR) \
+ libvpx.so.$(VERSION_MAJOR).$(VERSION_MINOR))
+endif
+
LIBS-$(BUILD_LIBVPX_SO) += $(BUILD_PFX)$(LIBVPX_SO)\
$(notdir $(LIBVPX_SO_SYMLINKS))
-$(BUILD_PFX)$(LIBVPX_SO): $(LIBVPX_OBJS) libvpx.ver
+$(BUILD_PFX)$(LIBVPX_SO): $(LIBVPX_OBJS) $(EXPORT_FILE)
$(BUILD_PFX)$(LIBVPX_SO): extralibs += -lm
$(BUILD_PFX)$(LIBVPX_SO): SONAME = libvpx.so.$(VERSION_MAJOR)
-$(BUILD_PFX)$(LIBVPX_SO): SO_VERSION_SCRIPT = libvpx.ver
-LIBVPX_SO_SYMLINKS := $(addprefix $(LIBSUBDIR)/, \
- libvpx.so libvpx.so.$(VERSION_MAJOR) \
- libvpx.so.$(VERSION_MAJOR).$(VERSION_MINOR))
+$(BUILD_PFX)$(LIBVPX_SO): EXPORTS_FILE = $(EXPORT_FILE)
libvpx.ver: $(call enabled,CODEC_EXPORTS)
@echo " [CREATE] $@"
@@ -199,10 +286,16 @@ libvpx.ver: $(call enabled,CODEC_EXPORTS)
$(qexec)echo "local: *; };" >> $@
CLEAN-OBJS += libvpx.ver
+libvpx.syms: $(call enabled,CODEC_EXPORTS)
+ @echo " [CREATE] $@"
+ $(qexec)awk '{print "_"$$2}' $^ >$@
+CLEAN-OBJS += libvpx.syms
+
define libvpx_symlink_template
$(1): $(2)
- @echo " [LN] $$@"
- $(qexec)ln -sf $(LIBVPX_SO) $$@
+ @echo " [LN] $(2) $$@"
+ $(qexec)mkdir -p $$(dir $$@)
+ $(qexec)ln -sf $(2) $$@
endef
$(eval $(call libvpx_symlink_template,\
@@ -210,10 +303,12 @@ $(eval $(call libvpx_symlink_template,\
$(BUILD_PFX)$(LIBVPX_SO)))
$(eval $(call libvpx_symlink_template,\
$(addprefix $(DIST_DIR)/,$(LIBVPX_SO_SYMLINKS)),\
- $(DIST_DIR)/$(LIBSUBDIR)/$(LIBVPX_SO)))
+ $(LIBVPX_SO)))
+
+
+INSTALL-LIBS-$(BUILD_LIBVPX_SO) += $(LIBVPX_SO_SYMLINKS)
+INSTALL-LIBS-$(BUILD_LIBVPX_SO) += $(LIBSUBDIR)/$(LIBVPX_SO)
-INSTALL-LIBS-$(CONFIG_SHARED) += $(LIBVPX_SO_SYMLINKS)
-INSTALL-LIBS-$(CONFIG_SHARED) += $(LIBSUBDIR)/$(LIBVPX_SO)
LIBS-$(BUILD_LIBVPX) += vpx.pc
vpx.pc: config.mk libs.mk
@@ -229,8 +324,12 @@ vpx.pc: config.mk libs.mk
$(qexec)echo 'Version: $(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_PATCH)' >> $@
$(qexec)echo 'Requires:' >> $@
$(qexec)echo 'Conflicts:' >> $@
- $(qexec)echo 'Libs: -L$${libdir} -lvpx' >> $@
+ $(qexec)echo 'Libs: -L$${libdir} -lvpx -lm' >> $@
+ifeq ($(HAVE_PTHREAD_H),yes)
$(qexec)echo 'Libs.private: -lm -lpthread' >> $@
+else
+ $(qexec)echo 'Libs.private: -lm' >> $@
+endif
$(qexec)echo 'Cflags: -I$${includedir}' >> $@
INSTALL-LIBS-yes += $(LIBSUBDIR)/pkgconfig/vpx.pc
INSTALL_MAPS += $(LIBSUBDIR)/pkgconfig/%.pc %.pc
@@ -265,71 +364,10 @@ endif
$(filter %.s.o,$(OBJS-yes)): $(BUILD_PFX)vpx_config.asm
$(filter %$(ASM).o,$(OBJS-yes)): $(BUILD_PFX)vpx_config.asm
-#
-# Calculate platform- and compiler-specific offsets for hand coded assembly
-#
-
-OFFSET_PATTERN:='^[a-zA-Z0-9_]* EQU'
-
-ifeq ($(filter icc gcc,$(TGT_CC)), $(TGT_CC))
- $(BUILD_PFX)asm_com_offsets.asm: $(BUILD_PFX)$(VP8_PREFIX)common/asm_com_offsets.c.S
- @echo " [CREATE] $@"
- $(qexec)LC_ALL=C grep $(OFFSET_PATTERN) $< | tr -d '$$\#' $(ADS2GAS) > $@
- $(BUILD_PFX)$(VP8_PREFIX)common/asm_com_offsets.c.S: $(VP8_PREFIX)common/asm_com_offsets.c
- CLEAN-OBJS += $(BUILD_PFX)asm_com_offsets.asm $(BUILD_PFX)$(VP8_PREFIX)common/asm_com_offsets.c.S
-
- $(BUILD_PFX)asm_enc_offsets.asm: $(BUILD_PFX)$(VP8_PREFIX)encoder/asm_enc_offsets.c.S
- @echo " [CREATE] $@"
- $(qexec)LC_ALL=C grep $(OFFSET_PATTERN) $< | tr -d '$$\#' $(ADS2GAS) > $@
- $(BUILD_PFX)$(VP8_PREFIX)encoder/asm_enc_offsets.c.S: $(VP8_PREFIX)encoder/asm_enc_offsets.c
- CLEAN-OBJS += $(BUILD_PFX)asm_enc_offsets.asm $(BUILD_PFX)$(VP8_PREFIX)encoder/asm_enc_offsets.c.S
-
- $(BUILD_PFX)asm_dec_offsets.asm: $(BUILD_PFX)$(VP8_PREFIX)decoder/asm_dec_offsets.c.S
- @echo " [CREATE] $@"
- $(qexec)LC_ALL=C grep $(OFFSET_PATTERN) $< | tr -d '$$\#' $(ADS2GAS) > $@
- $(BUILD_PFX)$(VP8_PREFIX)decoder/asm_dec_offsets.c.S: $(VP8_PREFIX)decoder/asm_dec_offsets.c
- CLEAN-OBJS += $(BUILD_PFX)asm_dec_offsets.asm $(BUILD_PFX)$(VP8_PREFIX)decoder/asm_dec_offsets.c.S
-else
- ifeq ($(filter rvct,$(TGT_CC)), $(TGT_CC))
- asm_com_offsets.asm: obj_int_extract
- asm_com_offsets.asm: $(VP8_PREFIX)common/asm_com_offsets.c.o
- @echo " [CREATE] $@"
- $(qexec)./obj_int_extract rvds $< $(ADS2GAS) > $@
- OBJS-yes += $(VP8_PREFIX)common/asm_com_offsets.c.o
- CLEAN-OBJS += asm_com_offsets.asm
- $(filter %$(ASM).o,$(OBJS-yes)): $(BUILD_PFX)asm_com_offsets.asm
-
- asm_enc_offsets.asm: obj_int_extract
- asm_enc_offsets.asm: $(VP8_PREFIX)encoder/asm_enc_offsets.c.o
- @echo " [CREATE] $@"
- $(qexec)./obj_int_extract rvds $< $(ADS2GAS) > $@
- OBJS-yes += $(VP8_PREFIX)encoder/asm_enc_offsets.c.o
- CLEAN-OBJS += asm_enc_offsets.asm
- $(filter %$(ASM).o,$(OBJS-yes)): $(BUILD_PFX)asm_enc_offsets.asm
-
- asm_dec_offsets.asm: obj_int_extract
- asm_dec_offsets.asm: $(VP8_PREFIX)decoder/asm_dec_offsets.c.o
- @echo " [CREATE] $@"
- $(qexec)./obj_int_extract rvds $< $(ADS2GAS) > $@
- OBJS-yes += $(VP8_PREFIX)decoder/asm_dec_offsets.c.o
- CLEAN-OBJS += asm_dec_offsets.asm
- $(filter %$(ASM).o,$(OBJS-yes)): $(BUILD_PFX)asm_dec_offsets.asm
- endif
-endif
$(shell $(SRC_PATH_BARE)/build/make/version.sh "$(SRC_PATH_BARE)" $(BUILD_PFX)vpx_version.h)
CLEAN-OBJS += $(BUILD_PFX)vpx_version.h
-#
-# Rule to generate runtime cpu detection files
-#
-$(BUILD_PFX)vpx_rtcd.h: $(SRC_PATH_BARE)/$(sort $(filter %rtcd_defs.sh,$(CODEC_SRCS)))
- @echo " [CREATE] $@"
- $(qexec)$(SRC_PATH_BARE)/build/make/rtcd.sh --arch=$(TGT_ISA) \
- --sym=vpx_rtcd \
- --config=$(target)$(if $(FAT_ARCHS),,-$(TOOLCHAIN)).mk \
- $(RTCD_OPTIONS) $^ > $@
-CLEAN-OBJS += $(BUILD_PFX)vpx_rtcd.h
##
## libvpx test directives
@@ -339,7 +377,7 @@ LIBVPX_TEST_DATA_PATH ?= .
include $(SRC_PATH_BARE)/test/test.mk
LIBVPX_TEST_SRCS=$(addprefix test/,$(call enabled,LIBVPX_TEST_SRCS))
-LIBVPX_TEST_BINS=./test_libvpx
+LIBVPX_TEST_BINS=./test_libvpx$(EXE_SFX)
LIBVPX_TEST_DATA=$(addprefix $(LIBVPX_TEST_DATA_PATH)/,\
$(call enabled,LIBVPX_TEST_DATA))
libvpx_test_data_url=http://downloads.webmproject.org/test_data/libvpx/$(1)
@@ -365,9 +403,9 @@ testdata:: $(LIBVPX_TEST_DATA)
ifeq ($(CONFIG_EXTERNAL_BUILD),yes)
ifeq ($(CONFIG_MSVS),yes)
-gtest.vcproj: $(SRC_PATH_BARE)/third_party/googletest/src/src/gtest-all.cc
+gtest.$(VCPROJ_SFX): $(SRC_PATH_BARE)/third_party/googletest/src/src/gtest-all.cc
@echo " [CREATE] $@"
- $(qexec)$(SRC_PATH_BARE)/build/make/gen_msvs_proj.sh \
+ $(qexec)$(GEN_VCPROJ) \
--lib \
--target=$(TOOLCHAIN) \
$(if $(CONFIG_STATIC_MSVCRT),--static-crt) \
@@ -375,28 +413,29 @@ gtest.vcproj: $(SRC_PATH_BARE)/third_party/googletest/src/src/gtest-all.cc
--proj-guid=EC00E1EC-AF68-4D92-A255-181690D1C9B1 \
--ver=$(CONFIG_VS_VERSION) \
--src-path-bare="$(SRC_PATH_BARE)" \
- --out=gtest.vcproj $(SRC_PATH_BARE)/third_party/googletest/src/src/gtest-all.cc \
+ -D_VARIADIC_MAX=10 \
+ --out=gtest.$(VCPROJ_SFX) $(SRC_PATH_BARE)/third_party/googletest/src/src/gtest-all.cc \
-I. -I"$(SRC_PATH_BARE)/third_party/googletest/src/include" -I"$(SRC_PATH_BARE)/third_party/googletest/src"
-PROJECTS-$(CONFIG_MSVS) += gtest.vcproj
+PROJECTS-$(CONFIG_MSVS) += gtest.$(VCPROJ_SFX)
-test_libvpx.vcproj: $(LIBVPX_TEST_SRCS)
+test_libvpx.$(VCPROJ_SFX): $(LIBVPX_TEST_SRCS) vpx.$(VCPROJ_SFX) gtest.$(VCPROJ_SFX)
@echo " [CREATE] $@"
- $(qexec)$(SRC_PATH_BARE)/build/make/gen_msvs_proj.sh \
+ $(qexec)$(GEN_VCPROJ) \
--exe \
--target=$(TOOLCHAIN) \
--name=test_libvpx \
+ -D_VARIADIC_MAX=10 \
--proj-guid=CD837F5F-52D8-4314-A370-895D614166A7 \
--ver=$(CONFIG_VS_VERSION) \
$(if $(CONFIG_STATIC_MSVCRT),--static-crt) \
--out=$@ $(INTERNAL_CFLAGS) $(CFLAGS) \
-I. -I"$(SRC_PATH_BARE)/third_party/googletest/src/include" \
- -L. -l$(CODEC_LIB) -lwinmm -l$(GTEST_LIB) $^
+ -L. -l$(CODEC_LIB) -l$(GTEST_LIB) $^
-PROJECTS-$(CONFIG_MSVS) += test_libvpx.vcproj
+PROJECTS-$(CONFIG_MSVS) += test_libvpx.$(VCPROJ_SFX)
-test:: testdata
- @set -e; for t in $(addprefix Win32/Release/,$(notdir $(LIBVPX_TEST_BINS:.cc=.exe))); do $$t; done
+LIBVPX_TEST_BINS := $(addprefix $(TGT_OS:win64=x64)/Release/,$(notdir $(LIBVPX_TEST_BINS)))
endif
else
@@ -431,10 +470,24 @@ $(foreach bin,$(LIBVPX_TEST_BINS),\
)))\
$(if $(LIPO_LIBS),$(eval $(call lipo_bin_template,$(bin))))\
-test:: $(LIBVPX_TEST_BINS) testdata
- @set -e; for t in $(LIBVPX_TEST_BINS); do $$t; done
-
endif
+
+define test_shard_template
+test:: test_shard.$(1)
+test_shard.$(1): $(LIBVPX_TEST_BINS) testdata
+ @set -e; \
+ for t in $(LIBVPX_TEST_BINS); do \
+ export GTEST_SHARD_INDEX=$(1); \
+ export GTEST_TOTAL_SHARDS=$(2); \
+ $$$$t; \
+ done
+.PHONY: test_shard.$(1)
+endef
+
+NUM_SHARDS := 10
+SHARDS := 0 1 2 3 4 5 6 7 8 9
+$(foreach s,$(SHARDS),$(eval $(call test_shard_template,$(s),$(NUM_SHARDS))))
+
endif
##
@@ -450,5 +503,8 @@ libs.doxy: $(CODEC_DOC_SRCS)
@echo "INCLUDE_PATH += ." >> $@;
@echo "ENABLED_SECTIONS += $(sort $(CODEC_DOC_SECTIONS))" >> $@
-## Generate vpx_rtcd.h for all objects
-$(OBJS-yes:.o=.d): $(BUILD_PFX)vpx_rtcd.h
+## Generate rtcd.h for all objects
+$(OBJS-yes:.o=.d): $(RTCD)
+
+## Update the global src list
+SRCS += $(CODEC_SRCS) $(LIBVPX_TEST_SRCS) $(GTEST_SRCS)
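
The test_shard_template above leans on gtest's environment-driven sharding: with GTEST_TOTAL_SHARDS and GTEST_SHARD_INDEX set, each invocation runs only its slice of the suite, so the ten make targets together cover every test once. Roughly, the partition behaves like this sketch (shard values are illustrative):

    #include <stdio.h>

    int main(void) {
      int total = 10, index = 3;  /* shard 3 of 10, hypothetical */
      for (int i = 0; i < 25; i++)
        if (i % total == index)   /* gtest-style round-robin split */
          printf("shard %d runs test %d\n", index, i);  /* 3, 13, 23 */
      return 0;
    }
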
diff --git a/libvpx/md5_utils.c b/libvpx/md5_utils.c
index 9a584fa..8fb26e2 100644
--- a/libvpx/md5_utils.c
+++ b/libvpx/md5_utils.c
@@ -25,25 +25,22 @@
#include "md5_utils.h"
void
-byteSwap(UWORD32 *buf, unsigned words)
-{
- md5byte *p;
+byteSwap(UWORD32 *buf, unsigned words) {
+ md5byte *p;
- /* Only swap bytes for big endian machines */
- int i = 1;
+ /* Only swap bytes for big endian machines */
+ int i = 1;
- if (*(char *)&i == 1)
- return;
+ if (*(char *)&i == 1)
+ return;
- p = (md5byte *)buf;
+ p = (md5byte *)buf;
- do
- {
- *buf++ = (UWORD32)((unsigned)p[3] << 8 | p[2]) << 16 |
- ((unsigned)p[1] << 8 | p[0]);
- p += 4;
- }
- while (--words);
+ do {
+ *buf++ = (UWORD32)((unsigned)p[3] << 8 | p[2]) << 16 |
+ ((unsigned)p[1] << 8 | p[0]);
+ p += 4;
+ } while (--words);
}
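
The probe in byteSwap above stores 1 in an int and inspects its first byte: on a little-endian host that byte is 1 and the word layout already matches what MD5 expects, so the swap is skipped. The same check in isolation:

    #include <stdio.h>

    int main(void) {
      int i = 1;
      printf("%s-endian\n", *(char *)&i == 1 ? "little" : "big");
      return 0;
    }
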
/*
@@ -51,15 +48,14 @@ byteSwap(UWORD32 *buf, unsigned words)
* initialization constants.
*/
void
-MD5Init(struct MD5Context *ctx)
-{
- ctx->buf[0] = 0x67452301;
- ctx->buf[1] = 0xefcdab89;
- ctx->buf[2] = 0x98badcfe;
- ctx->buf[3] = 0x10325476;
-
- ctx->bytes[0] = 0;
- ctx->bytes[1] = 0;
+MD5Init(struct MD5Context *ctx) {
+ ctx->buf[0] = 0x67452301;
+ ctx->buf[1] = 0xefcdab89;
+ ctx->buf[2] = 0x98badcfe;
+ ctx->buf[3] = 0x10325476;
+
+ ctx->bytes[0] = 0;
+ ctx->bytes[1] = 0;
}
/*
@@ -67,44 +63,41 @@ MD5Init(struct MD5Context *ctx)
* of bytes.
*/
void
-MD5Update(struct MD5Context *ctx, md5byte const *buf, unsigned len)
-{
- UWORD32 t;
+MD5Update(struct MD5Context *ctx, md5byte const *buf, unsigned len) {
+ UWORD32 t;
- /* Update byte count */
+ /* Update byte count */
- t = ctx->bytes[0];
+ t = ctx->bytes[0];
- if ((ctx->bytes[0] = t + len) < t)
- ctx->bytes[1]++; /* Carry from low to high */
+ if ((ctx->bytes[0] = t + len) < t)
+ ctx->bytes[1]++; /* Carry from low to high */
- t = 64 - (t & 0x3f); /* Space available in ctx->in (at least 1) */
+ t = 64 - (t & 0x3f); /* Space available in ctx->in (at least 1) */
- if (t > len)
- {
- memcpy((md5byte *)ctx->in + 64 - t, buf, len);
- return;
- }
+ if (t > len) {
+ memcpy((md5byte *)ctx->in + 64 - t, buf, len);
+ return;
+ }
- /* First chunk is an odd size */
- memcpy((md5byte *)ctx->in + 64 - t, buf, t);
+ /* First chunk is an odd size */
+ memcpy((md5byte *)ctx->in + 64 - t, buf, t);
+ byteSwap(ctx->in, 16);
+ MD5Transform(ctx->buf, ctx->in);
+ buf += t;
+ len -= t;
+
+ /* Process data in 64-byte chunks */
+ while (len >= 64) {
+ memcpy(ctx->in, buf, 64);
byteSwap(ctx->in, 16);
MD5Transform(ctx->buf, ctx->in);
- buf += t;
- len -= t;
-
- /* Process data in 64-byte chunks */
- while (len >= 64)
- {
- memcpy(ctx->in, buf, 64);
- byteSwap(ctx->in, 16);
- MD5Transform(ctx->buf, ctx->in);
- buf += 64;
- len -= 64;
- }
-
- /* Handle any remaining bytes of data. */
- memcpy(ctx->in, buf, len);
+ buf += 64;
+ len -= 64;
+ }
+
+ /* Handle any remaining bytes of data. */
+ memcpy(ctx->in, buf, len);
}
/*
@@ -112,37 +105,35 @@ MD5Update(struct MD5Context *ctx, md5byte const *buf, unsigned len)
* 1 0* (64-bit count of bits processed, MSB-first)
*/
void
-MD5Final(md5byte digest[16], struct MD5Context *ctx)
-{
- int count = ctx->bytes[0] & 0x3f; /* Number of bytes in ctx->in */
- md5byte *p = (md5byte *)ctx->in + count;
-
- /* Set the first char of padding to 0x80. There is always room. */
- *p++ = 0x80;
-
- /* Bytes of padding needed to make 56 bytes (-8..55) */
- count = 56 - 1 - count;
-
- if (count < 0) /* Padding forces an extra block */
- {
- memset(p, 0, count + 8);
- byteSwap(ctx->in, 16);
- MD5Transform(ctx->buf, ctx->in);
- p = (md5byte *)ctx->in;
- count = 56;
- }
-
- memset(p, 0, count);
- byteSwap(ctx->in, 14);
-
- /* Append length in bits and transform */
- ctx->in[14] = ctx->bytes[0] << 3;
- ctx->in[15] = ctx->bytes[1] << 3 | ctx->bytes[0] >> 29;
+MD5Final(md5byte digest[16], struct MD5Context *ctx) {
+ int count = ctx->bytes[0] & 0x3f; /* Number of bytes in ctx->in */
+ md5byte *p = (md5byte *)ctx->in + count;
+
+ /* Set the first char of padding to 0x80. There is always room. */
+ *p++ = 0x80;
+
+ /* Bytes of padding needed to make 56 bytes (-8..55) */
+ count = 56 - 1 - count;
+
+ if (count < 0) { /* Padding forces an extra block */
+ memset(p, 0, count + 8);
+ byteSwap(ctx->in, 16);
MD5Transform(ctx->buf, ctx->in);
+ p = (md5byte *)ctx->in;
+ count = 56;
+ }
+
+ memset(p, 0, count);
+ byteSwap(ctx->in, 14);
+
+ /* Append length in bits and transform */
+ ctx->in[14] = ctx->bytes[0] << 3;
+ ctx->in[15] = ctx->bytes[1] << 3 | ctx->bytes[0] >> 29;
+ MD5Transform(ctx->buf, ctx->in);
- byteSwap(ctx->buf, 4);
- memcpy(digest, ctx->buf, 16);
- memset(ctx, 0, sizeof(*ctx)); /* In case it's sensitive */
+ byteSwap(ctx->buf, 4);
+ memcpy(digest, ctx->buf, 16);
+ memset(ctx, 0, sizeof(*ctx)); /* In case it's sensitive */
}
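
To make the "1 0* (64-bit count of bits)" padding concrete: hashing the 3-byte message "abc" fills a single 64-byte block as below (standard MD5 padding, shown only as a worked example):

    bytes  0..2    0x61 0x62 0x63   the message itself
    byte   3       0x80             the mandatory 1 bit, then zeros
    bytes  4..55   0x00 ... 0x00    zero padding up to byte 56
    bytes 56..63   24               bit count (3 bytes * 8) as a 64-bit integer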
#ifndef ASM_MD5
@@ -157,7 +148,7 @@ MD5Final(md5byte digest[16], struct MD5Context *ctx)
/* This is the central step in the MD5 algorithm. */
#define MD5STEP(f,w,x,y,z,in,s) \
- (w += f(x,y,z) + in, w = (w<<s | w>>(32-s)) + x)
+ (w += f(x,y,z) + in, w = (w<<s | w>>(32-s)) + x)
/*
* The core of the MD5 algorithm, this alters an existing MD5 hash to
@@ -165,87 +156,86 @@ MD5Final(md5byte digest[16], struct MD5Context *ctx)
* the data and converts bytes into longwords for this routine.
*/
void
-MD5Transform(UWORD32 buf[4], UWORD32 const in[16])
-{
- register UWORD32 a, b, c, d;
-
- a = buf[0];
- b = buf[1];
- c = buf[2];
- d = buf[3];
-
- MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
- MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
- MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
- MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
- MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
- MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
- MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
- MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
- MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
- MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
- MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
- MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
- MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
- MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
- MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
- MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
-
- MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
- MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
- MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
- MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
- MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
- MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
- MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
- MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
- MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
- MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
- MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
- MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
- MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
- MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
- MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
- MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
-
- MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
- MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
- MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
- MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
- MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
- MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
- MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
- MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
- MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
- MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
- MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
- MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
- MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
- MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
- MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
- MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
-
- MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
- MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
- MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
- MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
- MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
- MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
- MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
- MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
- MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
- MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
- MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
- MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
- MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
- MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
- MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
- MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
-
- buf[0] += a;
- buf[1] += b;
- buf[2] += c;
- buf[3] += d;
+MD5Transform(UWORD32 buf[4], UWORD32 const in[16]) {
+ register UWORD32 a, b, c, d;
+
+ a = buf[0];
+ b = buf[1];
+ c = buf[2];
+ d = buf[3];
+
+ MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
+ MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
+ MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
+ MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
+ MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
+ MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
+ MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
+ MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
+ MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
+ MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
+ MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
+ MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
+ MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
+ MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
+ MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
+ MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
+
+ MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
+ MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
+ MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
+ MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
+ MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
+ MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
+ MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
+ MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
+ MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
+ MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
+ MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
+ MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
+ MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
+ MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
+ MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
+ MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
+
+ MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
+ MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
+ MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
+ MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
+ MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
+ MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
+ MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
+ MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
+ MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
+ MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
+ MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
+ MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
+ MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
+ MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
+ MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
+ MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
+
+ MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
+ MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
+ MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
+ MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
+ MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
+ MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
+ MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
+ MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
+ MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
+ MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
+ MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
+ MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
+ MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
+ MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
+ MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
+ MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
+
+ buf[0] += a;
+ buf[1] += b;
+ buf[2] += c;
+ buf[3] += d;
}
#endif
diff --git a/libvpx/md5_utils.h b/libvpx/md5_utils.h
index 5ca1b5f..81792c4 100644
--- a/libvpx/md5_utils.h
+++ b/libvpx/md5_utils.h
@@ -27,11 +27,10 @@
#define UWORD32 unsigned int
typedef struct MD5Context MD5Context;
-struct MD5Context
-{
- UWORD32 buf[4];
- UWORD32 bytes[2];
- UWORD32 in[16];
+struct MD5Context {
+ UWORD32 buf[4];
+ UWORD32 bytes[2];
+ UWORD32 in[16];
};
void MD5Init(struct MD5Context *context);
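
Together with MD5Update and MD5Final above, this gives the usual init/update/final flow. A minimal usage sketch against these declarations (assuming md5byte is the customary unsigned char typedef):

    #include "md5_utils.h"

    void hash_buffer(const md5byte *data, unsigned len, md5byte digest[16]) {
      struct MD5Context ctx;
      MD5Init(&ctx);               /* load the four chaining constants */
      MD5Update(&ctx, data, len);  /* may be called repeatedly to stream data */
      MD5Final(digest, &ctx);      /* pad, append bit count, emit 16 bytes */
    }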
diff --git a/libvpx/nestegg/halloc/src/macros.h b/libvpx/nestegg/halloc/src/macros.h
index c36b516..1f84bc2 100644
--- a/libvpx/nestegg/halloc/src/macros.h
+++ b/libvpx/nestegg/halloc/src/macros.h
@@ -20,7 +20,7 @@
/*
restore pointer to the structure by a pointer to its field
*/
-#define structof(p,t,f) ((t*)(- offsetof(t,f) + (char*)(p)))
+#define structof(p,t,f) ((t*)(- (ptrdiff_t) offsetof(t,f) + (char*)(p)))
/*
* redefine for the target compiler
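
structof is halloc's container_of: it recovers a pointer to the enclosing struct from a pointer to one of its fields, and the new ptrdiff_t cast keeps the negated offsetof result (a size_t) from wrapping around as unsigned. A usage sketch under hypothetical types:

    #include <stddef.h>

    struct link { struct link *next; };
    struct item { int value; struct link hook; };

    /* Given a pointer to item::hook, recover the containing item. */
    static struct item *item_from_link(struct link *l) {
      return structof(l, struct item, hook);
    }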
diff --git a/libvpx/nestegg/include/nestegg/nestegg.h b/libvpx/nestegg/include/nestegg/nestegg.h
index 7447d14..6510694 100644
--- a/libvpx/nestegg/include/nestegg/nestegg.h
+++ b/libvpx/nestegg/include/nestegg/nestegg.h
@@ -67,6 +67,7 @@ extern "C" {
#define NESTEGG_CODEC_VP8 0 /**< Track uses Google On2 VP8 codec. */
#define NESTEGG_CODEC_VORBIS 1 /**< Track uses Xiph Vorbis codec. */
+#define NESTEGG_CODEC_VP9 2 /**< Track uses Google On2 VP9 codec. */
#define NESTEGG_SEEK_SET 0 /**< Seek offset relative to beginning of stream. */
#define NESTEGG_SEEK_CUR 1 /**< Seek offset relative to current position in stream. */
diff --git a/libvpx/nestegg/src/nestegg.c b/libvpx/nestegg/src/nestegg.c
index cc87788..ae87e8f8 100644
--- a/libvpx/nestegg/src/nestegg.c
+++ b/libvpx/nestegg/src/nestegg.c
@@ -127,6 +127,7 @@ enum ebml_type_enum {
/* Track IDs */
#define TRACK_ID_VP8 "V_VP8"
+#define TRACK_ID_VP9 "V_VP9"
#define TRACK_ID_VORBIS "A_VORBIS"
enum vint_mask {
@@ -1669,6 +1670,9 @@ nestegg_track_codec_id(nestegg * ctx, unsigned int track)
if (strcmp(codec_id, TRACK_ID_VP8) == 0)
return NESTEGG_CODEC_VP8;
+ if (strcmp(codec_id, TRACK_ID_VP9) == 0)
+ return NESTEGG_CODEC_VP9;
+
if (strcmp(codec_id, TRACK_ID_VORBIS) == 0)
return NESTEGG_CODEC_VORBIS;
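
With the VP9 mapping in place, demuxer clients can dispatch on the returned constant. A hedged sketch of the caller side (only nestegg_track_codec_id and the NESTEGG_CODEC_* constants come from this library; the decoder helpers are hypothetical):

    int select_decoder(nestegg *ctx, unsigned int track) {
      switch (nestegg_track_codec_id(ctx, track)) {
        case NESTEGG_CODEC_VP8: return init_vp8_decoder();  /* hypothetical */
        case NESTEGG_CODEC_VP9: return init_vp9_decoder();  /* hypothetical */
        default:                return -1;  /* audio or unrecognized track */
      }
    }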
diff --git a/libvpx/solution.mk b/libvpx/solution.mk
index 948305f..2c8d29a 100644
--- a/libvpx/solution.mk
+++ b/libvpx/solution.mk
@@ -9,14 +9,14 @@
##
# libvpx reverse dependencies (targets that depend on libvpx)
-VPX_NONDEPS=$(addsuffix .vcproj,vpx gtest obj_int_extract)
+VPX_NONDEPS=$(addsuffix .$(VCPROJ_SFX),vpx gtest obj_int_extract)
VPX_RDEPS=$(foreach vcp,\
- $(filter-out $(VPX_NONDEPS),$^), --dep=$(vcp:.vcproj=):vpx)
+ $(filter-out $(VPX_NONDEPS),$^), --dep=$(vcp:.$(VCPROJ_SFX)=):vpx)
-vpx.sln: $(wildcard *.vcproj)
+vpx.sln: $(wildcard *.$(VCPROJ_SFX))
@echo " [CREATE] $@"
$(SRC_PATH_BARE)/build/make/gen_msvs_sln.sh \
- $(if $(filter vpx.vcproj,$^),$(VPX_RDEPS)) \
+ $(if $(filter vpx.$(VCPROJ_SFX),$^),$(VPX_RDEPS)) \
--dep=vpx:obj_int_extract \
--dep=test_libvpx:gtest \
--ver=$(CONFIG_VS_VERSION)\
diff --git a/libvpx/test/acm_random.h b/libvpx/test/acm_random.h
index 514894e..cd33d12 100644
--- a/libvpx/test/acm_random.h
+++ b/libvpx/test/acm_random.h
@@ -11,7 +11,7 @@
#ifndef LIBVPX_TEST_ACM_RANDOM_H_
#define LIBVPX_TEST_ACM_RANDOM_H_
-#include <stdlib.h>
+#include "third_party/googletest/src/include/gtest/gtest.h"
#include "vpx/vpx_integer.h"
@@ -19,24 +19,30 @@ namespace libvpx_test {
class ACMRandom {
public:
- ACMRandom() {
- Reset(DeterministicSeed());
- }
+ ACMRandom() : random_(DeterministicSeed()) {}
- explicit ACMRandom(int seed) {
- Reset(seed);
- }
+ explicit ACMRandom(int seed) : random_(seed) {}
void Reset(int seed) {
- srand(seed);
+ random_.Reseed(seed);
}
uint8_t Rand8(void) {
- return (rand() >> 8) & 0xff;
+ const uint32_t value =
+ random_.Generate(testing::internal::Random::kMaxRange);
+ // There's a bit more entropy in the upper bits of this implementation.
+ return (value >> 24) & 0xff;
+ }
+
+ uint8_t Rand8Extremes(void) {
+ // Returns a random value near 0 or near 255, to better exercise
+ // saturation behavior.
+ const uint8_t r = Rand8();
+ return r < 128 ? r << 4 : r >> 4;
}
int PseudoUniform(int range) {
- return (rand() >> 8) % range;
+ return random_.Generate(range);
}
int operator()(int n) {
@@ -46,6 +52,9 @@ class ACMRandom {
static int DeterministicSeed(void) {
return 0xbaba;
}
+
+ private:
+ testing::internal::Random random_;
};
} // namespace libvpx_test
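
The rewrite keeps ACMRandom's deterministic default seed, so tests remain reproducible while drawing from gtest's internal generator instead of rand(). A short usage sketch based only on the members shown above:

    #include "test/acm_random.h"

    void fill_test_block(uint8_t *buf, int n) {
      libvpx_test::ACMRandom rnd(libvpx_test::ACMRandom::DeterministicSeed());
      for (int i = 0; i < n; ++i)
        buf[i] = rnd.Rand8Extremes();  // values biased toward 0 and 255
      rnd.Reset(1234);                 // reseed for an independent stream
    }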
diff --git a/libvpx/test/altref_test.cc b/libvpx/test/altref_test.cc
index ca05577..14af265 100644
--- a/libvpx/test/altref_test.cc
+++ b/libvpx/test/altref_test.cc
@@ -8,19 +8,20 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
#include "test/i420_video_source.h"
-
+#include "test/util.h"
namespace {
// lookahead range: [kLookAheadMin, kLookAheadMax).
const int kLookAheadMin = 5;
const int kLookAheadMax = 26;
-class AltRefTest : public libvpx_test::EncoderTest,
- public ::testing::TestWithParam<int> {
+class AltRefTest : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<int> {
protected:
- AltRefTest() : altref_count_(0) {}
+ AltRefTest() : EncoderTest(GET_PARAM(0)), altref_count_(0) {}
virtual ~AltRefTest() {}
virtual void SetUp() {
@@ -58,7 +59,7 @@ TEST_P(AltRefTest, MonotonicTimestamps) {
const vpx_rational timebase = { 33333333, 1000000000 };
cfg_.g_timebase = timebase;
cfg_.rc_target_bitrate = 1000;
- cfg_.g_lag_in_frames = GetParam();
+ cfg_.g_lag_in_frames = GET_PARAM(1);
libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
timebase.den, timebase.num, 0, 30);
@@ -66,6 +67,7 @@ TEST_P(AltRefTest, MonotonicTimestamps) {
EXPECT_GE(altref_count(), 1);
}
-INSTANTIATE_TEST_CASE_P(NonZeroLag, AltRefTest,
- ::testing::Range(kLookAheadMin, kLookAheadMax));
+
+VP8_INSTANTIATE_TEST_CASE(AltRefTest,
+ ::testing::Range(kLookAheadMin, kLookAheadMax));
} // namespace
diff --git a/libvpx/test/borders_test.cc b/libvpx/test/borders_test.cc
new file mode 100644
index 0000000..49505ee
--- /dev/null
+++ b/libvpx/test/borders_test.cc
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <climits>
+#include <vector>
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
+#include "test/encode_test_driver.h"
+#include "test/i420_video_source.h"
+#include "test/util.h"
+
+namespace {
+
+class BordersTest : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+ protected:
+ BordersTest() : EncoderTest(GET_PARAM(0)) {}
+
+ virtual void SetUp() {
+ InitializeConfig();
+ SetMode(GET_PARAM(1));
+ }
+
+ virtual bool Continue() const {
+ return !HasFatalFailure() && !abort_;
+ }
+
+ virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
+ ::libvpx_test::Encoder *encoder) {
+ if (video->frame() == 1) {
+ encoder->Control(VP8E_SET_CPUUSED, 0);
+ encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
+ encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
+ encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
+ encoder->Control(VP8E_SET_ARNR_TYPE, 3);
+ }
+ }
+
+ virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+ if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
+ // Keyframe packets are currently ignored; the hook is kept as a placeholder.
+ }
+ }
+};
+
+TEST_P(BordersTest, TestEncodeHighBitrate) {
+ // Validate that this clip, whose width is not a multiple of 64, encodes
+ // and decodes without a mismatch when passing in a very low max q. This
+ // pushes the encoder to produce lots of big partitions that will likely
+ // extend into the border and exercise the border condition.
+ cfg_.g_lag_in_frames = 25;
+ cfg_.rc_2pass_vbr_minsection_pct = 5;
+ cfg_.rc_2pass_vbr_maxsection_pct = 2000;
+ cfg_.rc_target_bitrate = 2000;
+ cfg_.rc_max_quantizer = 10;
+
+ ::libvpx_test::I420VideoSource video("hantro_odd.yuv", 208, 144, 30, 1, 0,
+ 40);
+
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+}
+TEST_P(BordersTest, TestLowBitrate) {
+ // Validate that this clip encodes and decodes without a mismatch
+ // when passing in a very high min q. This pushes the encoder to produce
+ // lots of small partitions, which exercises the opposite condition.
+
+ cfg_.g_lag_in_frames = 25;
+ cfg_.rc_2pass_vbr_minsection_pct = 5;
+ cfg_.rc_2pass_vbr_maxsection_pct = 2000;
+ cfg_.rc_target_bitrate = 200;
+ cfg_.rc_min_quantizer = 40;
+
+ ::libvpx_test::I420VideoSource video("hantro_odd.yuv", 208, 144, 30, 1, 0,
+ 40);
+
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+}
+
+VP9_INSTANTIATE_TEST_CASE(BordersTest, ::testing::Values(
+ ::libvpx_test::kTwoPassGood));
+} // namespace
diff --git a/libvpx/test/clear_system_state.h b/libvpx/test/clear_system_state.h
new file mode 100644
index 0000000..e240981
--- /dev/null
+++ b/libvpx/test/clear_system_state.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef TEST_CLEAR_SYSTEM_STATE_H_
+#define TEST_CLEAR_SYSTEM_STATE_H_
+
+#include "vpx_config.h"
+extern "C" {
+#if ARCH_X86 || ARCH_X86_64
+# include "vpx_ports/x86.h"
+#endif
+}
+
+namespace libvpx_test {
+
+// Reset system to a known state. This function should be used for all non-API
+// test cases.
+inline void ClearSystemState() {
+#if ARCH_X86 || ARCH_X86_64
+ vpx_reset_mmx_state();
+#endif
+}
+
+} // namespace libvpx_test
+#endif // TEST_CLEAR_SYSTEM_STATE_H_
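
ClearSystemState is intended to run after every non-API test so MMX/x87 state left behind by assembly under test cannot corrupt later floating-point work. A sketch of the typical fixture wiring (the fixture itself is illustrative):

    #include "test/clear_system_state.h"
    #include "third_party/googletest/src/include/gtest/gtest.h"

    class SimdTest : public ::testing::Test {
     protected:
      virtual void TearDown() { libvpx_test::ClearSystemState(); }
    };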
diff --git a/libvpx/test/codec_factory.h b/libvpx/test/codec_factory.h
new file mode 100644
index 0000000..fdae572
--- /dev/null
+++ b/libvpx/test/codec_factory.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef TEST_CODEC_FACTORY_H_
+#define TEST_CODEC_FACTORY_H_
+
+extern "C" {
+#include "./vpx_config.h"
+#include "vpx/vpx_decoder.h"
+#include "vpx/vpx_encoder.h"
+#if CONFIG_VP8_ENCODER || CONFIG_VP9_ENCODER
+#include "vpx/vp8cx.h"
+#endif
+#if CONFIG_VP8_DECODER || CONFIG_VP9_DECODER
+#include "vpx/vp8dx.h"
+#endif
+}
+
+#include "test/decode_test_driver.h"
+#include "test/encode_test_driver.h"
+namespace libvpx_test {
+
+class CodecFactory {
+ public:
+ CodecFactory() {}
+
+ virtual ~CodecFactory() {}
+
+ virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg,
+ unsigned long deadline) const = 0;
+
+ virtual Encoder* CreateEncoder(vpx_codec_enc_cfg_t cfg,
+ unsigned long deadline,
+ const unsigned long init_flags,
+ TwopassStatsStore *stats) const = 0;
+
+ virtual vpx_codec_err_t DefaultEncoderConfig(vpx_codec_enc_cfg_t *cfg,
+ int usage) const = 0;
+};
+
+/* Provide CodecTestWith<n>Params classes for a variable number of parameters
+ * to avoid having to include a pointer to the CodecFactory in every test
+ * definition.
+ */
+template<class T1>
+class CodecTestWithParam : public ::testing::TestWithParam<
+ std::tr1::tuple< const libvpx_test::CodecFactory*, T1 > > {
+};
+
+template<class T1, class T2>
+class CodecTestWith2Params : public ::testing::TestWithParam<
+ std::tr1::tuple< const libvpx_test::CodecFactory*, T1, T2 > > {
+};
+
+template<class T1, class T2, class T3>
+class CodecTestWith3Params : public ::testing::TestWithParam<
+ std::tr1::tuple< const libvpx_test::CodecFactory*, T1, T2, T3 > > {
+};
+
+/*
+ * VP8 Codec Definitions
+ */
+#if CONFIG_VP8
+class VP8Decoder : public Decoder {
+ public:
+ VP8Decoder(vpx_codec_dec_cfg_t cfg, unsigned long deadline)
+ : Decoder(cfg, deadline) {}
+
+ protected:
+ virtual const vpx_codec_iface_t* CodecInterface() const {
+#if CONFIG_VP8_DECODER
+ return &vpx_codec_vp8_dx_algo;
+#else
+ return NULL;
+#endif
+ }
+};
+
+class VP8Encoder : public Encoder {
+ public:
+ VP8Encoder(vpx_codec_enc_cfg_t cfg, unsigned long deadline,
+ const unsigned long init_flags, TwopassStatsStore *stats)
+ : Encoder(cfg, deadline, init_flags, stats) {}
+
+ protected:
+ virtual const vpx_codec_iface_t* CodecInterface() const {
+#if CONFIG_VP8_ENCODER
+ return &vpx_codec_vp8_cx_algo;
+#else
+ return NULL;
+#endif
+ }
+};
+
+class VP8CodecFactory : public CodecFactory {
+ public:
+ VP8CodecFactory() : CodecFactory() {}
+
+ virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg,
+ unsigned long deadline) const {
+#if CONFIG_VP8_DECODER
+ return new VP8Decoder(cfg, deadline);
+#else
+ return NULL;
+#endif
+ }
+
+ virtual Encoder* CreateEncoder(vpx_codec_enc_cfg_t cfg,
+ unsigned long deadline,
+ const unsigned long init_flags,
+ TwopassStatsStore *stats) const {
+#if CONFIG_VP8_ENCODER
+ return new VP8Encoder(cfg, deadline, init_flags, stats);
+#else
+ return NULL;
+#endif
+ }
+
+ virtual vpx_codec_err_t DefaultEncoderConfig(vpx_codec_enc_cfg_t *cfg,
+ int usage) const {
+#if CONFIG_VP8_ENCODER
+ return vpx_codec_enc_config_default(&vpx_codec_vp8_cx_algo, cfg, usage);
+#else
+ return VPX_CODEC_INCAPABLE;
+#endif
+ }
+};
+
+const libvpx_test::VP8CodecFactory kVP8;
+
+#define VP8_INSTANTIATE_TEST_CASE(test, params)\
+ INSTANTIATE_TEST_CASE_P(VP8, test, \
+ ::testing::Combine( \
+ ::testing::Values(static_cast<const libvpx_test::CodecFactory*>( \
+ &libvpx_test::kVP8)), \
+ params))
+#else
+#define VP8_INSTANTIATE_TEST_CASE(test, params)
+#endif // CONFIG_VP8
+
+
+/*
+ * VP9 Codec Definitions
+ */
+#if CONFIG_VP9
+class VP9Decoder : public Decoder {
+ public:
+ VP9Decoder(vpx_codec_dec_cfg_t cfg, unsigned long deadline)
+ : Decoder(cfg, deadline) {}
+
+ protected:
+ virtual const vpx_codec_iface_t* CodecInterface() const {
+#if CONFIG_VP9_DECODER
+ return &vpx_codec_vp9_dx_algo;
+#else
+ return NULL;
+#endif
+ }
+};
+
+class VP9Encoder : public Encoder {
+ public:
+ VP9Encoder(vpx_codec_enc_cfg_t cfg, unsigned long deadline,
+ const unsigned long init_flags, TwopassStatsStore *stats)
+ : Encoder(cfg, deadline, init_flags, stats) {}
+
+ protected:
+ virtual const vpx_codec_iface_t* CodecInterface() const {
+#if CONFIG_VP9_ENCODER
+ return &vpx_codec_vp9_cx_algo;
+#else
+ return NULL;
+#endif
+ }
+};
+
+class VP9CodecFactory : public CodecFactory {
+ public:
+ VP9CodecFactory() : CodecFactory() {}
+
+ virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg,
+ unsigned long deadline) const {
+#if CONFIG_VP9_DECODER
+ return new VP9Decoder(cfg, deadline);
+#else
+ return NULL;
+#endif
+ }
+
+ virtual Encoder* CreateEncoder(vpx_codec_enc_cfg_t cfg,
+ unsigned long deadline,
+ const unsigned long init_flags,
+ TwopassStatsStore *stats) const {
+#if CONFIG_VP9_ENCODER
+ return new VP9Encoder(cfg, deadline, init_flags, stats);
+#else
+ return NULL;
+#endif
+ }
+
+ virtual vpx_codec_err_t DefaultEncoderConfig(vpx_codec_enc_cfg_t *cfg,
+ int usage) const {
+#if CONFIG_VP9_ENCODER
+ return vpx_codec_enc_config_default(&vpx_codec_vp9_cx_algo, cfg, usage);
+#else
+ return VPX_CODEC_INCAPABLE;
+#endif
+ }
+};
+
+const libvpx_test::VP9CodecFactory kVP9;
+
+#define VP9_INSTANTIATE_TEST_CASE(test, params)\
+ INSTANTIATE_TEST_CASE_P(VP9, test, \
+ ::testing::Combine( \
+ ::testing::Values(static_cast<const libvpx_test::CodecFactory*>( \
+ &libvpx_test::kVP9)), \
+ params))
+#else
+#define VP9_INSTANTIATE_TEST_CASE(test, params)
+#endif // CONFIG_VP9
+
+
+} // namespace libvpx_test
+
+#endif // TEST_CODEC_FACTORY_H_
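
This factory plumbing is what lets one test body run against multiple codecs: the first tuple element is always a CodecFactory pointer, consumed by the EncoderTest constructor, and the INSTANTIATE macros prepend it via ::testing::Combine. A condensed sketch of a test written to this pattern, mirroring the conversions elsewhere in this commit (names illustrative):

    class MyLagTest : public ::libvpx_test::EncoderTest,
                      public ::libvpx_test::CodecTestWithParam<int> {
     protected:
      MyLagTest() : EncoderTest(GET_PARAM(0)) {}  // GET_PARAM(0): the factory
      virtual void SetUp() {
        InitializeConfig();
        cfg_.g_lag_in_frames = GET_PARAM(1);      // GET_PARAM(1): test param
      }
    };

    VP8_INSTANTIATE_TEST_CASE(MyLagTest, ::testing::Range(0, 4));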
diff --git a/libvpx/test/config_test.cc b/libvpx/test/config_test.cc
index c4da46e..9008728 100644
--- a/libvpx/test/config_test.cc
+++ b/libvpx/test/config_test.cc
@@ -8,20 +8,22 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
+#include "test/util.h"
#include "test/video_source.h"
namespace {
class ConfigTest : public ::libvpx_test::EncoderTest,
- public ::testing::TestWithParam<enum libvpx_test::TestMode> {
- public:
- ConfigTest() : frame_count_in_(0), frame_count_out_(0), frame_count_max_(0) {}
-
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
protected:
+ ConfigTest() : EncoderTest(GET_PARAM(0)),
+ frame_count_in_(0), frame_count_out_(0), frame_count_max_(0) {}
+
virtual void SetUp() {
InitializeConfig();
- SetMode(GetParam());
+ SetMode(GET_PARAM(1));
}
virtual void BeginPassHook(unsigned int /*pass*/) {
@@ -57,5 +59,5 @@ TEST_P(ConfigTest, LagIsDisabled) {
EXPECT_EQ(frame_count_in_, frame_count_out_);
}
-INSTANTIATE_TEST_CASE_P(OnePassModes, ConfigTest, ONE_PASS_TEST_MODES);
+VP8_INSTANTIATE_TEST_CASE(ConfigTest, ONE_PASS_TEST_MODES);
} // namespace
diff --git a/libvpx/test/convolve_test.cc b/libvpx/test/convolve_test.cc
new file mode 100644
index 0000000..fd2bd36
--- /dev/null
+++ b/libvpx/test/convolve_test.cc
@@ -0,0 +1,549 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "test/acm_random.h"
+#include "test/register_state_check.h"
+#include "test/util.h"
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+extern "C" {
+#include "./vpx_config.h"
+#include "./vp9_rtcd.h"
+#include "vp9/common/vp9_filter.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/mem.h"
+}
+
+namespace {
+typedef void (*convolve_fn_t)(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int filter_x_stride,
+ const int16_t *filter_y, int filter_y_stride,
+ int w, int h);
+
+struct ConvolveFunctions {
+ ConvolveFunctions(convolve_fn_t h8, convolve_fn_t h8_avg,
+ convolve_fn_t v8, convolve_fn_t v8_avg,
+ convolve_fn_t hv8, convolve_fn_t hv8_avg)
+ : h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg), v8_avg_(v8_avg),
+ hv8_avg_(hv8_avg) {}
+
+ convolve_fn_t h8_;
+ convolve_fn_t v8_;
+ convolve_fn_t hv8_;
+ convolve_fn_t h8_avg_;
+ convolve_fn_t v8_avg_;
+ convolve_fn_t hv8_avg_;
+};
+
+// Reference 8-tap subpixel filter, slightly modified to fit into this test.
+#define VP9_FILTER_WEIGHT 128
+#define VP9_FILTER_SHIFT 7
+uint8_t clip_pixel(int x) {
+ return x < 0 ? 0 :
+ x > 255 ? 255 :
+ x;
+}
+
+void filter_block2d_8_c(const uint8_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint8_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+ // Between passes, we use an intermediate buffer whose height is extended to
+ // have enough horizontally filtered values as input for the vertical pass.
+ // This buffer is allocated to be big enough for the largest block type we
+ // support.
+ const int kInterp_Extend = 4;
+ const unsigned int intermediate_height =
+ (kInterp_Extend - 1) + output_height + kInterp_Extend;
+
+ /* Size of intermediate_buffer is max_intermediate_height * filter_max_width,
+ * where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
+ * + kInterp_Extend
+ * = 3 + 64 + 4
+ * = 71
+ * and filter_max_width = 64
+ */
+ uint8_t intermediate_buffer[71 * 64];
+ const int intermediate_next_stride = 1 - intermediate_height * output_width;
+
+ // Horizontal pass (src -> transposed intermediate).
+ {
+ uint8_t *output_ptr = intermediate_buffer;
+ const int src_next_row_stride = src_stride - output_width;
+ unsigned int i, j;
+ src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * HFilter[0]) +
+ (src_ptr[1] * HFilter[1]) +
+ (src_ptr[2] * HFilter[2]) +
+ (src_ptr[3] * HFilter[3]) +
+ (src_ptr[4] * HFilter[4]) +
+ (src_ptr[5] * HFilter[5]) +
+ (src_ptr[6] * HFilter[6]) +
+ (src_ptr[7] * HFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
+
+ // Normalize back to 0-255...
+ *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
+ ++src_ptr;
+ output_ptr += intermediate_height;
+ }
+ src_ptr += src_next_row_stride;
+ output_ptr += intermediate_next_stride;
+ }
+ }
+
+ // Vertical pass (transposed intermediate -> dst).
+ {
+ uint8_t *src_ptr = intermediate_buffer;
+ const int dst_next_row_stride = dst_stride - output_width;
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ // Apply filter...
+ const int temp = (src_ptr[0] * VFilter[0]) +
+ (src_ptr[1] * VFilter[1]) +
+ (src_ptr[2] * VFilter[2]) +
+ (src_ptr[3] * VFilter[3]) +
+ (src_ptr[4] * VFilter[4]) +
+ (src_ptr[5] * VFilter[5]) +
+ (src_ptr[6] * VFilter[6]) +
+ (src_ptr[7] * VFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
+
+ // Normalize back to 0-255...
+ *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
+ src_ptr += intermediate_height;
+ }
+ src_ptr += intermediate_next_stride;
+ dst_ptr += dst_next_row_stride;
+ }
+ }
+}
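
The two passes implement a separable 2-D filter through a transposed intermediate; with the per-pass rounding and 8-bit clipping folded into clip(), and the -3 offset being kInterp_Extend - 1, the reference computes:

    dst(x, y) = clip( sum_{j=0..7} V[j] *
                      clip( sum_{i=0..7} H[i] * src(x + i - 3, y + j - 3) ) )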
+
+void block2d_average_c(uint8_t *src,
+ unsigned int src_stride,
+ uint8_t *output_ptr,
+ unsigned int output_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; ++j) {
+ output_ptr[j] = (output_ptr[j] + src[i * src_stride + j] + 1) >> 1;
+ }
+ output_ptr += output_stride;
+ }
+}
+
+void filter_average_block2d_8_c(const uint8_t *src_ptr,
+ const unsigned int src_stride,
+ const int16_t *HFilter,
+ const int16_t *VFilter,
+ uint8_t *dst_ptr,
+ unsigned int dst_stride,
+ unsigned int output_width,
+ unsigned int output_height) {
+ uint8_t tmp[64 * 64];
+
+ assert(output_width <= 64);
+ assert(output_height <= 64);
+ filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64,
+ output_width, output_height);
+ block2d_average_c(tmp, 64, dst_ptr, dst_stride,
+ output_width, output_height);
+}
+
+class ConvolveTest : public PARAMS(int, int, const ConvolveFunctions*) {
+ public:
+ static void SetUpTestCase() {
+ // Force input_ to be unaligned, output to be 16 byte aligned.
+ input_ = reinterpret_cast<uint8_t*>(
+ vpx_memalign(kDataAlignment, kInputBufferSize + 1)) + 1;
+ output_ = reinterpret_cast<uint8_t*>(
+ vpx_memalign(kDataAlignment, kOutputBufferSize));
+ }
+
+ static void TearDownTestCase() {
+ vpx_free(input_ - 1);
+ input_ = NULL;
+ vpx_free(output_);
+ output_ = NULL;
+ }
+
+ protected:
+ static const int kDataAlignment = 16;
+ static const int kOuterBlockSize = 128;
+ static const int kInputStride = kOuterBlockSize;
+ static const int kOutputStride = kOuterBlockSize;
+ static const int kMaxDimension = 64;
+ static const int kInputBufferSize = kOuterBlockSize * kOuterBlockSize;
+ static const int kOutputBufferSize = kOuterBlockSize * kOuterBlockSize;
+
+ int Width() const { return GET_PARAM(0); }
+ int Height() const { return GET_PARAM(1); }
+ int BorderLeft() const {
+ const int center = (kOuterBlockSize - Width()) / 2;
+ return (center + (kDataAlignment - 1)) & ~(kDataAlignment - 1);
+ }
+ int BorderTop() const { return (kOuterBlockSize - Height()) / 2; }
+
+ bool IsIndexInBorder(int i) {
+ return (i < BorderTop() * kOuterBlockSize ||
+ i >= (BorderTop() + Height()) * kOuterBlockSize ||
+ i % kOuterBlockSize < BorderLeft() ||
+ i % kOuterBlockSize >= (BorderLeft() + Width()));
+ }
+
+ virtual void SetUp() {
+ UUT_ = GET_PARAM(2);
+ /* Set up guard blocks for an inner block centered in the outer block */
+ for (int i = 0; i < kOutputBufferSize; ++i) {
+ if (IsIndexInBorder(i))
+ output_[i] = 255;
+ else
+ output_[i] = 0;
+ }
+
+ ::libvpx_test::ACMRandom prng;
+ for (int i = 0; i < kInputBufferSize; ++i)
+ input_[i] = prng.Rand8Extremes();
+ }
+
+ void CheckGuardBlocks() {
+ for (int i = 0; i < kOutputBufferSize; ++i) {
+ if (IsIndexInBorder(i))
+ EXPECT_EQ(255, output_[i]);
+ }
+ }
+
+ uint8_t* input() const {
+ return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ }
+
+ uint8_t* output() const {
+ return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
+ }
+
+ const ConvolveFunctions* UUT_;
+ static uint8_t* input_;
+ static uint8_t* output_;
+};
+uint8_t* ConvolveTest::input_ = NULL;
+uint8_t* ConvolveTest::output_ = NULL;
+
+TEST_P(ConvolveTest, GuardBlocks) {
+ CheckGuardBlocks();
+}
+
+TEST_P(ConvolveTest, CopyHoriz) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+ DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
+
+ REGISTER_STATE_CHECK(
+ UUT_->h8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ << "(" << x << "," << y << ")";
+}
+
+TEST_P(ConvolveTest, CopyVert) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+ DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
+
+ REGISTER_STATE_CHECK(
+ UUT_->v8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ << "(" << x << "," << y << ")";
+}
+
+TEST_P(ConvolveTest, Copy2D) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+ DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
+
+ REGISTER_STATE_CHECK(
+ UUT_->hv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(out[y * kOutputStride + x], in[y * kInputStride + x])
+ << "(" << x << "," << y << ")";
+}
+
+const int16_t (*kTestFilterList[])[8] = {
+ vp9_bilinear_filters,
+ vp9_sub_pel_filters_8,
+ vp9_sub_pel_filters_8s,
+ vp9_sub_pel_filters_8lp
+};
+const int kNumFilterBanks = sizeof(kTestFilterList) /
+ sizeof(kTestFilterList[0]);
+const int kNumFilters = 16;
+
+TEST(ConvolveTest, FiltersWontSaturateWhenAddedPairwise) {
+ for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
+ const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+ for (int i = 0; i < kNumFilters; i++) {
+ const int p0 = filters[i][0] + filters[i][1];
+ const int p1 = filters[i][2] + filters[i][3];
+ const int p2 = filters[i][4] + filters[i][5];
+ const int p3 = filters[i][6] + filters[i][7];
+ EXPECT_LE(p0, 128);
+ EXPECT_LE(p1, 128);
+ EXPECT_LE(p2, 128);
+ EXPECT_LE(p3, 128);
+ EXPECT_LE(p0 + p3, 128);
+ EXPECT_LE(p0 + p3 + p1, 128);
+ EXPECT_LE(p0 + p3 + p1 + p2, 128);
+ EXPECT_EQ(p0 + p1 + p2 + p3, 128);
+ }
+ }
+}
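
The 128 bound appears chosen so SIMD versions can accumulate the taps in this pairwise order without overflowing signed 16-bit intermediates: with 8-bit pixels, a partial tap sum capped at 128 yields at most 255 * 128 = 32640, inside the int16_t maximum of 32767. The final EXPECT_EQ additionally pins every filter's DC gain to exactly 128, i.e. VP9_FILTER_WEIGHT.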
+
+const int16_t kInvalidFilter[8] = { 0 };
+
+TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+ uint8_t ref[kOutputStride * kMaxDimension];
+
+ for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
+ const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+
+ for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
+ for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
+ filter_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
+
+ if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
+ REGISTER_STATE_CHECK(
+ UUT_->hv8_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_y)
+ REGISTER_STATE_CHECK(
+ UUT_->v8_(in, kInputStride, out, kOutputStride,
+ kInvalidFilter, 16, filters[filter_y], 16,
+ Width(), Height()));
+ else
+ REGISTER_STATE_CHECK(
+ UUT_->h8_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, kInvalidFilter, 16,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(ref[y * kOutputStride + x], out[y * kOutputStride + x])
+ << "mismatch at (" << x << "," << y << "), "
+ << "filters (" << filter_bank << ","
+ << filter_x << "," << filter_y << ")";
+ }
+ }
+ }
+}
+
+TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+ uint8_t ref[kOutputStride * kMaxDimension];
+
+ // Populate ref and out with some random data
+ ::libvpx_test::ACMRandom prng;
+ for (int y = 0; y < Height(); ++y) {
+ for (int x = 0; x < Width(); ++x) {
+ const uint8_t r = prng.Rand8Extremes();
+
+ out[y * kOutputStride + x] = r;
+ ref[y * kOutputStride + x] = r;
+ }
+ }
+
+ const int kNumFilterBanks = sizeof(kTestFilterList) /
+ sizeof(kTestFilterList[0]);
+
+ for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
+ const int16_t (*filters)[8] = kTestFilterList[filter_bank];
+ const int kNumFilters = 16;
+
+ for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
+ for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
+ filter_average_block2d_8_c(in, kInputStride,
+ filters[filter_x], filters[filter_y],
+ ref, kOutputStride,
+ Width(), Height());
+
+ if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
+ REGISTER_STATE_CHECK(
+ UUT_->hv8_avg_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, filters[filter_y], 16,
+ Width(), Height()));
+ else if (filter_y)
+ REGISTER_STATE_CHECK(
+ UUT_->v8_avg_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, filters[filter_y], 16,
+ Width(), Height()));
+ else
+ REGISTER_STATE_CHECK(
+ UUT_->h8_avg_(in, kInputStride, out, kOutputStride,
+ filters[filter_x], 16, filters[filter_y], 16,
+ Width(), Height()));
+
+ CheckGuardBlocks();
+
+ for (int y = 0; y < Height(); ++y)
+ for (int x = 0; x < Width(); ++x)
+ ASSERT_EQ(ref[y * kOutputStride + x], out[y * kOutputStride + x])
+ << "mismatch at (" << x << "," << y << "), "
+ << "filters (" << filter_bank << ","
+ << filter_x << "," << filter_y << ")";
+ }
+ }
+ }
+}
+
+DECLARE_ALIGNED(256, const int16_t, kChangeFilters[16][8]) = {
+ { 0, 0, 0, 0, 0, 0, 0, 128},
+ { 0, 0, 0, 0, 0, 0, 128},
+ { 0, 0, 0, 0, 0, 128},
+ { 0, 0, 0, 0, 128},
+ { 0, 0, 0, 128},
+ { 0, 0, 128},
+ { 0, 128},
+ { 128},
+ { 0, 0, 0, 0, 0, 0, 0, 128},
+ { 0, 0, 0, 0, 0, 0, 128},
+ { 0, 0, 0, 0, 0, 128},
+ { 0, 0, 0, 0, 128},
+ { 0, 0, 0, 128},
+ { 0, 0, 128},
+ { 0, 128},
+ { 128}
+};
+
+TEST_P(ConvolveTest, ChangeFilterWorks) {
+ uint8_t* const in = input();
+ uint8_t* const out = output();
+ const int kPixelSelected = 4;
+
+ REGISTER_STATE_CHECK(UUT_->h8_(in, kInputStride, out, kOutputStride,
+ kChangeFilters[8], 17, kChangeFilters[4], 16,
+ Width(), Height()));
+
+ for (int x = 0; x < Width(); ++x) {
+ const int kQ4StepAdjust = x >> 4;
+ const int kFilterPeriodAdjust = (x >> 3) << 3;
+ const int ref_x = kQ4StepAdjust + kFilterPeriodAdjust + kPixelSelected;
+ ASSERT_EQ(in[ref_x], out[x]) << "x == " << x;
+ }
+
+ REGISTER_STATE_CHECK(UUT_->v8_(in, kInputStride, out, kOutputStride,
+ kChangeFilters[4], 16, kChangeFilters[8], 17,
+ Width(), Height()));
+
+ for (int y = 0; y < Height(); ++y) {
+ const int kQ4StepAdjust = y >> 4;
+ const int kFilterPeriodAdjust = (y >> 3) << 3;
+ const int ref_y = kQ4StepAdjust + kFilterPeriodAdjust + kPixelSelected;
+ ASSERT_EQ(in[ref_y * kInputStride], out[y * kInputStride]) << "y == " << y;
+ }
+
+ REGISTER_STATE_CHECK(UUT_->hv8_(in, kInputStride, out, kOutputStride,
+ kChangeFilters[8], 17, kChangeFilters[8], 17,
+ Width(), Height()));
+
+ for (int y = 0; y < Height(); ++y) {
+ const int kQ4StepAdjustY = y >> 4;
+ const int kFilterPeriodAdjustY = (y >> 3) << 3;
+ const int ref_y = kQ4StepAdjustY + kFilterPeriodAdjustY + kPixelSelected;
+ for (int x = 0; x < Width(); ++x) {
+ const int kQ4StepAdjustX = x >> 4;
+ const int kFilterPeriodAdjustX = (x >> 3) << 3;
+ const int ref_x = kQ4StepAdjustX + kFilterPeriodAdjustX + kPixelSelected;
+
+ ASSERT_EQ(in[ref_y * kInputStride + ref_x], out[y * kOutputStride + x])
+ << "x == " << x << ", y == " << y;
+ }
+ }
+}
+
+
+using std::tr1::make_tuple;
+
+const ConvolveFunctions convolve8_c(
+ vp9_convolve8_horiz_c, vp9_convolve8_avg_horiz_c,
+ vp9_convolve8_vert_c, vp9_convolve8_avg_vert_c,
+ vp9_convolve8_c, vp9_convolve8_avg_c);
+
+INSTANTIATE_TEST_CASE_P(C, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_c),
+ make_tuple(8, 4, &convolve8_c),
+ make_tuple(4, 8, &convolve8_c),
+ make_tuple(8, 8, &convolve8_c),
+ make_tuple(16, 8, &convolve8_c),
+ make_tuple(8, 16, &convolve8_c),
+ make_tuple(16, 16, &convolve8_c),
+ make_tuple(32, 16, &convolve8_c),
+ make_tuple(16, 32, &convolve8_c),
+ make_tuple(32, 32, &convolve8_c),
+ make_tuple(64, 32, &convolve8_c),
+ make_tuple(32, 64, &convolve8_c),
+ make_tuple(64, 64, &convolve8_c)));
+
+#if HAVE_SSSE3
+const ConvolveFunctions convolve8_ssse3(
+ vp9_convolve8_horiz_ssse3, vp9_convolve8_avg_horiz_c,
+ vp9_convolve8_vert_ssse3, vp9_convolve8_avg_vert_c,
+ vp9_convolve8_ssse3, vp9_convolve8_avg_c);
+
+INSTANTIATE_TEST_CASE_P(SSSE3, ConvolveTest, ::testing::Values(
+ make_tuple(4, 4, &convolve8_ssse3),
+ make_tuple(8, 4, &convolve8_ssse3),
+ make_tuple(4, 8, &convolve8_ssse3),
+ make_tuple(8, 8, &convolve8_ssse3),
+ make_tuple(16, 8, &convolve8_ssse3),
+ make_tuple(8, 16, &convolve8_ssse3),
+ make_tuple(16, 16, &convolve8_ssse3),
+ make_tuple(32, 16, &convolve8_ssse3),
+ make_tuple(16, 32, &convolve8_ssse3),
+ make_tuple(32, 32, &convolve8_ssse3),
+ make_tuple(64, 32, &convolve8_ssse3),
+ make_tuple(32, 64, &convolve8_ssse3),
+ make_tuple(64, 64, &convolve8_ssse3)));
+#endif
+} // namespace
diff --git a/libvpx/test/cq_test.cc b/libvpx/test/cq_test.cc
index 42ee2a2..a6a4b8e 100644
--- a/libvpx/test/cq_test.cc
+++ b/libvpx/test/cq_test.cc
@@ -9,8 +9,12 @@
*/
#include <cmath>
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
#include "test/i420_video_source.h"
+#include "test/util.h"
+
+namespace {
// CQ level range: [kCQLevelMin, kCQLevelMax).
const int kCQLevelMin = 4;
@@ -18,12 +22,13 @@ const int kCQLevelMax = 63;
const int kCQLevelStep = 8;
const int kCQTargetBitrate = 2000;
-namespace {
-
-class CQTest : public libvpx_test::EncoderTest,
- public ::testing::TestWithParam<int> {
+class CQTest : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<int> {
protected:
- CQTest() : cq_level_(GetParam()) { init_flags_ = VPX_CODEC_USE_PSNR; }
+ CQTest() : EncoderTest(GET_PARAM(0)), cq_level_(GET_PARAM(1)) {
+ init_flags_ = VPX_CODEC_USE_PSNR;
+ }
+
virtual ~CQTest() {}
virtual void SetUp() {
@@ -100,7 +105,7 @@ TEST_P(CQTest, LinearPSNRIsHigherForCQLevel) {
EXPECT_GE(cq_psnr_lin, vbr_psnr_lin);
}
-INSTANTIATE_TEST_CASE_P(CQLevelRange, CQTest,
- ::testing::Range(kCQLevelMin, kCQLevelMax,
- kCQLevelStep));
+VP8_INSTANTIATE_TEST_CASE(CQTest,
+ ::testing::Range(kCQLevelMin, kCQLevelMax,
+ kCQLevelStep));
} // namespace
diff --git a/libvpx/test/datarate_test.cc b/libvpx/test/datarate_test.cc
index 6fbcb64..85eeafb 100644
--- a/libvpx/test/datarate_test.cc
+++ b/libvpx/test/datarate_test.cc
@@ -7,17 +7,23 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
#include "test/i420_video_source.h"
-#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/util.h"
+
namespace {
class DatarateTest : public ::libvpx_test::EncoderTest,
- public ::testing::TestWithParam<enum libvpx_test::TestMode> {
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+ public:
+ DatarateTest() : EncoderTest(GET_PARAM(0)) {}
+
protected:
virtual void SetUp() {
InitializeConfig();
- SetMode(GetParam());
+ SetMode(GET_PARAM(1));
ResetModel();
}
@@ -174,5 +180,6 @@ TEST_P(DatarateTest, ChangingDropFrameThresh) {
}
}
-INSTANTIATE_TEST_CASE_P(AllModes, DatarateTest, ALL_TEST_MODES);
+VP8_INSTANTIATE_TEST_CASE(DatarateTest, ALL_TEST_MODES);
+
} // namespace
diff --git a/libvpx/test/dct16x16_test.cc b/libvpx/test/dct16x16_test.cc
new file mode 100644
index 0000000..9fb45d6
--- /dev/null
+++ b/libvpx/test/dct16x16_test.cc
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+extern "C" {
+#include "vp9/common/vp9_entropy.h"
+#include "vp9_rtcd.h"
+void vp9_short_idct16x16_add_c(short *input, uint8_t *output, int pitch);
+}
+
+#include "acm_random.h"
+#include "vpx/vpx_integer.h"
+
+using libvpx_test::ACMRandom;
+
+namespace {
+
+#ifdef _MSC_VER
+static int round(double x) {
+ if (x < 0)
+ return (int)ceil(x - 0.5);
+ else
+ return (int)floor(x + 0.5);
+}
+#endif
+
+const double PI = 3.1415926535898;
+void reference2_16x16_idct_2d(double *input, double *output) {
+ double x;
+ for (int l = 0; l < 16; ++l) {
+ for (int k = 0; k < 16; ++k) {
+ double s = 0;
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ x=cos(PI*j*(l+0.5)/16.0)*cos(PI*i*(k+0.5)/16.0)*input[i*16+j]/256;
+ if (i != 0)
+ x *= sqrt(2.0);
+ if (j != 0)
+ x *= sqrt(2.0);
+ s += x;
+ }
+ }
+ output[k*16+l] = s;
+ }
+ }
+}
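
The quadruple loop is the textbook 2-D inverse DCT with orthonormal scaling; written out:

    out(k, l) = (1/256) * sum_{i=0..15} sum_{j=0..15}
                c(i) * c(j) * cos(pi*i*(k+0.5)/16) * cos(pi*j*(l+0.5)/16) * in(i, j)

    where c(0) = 1 and c(n) = sqrt(2) for n > 0.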
+
+
+static const double C1 = 0.995184726672197;
+static const double C2 = 0.98078528040323;
+static const double C3 = 0.956940335732209;
+static const double C4 = 0.923879532511287;
+static const double C5 = 0.881921264348355;
+static const double C6 = 0.831469612302545;
+static const double C7 = 0.773010453362737;
+static const double C8 = 0.707106781186548;
+static const double C9 = 0.634393284163646;
+static const double C10 = 0.555570233019602;
+static const double C11 = 0.471396736825998;
+static const double C12 = 0.38268343236509;
+static const double C13 = 0.290284677254462;
+static const double C14 = 0.195090322016128;
+static const double C15 = 0.098017140329561;
+
+static void butterfly_16x16_dct_1d(double input[16], double output[16]) {
+ double step[16];
+ double intermediate[16];
+ double temp1, temp2;
+
+ // step 1
+ step[ 0] = input[0] + input[15];
+ step[ 1] = input[1] + input[14];
+ step[ 2] = input[2] + input[13];
+ step[ 3] = input[3] + input[12];
+ step[ 4] = input[4] + input[11];
+ step[ 5] = input[5] + input[10];
+ step[ 6] = input[6] + input[ 9];
+ step[ 7] = input[7] + input[ 8];
+ step[ 8] = input[7] - input[ 8];
+ step[ 9] = input[6] - input[ 9];
+ step[10] = input[5] - input[10];
+ step[11] = input[4] - input[11];
+ step[12] = input[3] - input[12];
+ step[13] = input[2] - input[13];
+ step[14] = input[1] - input[14];
+ step[15] = input[0] - input[15];
+
+ // step 2
+ output[0] = step[0] + step[7];
+ output[1] = step[1] + step[6];
+ output[2] = step[2] + step[5];
+ output[3] = step[3] + step[4];
+ output[4] = step[3] - step[4];
+ output[5] = step[2] - step[5];
+ output[6] = step[1] - step[6];
+ output[7] = step[0] - step[7];
+
+ temp1 = step[ 8]*C7;
+ temp2 = step[15]*C9;
+ output[ 8] = temp1 + temp2;
+
+ temp1 = step[ 9]*C11;
+ temp2 = step[14]*C5;
+ output[ 9] = temp1 - temp2;
+
+ temp1 = step[10]*C3;
+ temp2 = step[13]*C13;
+ output[10] = temp1 + temp2;
+
+ temp1 = step[11]*C15;
+ temp2 = step[12]*C1;
+ output[11] = temp1 - temp2;
+
+ temp1 = step[11]*C1;
+ temp2 = step[12]*C15;
+ output[12] = temp2 + temp1;
+
+ temp1 = step[10]*C13;
+ temp2 = step[13]*C3;
+ output[13] = temp2 - temp1;
+
+ temp1 = step[ 9]*C5;
+ temp2 = step[14]*C11;
+ output[14] = temp2 + temp1;
+
+ temp1 = step[ 8]*C9;
+ temp2 = step[15]*C7;
+ output[15] = temp2 - temp1;
+
+ // step 3
+ step[ 0] = output[0] + output[3];
+ step[ 1] = output[1] + output[2];
+ step[ 2] = output[1] - output[2];
+ step[ 3] = output[0] - output[3];
+
+ temp1 = output[4]*C14;
+ temp2 = output[7]*C2;
+ step[ 4] = temp1 + temp2;
+
+ temp1 = output[5]*C10;
+ temp2 = output[6]*C6;
+ step[ 5] = temp1 + temp2;
+
+ temp1 = output[5]*C6;
+ temp2 = output[6]*C10;
+ step[ 6] = temp2 - temp1;
+
+ temp1 = output[4]*C2;
+ temp2 = output[7]*C14;
+ step[ 7] = temp2 - temp1;
+
+ step[ 8] = output[ 8] + output[11];
+ step[ 9] = output[ 9] + output[10];
+ step[10] = output[ 9] - output[10];
+ step[11] = output[ 8] - output[11];
+
+ step[12] = output[12] + output[15];
+ step[13] = output[13] + output[14];
+ step[14] = output[13] - output[14];
+ step[15] = output[12] - output[15];
+
+ // step 4
+ output[ 0] = (step[ 0] + step[ 1]);
+ output[ 8] = (step[ 0] - step[ 1]);
+
+ temp1 = step[2]*C12;
+ temp2 = step[3]*C4;
+ temp1 = temp1 + temp2;
+ output[ 4] = 2*(temp1*C8);
+
+ temp1 = step[2]*C4;
+ temp2 = step[3]*C12;
+ temp1 = temp2 - temp1;
+ output[12] = 2*(temp1*C8);
+
+ output[ 2] = 2*((step[4] + step[ 5])*C8);
+ output[14] = 2*((step[7] - step[ 6])*C8);
+
+ temp1 = step[4] - step[5];
+ temp2 = step[6] + step[7];
+ output[ 6] = (temp1 + temp2);
+ output[10] = (temp1 - temp2);
+
+ intermediate[8] = step[8] + step[14];
+ intermediate[9] = step[9] + step[15];
+
+ temp1 = intermediate[8]*C12;
+ temp2 = intermediate[9]*C4;
+ temp1 = temp1 - temp2;
+ output[3] = 2*(temp1*C8);
+
+ temp1 = intermediate[8]*C4;
+ temp2 = intermediate[9]*C12;
+ temp1 = temp2 + temp1;
+ output[13] = 2*(temp1*C8);
+
+ output[ 9] = 2*((step[10] + step[11])*C8);
+
+ intermediate[11] = step[10] - step[11];
+ intermediate[12] = step[12] + step[13];
+ intermediate[13] = step[12] - step[13];
+ intermediate[14] = step[ 8] - step[14];
+ intermediate[15] = step[ 9] - step[15];
+
+ output[15] = (intermediate[11] + intermediate[12]);
+ output[ 1] = -(intermediate[11] - intermediate[12]);
+
+ output[ 7] = 2*(intermediate[13]*C8);
+
+ temp1 = intermediate[14]*C12;
+ temp2 = intermediate[15]*C4;
+ temp1 = temp1 - temp2;
+ output[11] = -2*(temp1*C8);
+
+ temp1 = intermediate[14]*C4;
+ temp2 = intermediate[15]*C12;
+ temp1 = temp2 + temp1;
+ output[ 5] = 2*(temp1*C8);
+}
+
+static void reference_16x16_dct_1d(double in[16], double out[16]) {
+ const double kPi = 3.141592653589793238462643383279502884;
+ const double kInvSqrt2 = 0.707106781186547524400844362104;
+ for (int k = 0; k < 16; k++) {
+ out[k] = 0.0;
+ for (int n = 0; n < 16; n++)
+ out[k] += in[n]*cos(kPi*(2*n+1)*k/32.0);
+ if (k == 0)
+ out[k] = out[k]*kInvSqrt2;
+ }
+}
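
This helper is the 1-D type-II DCT used to build the 2-D reference:

    out(k) = c(k) * sum_{n=0..15} in(n) * cos(pi * (2n + 1) * k / 32)

    with c(0) = 1/sqrt(2) and c(k) = 1 otherwise.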
+
+void reference_16x16_dct_2d(int16_t input[16*16], double output[16*16]) {
+ // First transform columns
+ for (int i = 0; i < 16; ++i) {
+ double temp_in[16], temp_out[16];
+ for (int j = 0; j < 16; ++j)
+ temp_in[j] = input[j*16 + i];
+ butterfly_16x16_dct_1d(temp_in, temp_out);
+ for (int j = 0; j < 16; ++j)
+ output[j*16 + i] = temp_out[j];
+ }
+ // Then transform rows
+ for (int i = 0; i < 16; ++i) {
+ double temp_in[16], temp_out[16];
+ for (int j = 0; j < 16; ++j)
+ temp_in[j] = output[j + i*16];
+ butterfly_16x16_dct_1d(temp_in, temp_out);
+ // Scale by some magic number
+ for (int j = 0; j < 16; ++j)
+ output[j + i*16] = temp_out[j]/2;
+ }
+}
+
+
+TEST(VP9Idct16x16Test, AccuracyCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ for (int i = 0; i < count_test_block; ++i) {
+ int16_t in[256], coeff[256];
+ uint8_t dst[256], src[256];
+ double out_r[256];
+
+ for (int j = 0; j < 256; ++j) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ }
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < 256; ++j)
+ in[j] = src[j] - dst[j];
+
+ reference_16x16_dct_2d(in, out_r);
+ for (int j = 0; j < 256; j++)
+ coeff[j] = round(out_r[j]);
+ vp9_short_idct16x16_add_c(coeff, dst, 16);
+ for (int j = 0; j < 256; ++j) {
+ const int diff = dst[j] - src[j];
+ const int error = diff * diff;
+ EXPECT_GE(1, error)
+ << "Error: 16x16 IDCT has error " << error
+ << " at index " << j;
+ }
+ }
+}
+
+// We need to enable the fdct test once we redo the 16-point fdct.
+TEST(VP9Fdct16x16Test, AccuracyCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ int max_error = 0;
+ double total_error = 0;
+ const int count_test_block = 1000;
+ for (int i = 0; i < count_test_block; ++i) {
+ int16_t test_input_block[256];
+ int16_t test_temp_block[256];
+ uint8_t dst[256], src[256];
+
+ for (int j = 0; j < 256; ++j) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ }
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < 256; ++j)
+ test_input_block[j] = src[j] - dst[j];
+
+ const int pitch = 32;
+ vp9_short_fdct16x16_c(test_input_block, test_temp_block, pitch);
+ vp9_short_idct16x16_add_c(test_temp_block, dst, 16);
+
+ for (int j = 0; j < 256; ++j) {
+ const int diff = dst[j] - src[j];
+ const int error = diff * diff;
+ if (max_error < error)
+ max_error = error;
+ total_error += error;
+ }
+ }
+
+ EXPECT_GE(1, max_error)
+ << "Error: 16x16 FDCT/IDCT has an individual round trip error > 1";
+
+ EXPECT_GE(count_test_block, total_error)
+ << "Error: 16x16 FDCT/IDCT has average round trip error > 1 per block";
+}
+
+TEST(VP9Fdct16x16Test, CoeffSizeCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ for (int i = 0; i < count_test_block; ++i) {
+ int16_t input_block[256], input_extreme_block[256];
+ int16_t output_block[256], output_extreme_block[256];
+
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < 256; ++j) {
+ input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ }
+ if (i == 0)
+ for (int j = 0; j < 256; ++j)
+ input_extreme_block[j] = 255;
+
+ const int pitch = 32;
+ vp9_short_fdct16x16_c(input_block, output_block, pitch);
+ vp9_short_fdct16x16_c(input_extreme_block, output_extreme_block, pitch);
+
+ // The minimum quant value is 4.
+ for (int j = 0; j < 256; ++j) {
+ EXPECT_GE(4*DCT_MAX_VALUE, abs(output_block[j]))
+ << "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
+      EXPECT_GE(4*DCT_MAX_VALUE, abs(output_extreme_block[j]))
+        << "Error: 16x16 FDCT extreme has coefficient larger than "
+           "4*DCT_MAX_VALUE";
+ }
+ }
+}
+} // namespace
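
A note on the CoeffSizeCheck bound: given the minimum quantizer of 4 stated in the comment above, keeping every coefficient within 4*DCT_MAX_VALUE presumably guarantees that the quantized magnitude stays representable, since

    \[
      |X_k| \le 4 \cdot \mathrm{DCT\_MAX\_VALUE}
      \;\Longrightarrow\;
      \left|\tfrac{X_k}{q}\right| \le \mathrm{DCT\_MAX\_VALUE}
      \quad \text{for } q \ge 4.
    \]
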
diff --git a/libvpx/test/dct32x32_test.cc b/libvpx/test/dct32x32_test.cc
new file mode 100644
index 0000000..e05d482
--- /dev/null
+++ b/libvpx/test/dct32x32_test.cc
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+extern "C" {
+#include "vp9/common/vp9_entropy.h"
+#include "./vp9_rtcd.h"
+ void vp9_short_fdct32x32_c(int16_t *input, int16_t *out, int pitch);
+ void vp9_short_idct32x32_add_c(short *input, uint8_t *output, int pitch);
+}
+
+#include "test/acm_random.h"
+#include "vpx/vpx_integer.h"
+
+using libvpx_test::ACMRandom;
+
+namespace {
+#ifdef _MSC_VER
+static int round(double x) {
+ if (x < 0)
+ return (int)ceil(x - 0.5);
+ else
+ return (int)floor(x + 0.5);
+}
+#endif
+
+static const double kPi = 3.141592653589793238462643383279502884;
+static void reference2_32x32_idct_2d(double *input, double *output) {
+ double x;
+ for (int l = 0; l < 32; ++l) {
+ for (int k = 0; k < 32; ++k) {
+ double s = 0;
+ for (int i = 0; i < 32; ++i) {
+ for (int j = 0; j < 32; ++j) {
+ x = cos(kPi * j * (l + 0.5) / 32.0) *
+ cos(kPi * i * (k + 0.5) / 32.0) * input[i * 32 + j] / 1024;
+ if (i != 0)
+ x *= sqrt(2.0);
+ if (j != 0)
+ x *= sqrt(2.0);
+ s += x;
+ }
+ }
+ output[k * 32 + l] = s / 4;
+ }
+ }
+}
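
In LaTeX, the quadruple loop above evaluates the full (non-separated) 2-D inverse DCT; with c_0 = 1 and c_{m>0} = \sqrt{2}, the code computes

    \[
      \mathrm{out}[k,l] = \frac{1}{4} \sum_{i=0}^{31} \sum_{j=0}^{31}
          c_i\, c_j\, \frac{\mathrm{in}[i,j]}{1024}\,
          \cos\!\left(\frac{\pi i (k+\frac{1}{2})}{32}\right)
          \cos\!\left(\frac{\pi j (l+\frac{1}{2})}{32}\right).
    \]
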
+
+static void reference_32x32_dct_1d(double in[32], double out[32], int stride) {
+ const double kInvSqrt2 = 0.707106781186547524400844362104;
+ for (int k = 0; k < 32; k++) {
+ out[k] = 0.0;
+ for (int n = 0; n < 32; n++)
+ out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 64.0);
+ if (k == 0)
+ out[k] = out[k] * kInvSqrt2;
+ }
+}
+
+static void reference_32x32_dct_2d(int16_t input[32*32], double output[32*32]) {
+ // First transform columns
+ for (int i = 0; i < 32; ++i) {
+ double temp_in[32], temp_out[32];
+ for (int j = 0; j < 32; ++j)
+ temp_in[j] = input[j*32 + i];
+ reference_32x32_dct_1d(temp_in, temp_out, 1);
+ for (int j = 0; j < 32; ++j)
+ output[j * 32 + i] = temp_out[j];
+ }
+ // Then transform rows
+ for (int i = 0; i < 32; ++i) {
+ double temp_in[32], temp_out[32];
+ for (int j = 0; j < 32; ++j)
+ temp_in[j] = output[j + i*32];
+ reference_32x32_dct_1d(temp_in, temp_out, 1);
+    // Scale by 1/4 so the reference matches the gain of the transform under test.
+ for (int j = 0; j < 32; ++j)
+ output[j + i * 32] = temp_out[j] / 4;
+ }
+}
+
+TEST(VP9Idct32x32Test, AccuracyCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ for (int i = 0; i < count_test_block; ++i) {
+ int16_t in[1024], coeff[1024];
+ uint8_t dst[1024], src[1024];
+ double out_r[1024];
+
+ for (int j = 0; j < 1024; ++j) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ }
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < 1024; ++j)
+ in[j] = src[j] - dst[j];
+
+ reference_32x32_dct_2d(in, out_r);
+ for (int j = 0; j < 1024; j++)
+ coeff[j] = round(out_r[j]);
+ vp9_short_idct32x32_add_c(coeff, dst, 32);
+ for (int j = 0; j < 1024; ++j) {
+ const int diff = dst[j] - src[j];
+ const int error = diff * diff;
+ EXPECT_GE(1, error)
+ << "Error: 32x32 IDCT has error " << error
+ << " at index " << j;
+ }
+ }
+}
+
+TEST(VP9Fdct32x32Test, AccuracyCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ unsigned int max_error = 0;
+ int64_t total_error = 0;
+ const int count_test_block = 1000;
+ for (int i = 0; i < count_test_block; ++i) {
+ int16_t test_input_block[1024];
+ int16_t test_temp_block[1024];
+ uint8_t dst[1024], src[1024];
+
+ for (int j = 0; j < 1024; ++j) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ }
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < 1024; ++j)
+ test_input_block[j] = src[j] - dst[j];
+
+ const int pitch = 64;
+ vp9_short_fdct32x32_c(test_input_block, test_temp_block, pitch);
+ vp9_short_idct32x32_add_c(test_temp_block, dst, 32);
+
+ for (int j = 0; j < 1024; ++j) {
+ const unsigned diff = dst[j] - src[j];
+ const unsigned error = diff * diff;
+ if (max_error < error)
+ max_error = error;
+ total_error += error;
+ }
+ }
+
+ EXPECT_GE(1u, max_error)
+ << "Error: 32x32 FDCT/IDCT has an individual roundtrip error > 1";
+
+ EXPECT_GE(count_test_block, total_error)
+ << "Error: 32x32 FDCT/IDCT has average roundtrip error > 1 per block";
+}
+
+TEST(VP9Fdct32x32Test, CoeffSizeCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ for (int i = 0; i < count_test_block; ++i) {
+ int16_t input_block[1024], input_extreme_block[1024];
+ int16_t output_block[1024], output_extreme_block[1024];
+
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < 1024; ++j) {
+ input_block[j] = rnd.Rand8() - rnd.Rand8();
+ input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+ }
+ if (i == 0)
+ for (int j = 0; j < 1024; ++j)
+ input_extreme_block[j] = 255;
+
+ const int pitch = 64;
+ vp9_short_fdct32x32_c(input_block, output_block, pitch);
+ vp9_short_fdct32x32_c(input_extreme_block, output_extreme_block, pitch);
+
+ // The minimum quant value is 4.
+ for (int j = 0; j < 1024; ++j) {
+ EXPECT_GE(4*DCT_MAX_VALUE, abs(output_block[j]))
+ << "Error: 32x32 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
+ EXPECT_GE(4*DCT_MAX_VALUE, abs(output_extreme_block[j]))
+ << "Error: 32x32 FDCT extreme has coefficient larger than "
+ "4*DCT_MAX_VALUE";
+ }
+ }
+}
+} // namespace
diff --git a/libvpx/test/decode_test_driver.cc b/libvpx/test/decode_test_driver.cc
index 3610f02..1f6d540 100644
--- a/libvpx/test/decode_test_driver.cc
+++ b/libvpx/test/decode_test_driver.cc
@@ -7,40 +7,41 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/register_state_check.h"
#include "test/video_source.h"
namespace libvpx_test {
-#if CONFIG_VP8_DECODER
-void Decoder::DecodeFrame(const uint8_t *cxdata, int size) {
- if (!decoder_.priv) {
- const vpx_codec_err_t res_init = vpx_codec_dec_init(&decoder_,
- &vpx_codec_vp8_dx_algo,
- &cfg_, 0);
- ASSERT_EQ(VPX_CODEC_OK, res_init) << DecodeError();
- }
- const vpx_codec_err_t res_dec = vpx_codec_decode(&decoder_,
- cxdata, size, NULL, 0);
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << DecodeError();
+vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, int size) {
+ vpx_codec_err_t res_dec;
+ InitOnce();
+ REGISTER_STATE_CHECK(res_dec = vpx_codec_decode(&decoder_,
+ cxdata, size, NULL, 0));
+ return res_dec;
}
void DecoderTest::RunLoop(CompressedVideoSource *video) {
vpx_codec_dec_cfg_t dec_cfg = {0};
- Decoder decoder(dec_cfg, 0);
+ Decoder* const decoder = codec_->CreateDecoder(dec_cfg, 0);
+ ASSERT_TRUE(decoder != NULL);
// Decode frames.
for (video->Begin(); video->cxdata(); video->Next()) {
- decoder.DecodeFrame(video->cxdata(), video->frame_size());
+ vpx_codec_err_t res_dec = decoder->DecodeFrame(video->cxdata(),
+ video->frame_size());
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
- DxDataIterator dec_iter = decoder.GetDxData();
+ DxDataIterator dec_iter = decoder->GetDxData();
const vpx_image_t *img = NULL;
// Get decompressed data
while ((img = dec_iter.Next()))
DecompressedFrameHook(*img, video->frame_number());
}
+
+ delete decoder;
}
-#endif
} // namespace libvpx_test
diff --git a/libvpx/test/decode_test_driver.h b/libvpx/test/decode_test_driver.h
index 6408bee..49e7384 100644
--- a/libvpx/test/decode_test_driver.h
+++ b/libvpx/test/decode_test_driver.h
@@ -14,10 +14,10 @@
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "vpx_config.h"
#include "vpx/vpx_decoder.h"
-#include "vpx/vp8dx.h"
namespace libvpx_test {
+class CodecFactory;
class CompressedVideoSource;
// Provides an object to handle decoding output
@@ -42,15 +42,15 @@ class DxDataIterator {
class Decoder {
public:
Decoder(vpx_codec_dec_cfg_t cfg, unsigned long deadline)
- : cfg_(cfg), deadline_(deadline) {
+ : cfg_(cfg), deadline_(deadline), init_done_(false) {
memset(&decoder_, 0, sizeof(decoder_));
}
- ~Decoder() {
+ virtual ~Decoder() {
vpx_codec_destroy(&decoder_);
}
- void DecodeFrame(const uint8_t *cxdata, int size);
+ vpx_codec_err_t DecodeFrame(const uint8_t *cxdata, int size);
DxDataIterator GetDxData() {
return DxDataIterator(&decoder_);
@@ -61,25 +61,45 @@ class Decoder {
}
void Control(int ctrl_id, int arg) {
+ InitOnce();
const vpx_codec_err_t res = vpx_codec_control_(&decoder_, ctrl_id, arg);
ASSERT_EQ(VPX_CODEC_OK, res) << DecodeError();
}
- protected:
- const char *DecodeError() {
+ void Control(int ctrl_id, const void *arg) {
+ InitOnce();
+ const vpx_codec_err_t res = vpx_codec_control_(&decoder_, ctrl_id, arg);
+ ASSERT_EQ(VPX_CODEC_OK, res) << DecodeError();
+ }
+
+ const char* DecodeError() {
const char *detail = vpx_codec_error_detail(&decoder_);
return detail ? detail : vpx_codec_error(&decoder_);
}
+ protected:
+ virtual const vpx_codec_iface_t* CodecInterface() const = 0;
+
+ void InitOnce() {
+ if (!init_done_) {
+ const vpx_codec_err_t res = vpx_codec_dec_init(&decoder_,
+ CodecInterface(),
+ &cfg_, 0);
+ ASSERT_EQ(VPX_CODEC_OK, res) << DecodeError();
+ init_done_ = true;
+ }
+ }
+
vpx_codec_ctx_t decoder_;
vpx_codec_dec_cfg_t cfg_;
unsigned int deadline_;
+ bool init_done_;
};
// Common test functionality for all Decoder tests.
class DecoderTest {
public:
- // Main loop.
+ // Main decoding loop
virtual void RunLoop(CompressedVideoSource *video);
// Hook to be called on every decompressed frame.
@@ -87,9 +107,11 @@ class DecoderTest {
const unsigned int frame_number) {}
protected:
- DecoderTest() {}
+ explicit DecoderTest(const CodecFactory *codec) : codec_(codec) {}
virtual ~DecoderTest() {}
+
+ const CodecFactory *codec_;
};
} // namespace libvpx_test
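
With CodecInterface() now pure virtual and initialization deferred to InitOnce(), a concrete decoder only has to supply its interface pointer. A minimal sketch of such a subclass, assuming vpx/vp8dx.h is included for vpx_codec_vp8_dx_algo (the real subclasses live in test/codec_factory.h; the class shown here is illustrative):

    class VP8Decoder : public Decoder {
     public:
      VP8Decoder(vpx_codec_dec_cfg_t cfg, unsigned long deadline)
          : Decoder(cfg, deadline) {}

     protected:
      // Hands the lazy InitOnce() path the VP8 decoder interface.
      virtual const vpx_codec_iface_t* CodecInterface() const {
        return &vpx_codec_vp8_dx_algo;
      }
    };
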
diff --git a/libvpx/test/encode_test_driver.cc b/libvpx/test/encode_test_driver.cc
index ebb3959..eed3e33 100644
--- a/libvpx/test/encode_test_driver.cc
+++ b/libvpx/test/encode_test_driver.cc
@@ -7,11 +7,12 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
+
#include "vpx_config.h"
+#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
-#if CONFIG_VP8_DECODER
#include "test/decode_test_driver.h"
-#endif
+#include "test/register_state_check.h"
#include "test/video_source.h"
#include "third_party/googletest/src/include/gtest/gtest.h"
@@ -44,7 +45,7 @@ void Encoder::EncodeFrameInternal(const VideoSource &video,
cfg_.g_h = img->d_h;
cfg_.g_timebase = video.timebase();
cfg_.rc_twopass_stats_in = stats_->buf();
- res = vpx_codec_enc_init(&encoder_, &vpx_codec_vp8_cx_algo, &cfg_,
+ res = vpx_codec_enc_init(&encoder_, CodecInterface(), &cfg_,
init_flags_);
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
@@ -58,9 +59,10 @@ void Encoder::EncodeFrameInternal(const VideoSource &video,
}
// Encode the frame
- res = vpx_codec_encode(&encoder_,
- video.img(), video.pts(), video.duration(),
- frame_flags, deadline_);
+ REGISTER_STATE_CHECK(
+ res = vpx_codec_encode(&encoder_,
+ video.img(), video.pts(), video.duration(),
+ frame_flags, deadline_));
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
@@ -70,6 +72,11 @@ void Encoder::Flush() {
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
+void EncoderTest::InitializeConfig() {
+ const vpx_codec_err_t res = codec_->DefaultEncoderConfig(&cfg_, 0);
+ ASSERT_EQ(VPX_CODEC_OK, res);
+}
+
void EncoderTest::SetMode(TestMode mode) {
switch (mode) {
case kRealTime:
@@ -123,13 +130,17 @@ static bool compare_img(const vpx_image_t *img1,
return match;
}
+void EncoderTest::MismatchHook(const vpx_image_t *img1,
+ const vpx_image_t *img2) {
+ ASSERT_TRUE(0) << "Encode/Decode mismatch found";
+}
+
void EncoderTest::RunLoop(VideoSource *video) {
-#if CONFIG_VP8_DECODER
vpx_codec_dec_cfg_t dec_cfg = {0};
-#endif
stats_.Reset();
+ ASSERT_TRUE(passes_ == 1 || passes_ == 2);
for (unsigned int pass = 0; pass < passes_; pass++) {
last_pts_ = 0;
@@ -141,31 +152,34 @@ void EncoderTest::RunLoop(VideoSource *video) {
cfg_.g_pass = VPX_RC_LAST_PASS;
BeginPassHook(pass);
- Encoder encoder(cfg_, deadline_, init_flags_, &stats_);
-#if CONFIG_VP8_DECODER
- Decoder decoder(dec_cfg, 0);
- bool has_cxdata = false;
-#endif
+ Encoder* const encoder = codec_->CreateEncoder(cfg_, deadline_, init_flags_,
+ &stats_);
+ ASSERT_TRUE(encoder != NULL);
+ Decoder* const decoder = codec_->CreateDecoder(dec_cfg, 0);
bool again;
for (again = true, video->Begin(); again; video->Next()) {
again = video->img() != NULL;
PreEncodeFrameHook(video);
- PreEncodeFrameHook(video, &encoder);
- encoder.EncodeFrame(video, frame_flags_);
+ PreEncodeFrameHook(video, encoder);
+ encoder->EncodeFrame(video, frame_flags_);
- CxDataIterator iter = encoder.GetCxData();
+ CxDataIterator iter = encoder->GetCxData();
+ bool has_cxdata = false;
+ bool has_dxdata = false;
while (const vpx_codec_cx_pkt_t *pkt = iter.Next()) {
+ pkt = MutateEncoderOutputHook(pkt);
again = true;
-
switch (pkt->kind) {
case VPX_CODEC_CX_FRAME_PKT:
-#if CONFIG_VP8_DECODER
has_cxdata = true;
- decoder.DecodeFrame((const uint8_t*)pkt->data.frame.buf,
- pkt->data.frame.sz);
-#endif
+ if (decoder && DoDecode()) {
+ vpx_codec_err_t res_dec = decoder->DecodeFrame(
+ (const uint8_t*)pkt->data.frame.buf, pkt->data.frame.sz);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
+ has_dxdata = true;
+ }
ASSERT_GE(pkt->data.frame.pts, last_pts_);
last_pts_ = pkt->data.frame.pts;
FramePktHook(pkt);
@@ -180,25 +194,32 @@ void EncoderTest::RunLoop(VideoSource *video) {
}
}
-#if CONFIG_VP8_DECODER
- if (has_cxdata) {
- const vpx_image_t *img_enc = encoder.GetPreviewFrame();
- DxDataIterator dec_iter = decoder.GetDxData();
+ if (has_dxdata && has_cxdata) {
+ const vpx_image_t *img_enc = encoder->GetPreviewFrame();
+ DxDataIterator dec_iter = decoder->GetDxData();
const vpx_image_t *img_dec = dec_iter.Next();
- if(img_enc && img_dec) {
+ if (img_enc && img_dec) {
const bool res = compare_img(img_enc, img_dec);
- ASSERT_TRUE(res)<< "Encoder/Decoder mismatch found.";
+ if (!res) { // Mismatch
+ MismatchHook(img_enc, img_dec);
+ }
}
+ if (img_dec)
+ DecompressedFrameHook(*img_dec, video->pts());
}
-#endif
if (!Continue())
break;
}
EndPassHook();
+ if (decoder)
+ delete decoder;
+ delete encoder;
+
if (!Continue())
break;
}
}
+
} // namespace libvpx_test
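
RunLoop now round-trips every compressed packet through a decoder whenever DoDecode() allows, compares the decoder's output against the encoder's preview frame, and routes any difference to MismatchHook() rather than failing unconditionally, so subclasses can tolerate and measure mismatches. An illustrative override, assuming the compute_psnr() helper from test/util.h and counters declared by the subclass (error_resilience_test.cc below does essentially this):

    virtual void MismatchHook(const vpx_image_t *img1, const vpx_image_t *img2) {
      mismatch_psnr_ += compute_psnr(img1, img2);  // hypothetical members
      ++mismatch_nframes_;
    }
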
diff --git a/libvpx/test/encode_test_driver.h b/libvpx/test/encode_test_driver.h
index 0141fa9..6aeb96b 100644
--- a/libvpx/test/encode_test_driver.h
+++ b/libvpx/test/encode_test_driver.h
@@ -9,14 +9,17 @@
*/
#ifndef TEST_ENCODE_TEST_DRIVER_H_
#define TEST_ENCODE_TEST_DRIVER_H_
+
#include <string>
#include <vector>
+
+#include "./vpx_config.h"
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "vpx/vpx_encoder.h"
-#include "vpx/vp8cx.h"
namespace libvpx_test {
+class CodecFactory;
class VideoSource;
enum TestMode {
@@ -36,12 +39,15 @@ enum TestMode {
::libvpx_test::kOnePassGood, \
::libvpx_test::kOnePassBest)
+#define TWO_PASS_TEST_MODES ::testing::Values(::libvpx_test::kTwoPassGood, \
+ ::libvpx_test::kTwoPassBest)
+
// Provides an object to handle the libvpx get_cx_data() iteration pattern
class CxDataIterator {
public:
explicit CxDataIterator(vpx_codec_ctx_t *encoder)
- : encoder_(encoder), iter_(NULL) {}
+ : encoder_(encoder), iter_(NULL) {}
const vpx_codec_cx_pkt_t *Next() {
return vpx_codec_get_cx_data(encoder_, &iter_);
@@ -83,11 +89,11 @@ class Encoder {
public:
Encoder(vpx_codec_enc_cfg_t cfg, unsigned long deadline,
const unsigned long init_flags, TwopassStatsStore *stats)
- : cfg_(cfg), deadline_(deadline), init_flags_(init_flags), stats_(stats) {
+ : cfg_(cfg), deadline_(deadline), init_flags_(init_flags), stats_(stats) {
memset(&encoder_, 0, sizeof(encoder_));
}
- ~Encoder() {
+ virtual ~Encoder() {
vpx_codec_destroy(&encoder_);
}
@@ -112,11 +118,18 @@ class Encoder {
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
+ void Control(int ctrl_id, struct vpx_scaling_mode *arg) {
+ const vpx_codec_err_t res = vpx_codec_control_(&encoder_, ctrl_id, arg);
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+ }
+
void set_deadline(unsigned long deadline) {
deadline_ = deadline;
}
protected:
+ virtual const vpx_codec_iface_t* CodecInterface() const = 0;
+
const char *EncoderError() {
const char *detail = vpx_codec_error_detail(&encoder_);
return detail ? detail : vpx_codec_error(&encoder_);
@@ -145,22 +158,19 @@ class Encoder {
// classes directly, so that tests can be parameterized differently.
class EncoderTest {
protected:
- EncoderTest() : abort_(false), init_flags_(0), frame_flags_(0),
- last_pts_(0) {}
+ explicit EncoderTest(const CodecFactory *codec)
+ : codec_(codec), abort_(false), init_flags_(0), frame_flags_(0),
+ last_pts_(0) {}
virtual ~EncoderTest() {}
// Initialize the cfg_ member with the default configuration.
- void InitializeConfig() {
- const vpx_codec_err_t res = vpx_codec_enc_config_default(
- &vpx_codec_vp8_cx_algo, &cfg_, 0);
- ASSERT_EQ(VPX_CODEC_OK, res);
- }
+ void InitializeConfig();
// Map the TestMode enum to the deadline_ and passes_ variables.
void SetMode(TestMode mode);
- // Main loop.
+ // Main loop
virtual void RunLoop(VideoSource *video);
// Hook to be called at the beginning of a pass.
@@ -182,6 +192,24 @@ class EncoderTest {
// Hook to determine whether the encode loop should continue.
virtual bool Continue() const { return !abort_; }
+ const CodecFactory *codec_;
+ // Hook to determine whether to decode frame after encoding
+ virtual bool DoDecode() const { return 1; }
+
+ // Hook to handle encode/decode mismatch
+ virtual void MismatchHook(const vpx_image_t *img1,
+ const vpx_image_t *img2);
+
+ // Hook to be called on every decompressed frame.
+ virtual void DecompressedFrameHook(const vpx_image_t& img,
+ vpx_codec_pts_t pts) {}
+
+ // Hook that can modify the encoder's output data
+ virtual const vpx_codec_cx_pkt_t * MutateEncoderOutputHook(
+ const vpx_codec_cx_pkt_t *pkt) {
+ return pkt;
+ }
+
bool abort_;
vpx_codec_enc_cfg_t cfg_;
unsigned int passes_;
diff --git a/libvpx/test/error_resilience_test.cc b/libvpx/test/error_resilience_test.cc
index 25c6731..ddfbd0f 100644
--- a/libvpx/test/error_resilience_test.cc
+++ b/libvpx/test/error_resilience_test.cc
@@ -7,22 +7,37 @@
in the file PATENTS. All contributing project authors may
be found in the AUTHORS file in the root of the source tree.
*/
+
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
#include "test/i420_video_source.h"
+#include "test/util.h"
namespace {
-class ErrorResilienceTest : public libvpx_test::EncoderTest,
- public ::testing::TestWithParam<int> {
+const int kMaxErrorFrames = 8;
+const int kMaxDroppableFrames = 8;
+
+class ErrorResilienceTest : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
protected:
- ErrorResilienceTest() {
- psnr_ = 0.0;
- nframes_ = 0;
- encoding_mode_ = static_cast<libvpx_test::TestMode>(GetParam());
+ ErrorResilienceTest() : EncoderTest(GET_PARAM(0)),
+ psnr_(0.0),
+ nframes_(0),
+ mismatch_psnr_(0.0),
+ mismatch_nframes_(0),
+ encoding_mode_(GET_PARAM(1)) {
+ Reset();
}
+
virtual ~ErrorResilienceTest() {}
+ void Reset() {
+ error_nframes_ = 0;
+ droppable_nframes_ = 0;
+ }
+
virtual void SetUp() {
InitializeConfig();
SetMode(encoding_mode_);
@@ -31,6 +46,8 @@ class ErrorResilienceTest : public libvpx_test::EncoderTest,
virtual void BeginPassHook(unsigned int /*pass*/) {
psnr_ = 0.0;
nframes_ = 0;
+ mismatch_psnr_ = 0.0;
+ mismatch_nframes_ = 0;
}
virtual bool Continue() const {
@@ -42,15 +59,92 @@ class ErrorResilienceTest : public libvpx_test::EncoderTest,
nframes_++;
}
+ virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video) {
+ frame_flags_ &= ~(VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF);
+ if (droppable_nframes_ > 0 &&
+ (cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) {
+ for (unsigned int i = 0; i < droppable_nframes_; ++i) {
+ if (droppable_frames_[i] == nframes_) {
+ std::cout << " Encoding droppable frame: "
+ << droppable_frames_[i] << "\n";
+ frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF);
+ return;
+ }
+ }
+ }
+ }
+
double GetAveragePsnr() const {
if (nframes_)
return psnr_ / nframes_;
return 0.0;
}
+ double GetAverageMismatchPsnr() const {
+ if (mismatch_nframes_)
+ return mismatch_psnr_ / mismatch_nframes_;
+ return 0.0;
+ }
+
+ virtual bool DoDecode() const {
+ if (error_nframes_ > 0 &&
+ (cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) {
+ for (unsigned int i = 0; i < error_nframes_; ++i) {
+ if (error_frames_[i] == nframes_ - 1) {
+ std::cout << " Skipping decoding frame: "
+ << error_frames_[i] << "\n";
+ return 0;
+ }
+ }
+ }
+ return 1;
+ }
+
+ virtual void MismatchHook(const vpx_image_t *img1,
+ const vpx_image_t *img2) {
+ double mismatch_psnr = compute_psnr(img1, img2);
+ mismatch_psnr_ += mismatch_psnr;
+ ++mismatch_nframes_;
+ // std::cout << "Mismatch frame psnr: " << mismatch_psnr << "\n";
+ }
+
+ void SetErrorFrames(int num, unsigned int *list) {
+ if (num > kMaxErrorFrames)
+ num = kMaxErrorFrames;
+ else if (num < 0)
+ num = 0;
+ error_nframes_ = num;
+ for (unsigned int i = 0; i < error_nframes_; ++i)
+ error_frames_[i] = list[i];
+ }
+
+ void SetDroppableFrames(int num, unsigned int *list) {
+ if (num > kMaxDroppableFrames)
+ num = kMaxDroppableFrames;
+ else if (num < 0)
+ num = 0;
+ droppable_nframes_ = num;
+ for (unsigned int i = 0; i < droppable_nframes_; ++i)
+ droppable_frames_[i] = list[i];
+ }
+
+ unsigned int GetMismatchFrames() {
+ return mismatch_nframes_;
+ }
+
private:
double psnr_;
unsigned int nframes_;
+ unsigned int error_nframes_;
+ unsigned int droppable_nframes_;
+ double mismatch_psnr_;
+ unsigned int mismatch_nframes_;
+ unsigned int error_frames_[kMaxErrorFrames];
+ unsigned int droppable_frames_[kMaxDroppableFrames];
libvpx_test::TestMode encoding_mode_;
};
@@ -85,6 +179,56 @@ TEST_P(ErrorResilienceTest, OnVersusOff) {
}
}
-INSTANTIATE_TEST_CASE_P(OnOffTest, ErrorResilienceTest,
- ONE_PASS_TEST_MODES);
+TEST_P(ErrorResilienceTest, DropFramesWithoutRecovery) {
+ const vpx_rational timebase = { 33333333, 1000000000 };
+ cfg_.g_timebase = timebase;
+ cfg_.rc_target_bitrate = 500;
+
+ init_flags_ = VPX_CODEC_USE_PSNR;
+
+ libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ timebase.den, timebase.num, 0, 30);
+
+ // Error resilient mode ON.
+ cfg_.g_error_resilient = 1;
+
+  // Set an arbitrary set of error frames, identical to the droppable frames.
+ unsigned int num_droppable_frames = 2;
+ unsigned int droppable_frame_list[] = {5, 16};
+ SetDroppableFrames(num_droppable_frames, droppable_frame_list);
+ SetErrorFrames(num_droppable_frames, droppable_frame_list);
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ // Test that no mismatches have been found
+ std::cout << " Mismatch frames: "
+ << GetMismatchFrames() << "\n";
+ EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+
+  // Reset the previously set error/droppable frames.
+ Reset();
+
+#if 0
+ // TODO(jkoleszar): This test is disabled for the time being as too
+ // sensitive. It's not clear how to set a reasonable threshold for
+ // this behavior.
+
+ // Now set an arbitrary set of error frames that are non-droppable
+ unsigned int num_error_frames = 3;
+ unsigned int error_frame_list[] = {3, 10, 20};
+ SetErrorFrames(num_error_frames, error_frame_list);
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+
+ // Test that dropping an arbitrary set of inter frames does not hurt too much
+ // Note the Average Mismatch PSNR is the average of the PSNR between
+ // decoded frame and encoder's version of the same frame for all frames
+ // with mismatch.
+ const double psnr_resilience_mismatch = GetAverageMismatchPsnr();
+ std::cout << " Mismatch PSNR: "
+ << psnr_resilience_mismatch << "\n";
+ EXPECT_GT(psnr_resilience_mismatch, 20.0);
+#endif
+}
+
+VP8_INSTANTIATE_TEST_CASE(ErrorResilienceTest, ONE_PASS_TEST_MODES);
+VP9_INSTANTIATE_TEST_CASE(ErrorResilienceTest, ONE_PASS_TEST_MODES);
+
} // namespace
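
The zero-mismatch expectation above rests on what a droppable frame is: a frame encoded with all three no-update flags leaves every reference buffer untouched, so skipping its decode cannot desynchronize decoder and encoder state. The flag combination toggled in PreEncodeFrameHook (flag names from vpx/vp8cx.h):

    const vpx_enc_frame_flags_t kDroppableFlags = VP8_EFLAG_NO_UPD_LAST |
                                                  VP8_EFLAG_NO_UPD_GF |
                                                  VP8_EFLAG_NO_UPD_ARF;
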
diff --git a/libvpx/test/fdct4x4_test.cc b/libvpx/test/fdct4x4_test.cc
index 619b23d..1c887bb 100644
--- a/libvpx/test/fdct4x4_test.cc
+++ b/libvpx/test/fdct4x4_test.cc
@@ -1,79 +1,31 @@
/*
-* Copyright (c) 2012 The WebM project authors. All Rights Reserved.
-*
-* Use of this source code is governed by a BSD-style license
-* that can be found in the LICENSE file in the root of the source
-* tree. An additional intellectual property rights grant can be found
-* in the file PATENTS. All contributing project authors may
-* be found in the AUTHORS file in the root of the source tree.
-*/
-
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
#include <math.h>
-#include <stddef.h>
-#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/types.h>
+#include "third_party/googletest/src/include/gtest/gtest.h"
extern "C" {
-#include "vpx_rtcd.h"
+#include "vp9_rtcd.h"
}
-#include "test/acm_random.h"
-#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "acm_random.h"
#include "vpx/vpx_integer.h"
+using libvpx_test::ACMRandom;
namespace {
-const int cospi8sqrt2minus1 = 20091;
-const int sinpi8sqrt2 = 35468;
-
-void reference_idct4x4(const int16_t *input, int16_t *output) {
- const int16_t *ip = input;
- int16_t *op = output;
-
- for (int i = 0; i < 4; ++i) {
- const int a1 = ip[0] + ip[8];
- const int b1 = ip[0] - ip[8];
- const int temp1 = (ip[4] * sinpi8sqrt2) >> 16;
- const int temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16);
- const int c1 = temp1 - temp2;
- const int temp3 = ip[4] + ((ip[4] * cospi8sqrt2minus1) >> 16);
- const int temp4 = (ip[12] * sinpi8sqrt2) >> 16;
- const int d1 = temp3 + temp4;
- op[0] = a1 + d1;
- op[12] = a1 - d1;
- op[4] = b1 + c1;
- op[8] = b1 - c1;
- ++ip;
- ++op;
- }
- ip = output;
- op = output;
- for (int i = 0; i < 4; ++i) {
- const int a1 = ip[0] + ip[2];
- const int b1 = ip[0] - ip[2];
- const int temp1 = (ip[1] * sinpi8sqrt2) >> 16;
- const int temp2 = ip[3] + ((ip[3] * cospi8sqrt2minus1) >> 16);
- const int c1 = temp1 - temp2;
- const int temp3 = ip[1] + ((ip[1] * cospi8sqrt2minus1) >> 16);
- const int temp4 = (ip[3] * sinpi8sqrt2) >> 16;
- const int d1 = temp3 + temp4;
- op[0] = (a1 + d1 + 4) >> 3;
- op[3] = (a1 - d1 + 4) >> 3;
- op[1] = (b1 + c1 + 4) >> 3;
- op[2] = (b1 - c1 + 4) >> 3;
- ip += 4;
- op += 4;
- }
-}
-
-using libvpx_test::ACMRandom;
-
-TEST(Vp8FdctTest, SignBiasCheck) {
+TEST(Vp9Fdct4x4Test, SignBiasCheck) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
int16_t test_input_block[16];
int16_t test_output_block[16];
@@ -88,7 +40,9 @@ TEST(Vp8FdctTest, SignBiasCheck) {
for (int j = 0; j < 16; ++j)
test_input_block[j] = rnd.Rand8() - rnd.Rand8();
- vp8_short_fdct4x4_c(test_input_block, test_output_block, pitch);
+ // TODO(Yaowu): this should be converted to a parameterized test
+ // to test optimized versions of this function.
+ vp9_short_fdct4x4_c(test_input_block, test_output_block, pitch);
for (int j = 0; j < 16; ++j) {
if (test_output_block[j] < 0)
@@ -98,13 +52,13 @@ TEST(Vp8FdctTest, SignBiasCheck) {
}
}
- bool bias_acceptable = true;
- for (int j = 0; j < 16; ++j)
- bias_acceptable = bias_acceptable &&
- (abs(count_sign_block[j][0] - count_sign_block[j][1]) < 10000);
-
- EXPECT_EQ(true, bias_acceptable)
- << "Error: 4x4 FDCT has a sign bias > 1% for input range [-255, 255]";
+ for (int j = 0; j < 16; ++j) {
+ const bool bias_acceptable = (abs(count_sign_block[j][0] -
+ count_sign_block[j][1]) < 10000);
+ EXPECT_TRUE(bias_acceptable)
+ << "Error: 4x4 FDCT has a sign bias > 1%"
+ << " for input range [-255, 255] at index " << j;
+ }
memset(count_sign_block, 0, sizeof(count_sign_block));
@@ -113,7 +67,9 @@ TEST(Vp8FdctTest, SignBiasCheck) {
for (int j = 0; j < 16; ++j)
test_input_block[j] = (rnd.Rand8() >> 4) - (rnd.Rand8() >> 4);
- vp8_short_fdct4x4_c(test_input_block, test_output_block, pitch);
+ // TODO(Yaowu): this should be converted to a parameterized test
+ // to test optimized versions of this function.
+ vp9_short_fdct4x4_c(test_input_block, test_output_block, pitch);
for (int j = 0; j < 16; ++j) {
if (test_output_block[j] < 0)
@@ -123,16 +79,16 @@ TEST(Vp8FdctTest, SignBiasCheck) {
}
}
- bias_acceptable = true;
- for (int j = 0; j < 16; ++j)
- bias_acceptable = bias_acceptable &&
- (abs(count_sign_block[j][0] - count_sign_block[j][1]) < 100000);
-
- EXPECT_EQ(true, bias_acceptable)
- << "Error: 4x4 FDCT has a sign bias > 10% for input range [-15, 15]";
+ for (int j = 0; j < 16; ++j) {
+ const bool bias_acceptable = (abs(count_sign_block[j][0] -
+ count_sign_block[j][1]) < 100000);
+ EXPECT_TRUE(bias_acceptable)
+ << "Error: 4x4 FDCT has a sign bias > 10%"
+ << " for input range [-15, 15] at index " << j;
+ }
};
-TEST(Vp8FdctTest, RoundTripErrorCheck) {
+TEST(Vp9Fdct4x4Test, RoundTripErrorCheck) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
int max_error = 0;
double total_error = 0;
@@ -140,30 +96,49 @@ TEST(Vp8FdctTest, RoundTripErrorCheck) {
for (int i = 0; i < count_test_block; ++i) {
int16_t test_input_block[16];
int16_t test_temp_block[16];
- int16_t test_output_block[16];
+ uint8_t dst[16], src[16];
+ for (int j = 0; j < 16; ++j) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ }
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < 16; ++j)
- test_input_block[j] = rnd.Rand8() - rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+ // TODO(Yaowu): this should be converted to a parameterized test
+ // to test optimized versions of this function.
const int pitch = 8;
- vp8_short_fdct4x4_c(test_input_block, test_temp_block, pitch);
- reference_idct4x4(test_temp_block, test_output_block);
+ vp9_short_fdct4x4_c(test_input_block, test_temp_block, pitch);
for (int j = 0; j < 16; ++j) {
- const int diff = test_input_block[j] - test_output_block[j];
+      if (test_temp_block[j] > 0) {
+ test_temp_block[j] += 2;
+ test_temp_block[j] /= 4;
+ test_temp_block[j] *= 4;
+ } else {
+ test_temp_block[j] -= 2;
+ test_temp_block[j] /= 4;
+ test_temp_block[j] *= 4;
+ }
+ }
+
+ // Because the bitstream is not frozen yet, use the idct in the codebase.
+ vp9_short_idct4x4_add_c(test_temp_block, dst, 4);
+
+ for (int j = 0; j < 16; ++j) {
+ const int diff = dst[j] - src[j];
const int error = diff * diff;
if (max_error < error)
max_error = error;
total_error += error;
}
}
-
- EXPECT_GE(1, max_error )
- << "Error: FDCT/IDCT has an individual roundtrip error > 1";
+ EXPECT_GE(1, max_error)
+ << "Error: FDCT/IDCT has an individual roundtrip error > 1";
EXPECT_GE(count_test_block, total_error)
- << "Error: FDCT/IDCT has average roundtrip error > 1 per block";
+ << "Error: FDCT/IDCT has average roundtrip error > 1 per block";
};
} // namespace
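
The add-2/divide/multiply sequence in RoundTripErrorCheck emulates quantization at the smallest step size: it rounds each coefficient to the nearest multiple of 4, the +/-2 offset giving round-to-nearest under C's truncating division (e.g. 5 becomes (5+2)/4*4 = 4, and -5 becomes (-5-2)/4*4 = -4). A hypothetical helper that makes the intent explicit:

    static int16_t RoundToMultipleOf4(int16_t x) {
      // Adding +/-2 before the truncating divide yields round-to-nearest,
      // ties away from zero -- the same arithmetic as the inline sequence.
      return ((x > 0 ? x + 2 : x - 2) / 4) * 4;
    }
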
diff --git a/libvpx/test/fdct8x8_test.cc b/libvpx/test/fdct8x8_test.cc
new file mode 100644
index 0000000..90b4ecd
--- /dev/null
+++ b/libvpx/test/fdct8x8_test.cc
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+extern "C" {
+#include "vp9_rtcd.h"
+void vp9_short_idct8x8_add_c(short *input, uint8_t *output, int pitch);
+}
+
+#include "acm_random.h"
+#include "vpx/vpx_integer.h"
+
+using libvpx_test::ACMRandom;
+
+namespace {
+
+TEST(VP9Fdct8x8Test, SignBiasCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ int16_t test_input_block[64];
+ int16_t test_output_block[64];
+ const int pitch = 16;
+ int count_sign_block[64][2];
+ const int count_test_block = 100000;
+
+ memset(count_sign_block, 0, sizeof(count_sign_block));
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < 64; ++j)
+ test_input_block[j] = rnd.Rand8() - rnd.Rand8();
+
+ vp9_short_fdct8x8_c(test_input_block, test_output_block, pitch);
+
+ for (int j = 0; j < 64; ++j) {
+ if (test_output_block[j] < 0)
+ ++count_sign_block[j][0];
+ else if (test_output_block[j] > 0)
+ ++count_sign_block[j][1];
+ }
+ }
+
+ for (int j = 0; j < 64; ++j) {
+ const int diff = abs(count_sign_block[j][0] - count_sign_block[j][1]);
+ const int max_diff = 1125;
+ EXPECT_LT(diff, max_diff)
+ << "Error: 8x8 FDCT has a sign bias > "
+ << 1. * max_diff / count_test_block * 100 << "%"
+ << " for input range [-255, 255] at index " << j
+ << " count0: " << count_sign_block[j][0]
+ << " count1: " << count_sign_block[j][1]
+ << " diff: " << diff;
+ }
+
+ memset(count_sign_block, 0, sizeof(count_sign_block));
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-15, 15].
+ for (int j = 0; j < 64; ++j)
+ test_input_block[j] = (rnd.Rand8() >> 4) - (rnd.Rand8() >> 4);
+
+ vp9_short_fdct8x8_c(test_input_block, test_output_block, pitch);
+
+ for (int j = 0; j < 64; ++j) {
+ if (test_output_block[j] < 0)
+ ++count_sign_block[j][0];
+ else if (test_output_block[j] > 0)
+ ++count_sign_block[j][1];
+ }
+ }
+
+ for (int j = 0; j < 64; ++j) {
+ const int diff = abs(count_sign_block[j][0] - count_sign_block[j][1]);
+ const int max_diff = 10000;
+    EXPECT_LT(diff, max_diff)
+        << "Error: 8x8 FDCT has a sign bias > "
+ << 1. * max_diff / count_test_block * 100 << "%"
+ << " for input range [-15, 15] at index " << j
+ << " count0: " << count_sign_block[j][0]
+ << " count1: " << count_sign_block[j][1]
+ << " diff: " << diff;
+ }
+};
+
+TEST(VP9Fdct8x8Test, RoundTripErrorCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ int max_error = 0;
+ double total_error = 0;
+ const int count_test_block = 100000;
+ for (int i = 0; i < count_test_block; ++i) {
+ int16_t test_input_block[64];
+ int16_t test_temp_block[64];
+ uint8_t dst[64], src[64];
+
+ for (int j = 0; j < 64; ++j) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ }
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < 64; ++j)
+ test_input_block[j] = src[j] - dst[j];
+
+ const int pitch = 16;
+ vp9_short_fdct8x8_c(test_input_block, test_temp_block, pitch);
+    for (int j = 0; j < 64; ++j) {
+      if (test_temp_block[j] > 0) {
+ test_temp_block[j] += 2;
+ test_temp_block[j] /= 4;
+ test_temp_block[j] *= 4;
+ } else {
+ test_temp_block[j] -= 2;
+ test_temp_block[j] /= 4;
+ test_temp_block[j] *= 4;
+ }
+ }
+ vp9_short_idct8x8_add_c(test_temp_block, dst, 8);
+
+ for (int j = 0; j < 64; ++j) {
+ const int diff = dst[j] - src[j];
+ const int error = diff * diff;
+ if (max_error < error)
+ max_error = error;
+ total_error += error;
+ }
+ }
+
+ EXPECT_GE(1, max_error)
+ << "Error: 8x8 FDCT/IDCT has an individual roundtrip error > 1";
+
+ EXPECT_GE(count_test_block/5, total_error)
+ << "Error: 8x8 FDCT/IDCT has average roundtrip error > 1/5 per block";
+};
+
+TEST(VP9Fdct8x8Test, ExtremalCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ int max_error = 0;
+ double total_error = 0;
+ const int count_test_block = 100000;
+ for (int i = 0; i < count_test_block; ++i) {
+ int16_t test_input_block[64];
+ int16_t test_temp_block[64];
+ uint8_t dst[64], src[64];
+
+ for (int j = 0; j < 64; ++j) {
+ src[j] = rnd.Rand8() % 2 ? 255 : 0;
+ dst[j] = src[j] > 0 ? 0 : 255;
+ }
+    // Initialize a test block with extreme input values (-255 or 255).
+ for (int j = 0; j < 64; ++j)
+ test_input_block[j] = src[j] - dst[j];
+
+ const int pitch = 16;
+ vp9_short_fdct8x8_c(test_input_block, test_temp_block, pitch);
+ vp9_short_idct8x8_add_c(test_temp_block, dst, 8);
+
+ for (int j = 0; j < 64; ++j) {
+ const int diff = dst[j] - src[j];
+ const int error = diff * diff;
+ if (max_error < error)
+ max_error = error;
+ total_error += error;
+ }
+
+ EXPECT_GE(1, max_error)
+ << "Error: Extremal 8x8 FDCT/IDCT has an"
+ << " individual roundtrip error > 1";
+
+ EXPECT_GE(count_test_block/5, total_error)
+ << "Error: Extremal 8x8 FDCT/IDCT has average"
+ << " roundtrip error > 1/5 per block";
+ }
+};
+
+} // namespace
diff --git a/libvpx/test/i420_video_source.h b/libvpx/test/i420_video_source.h
index 219bd33..12a6ab1 100644
--- a/libvpx/test/i420_video_source.h
+++ b/libvpx/test/i420_video_source.h
@@ -83,7 +83,7 @@ class I420VideoSource : public VideoSource {
void SetSize(unsigned int width, unsigned int height) {
if (width != width_ || height != height_) {
vpx_img_free(img_);
- img_ = vpx_img_alloc(NULL, VPX_IMG_FMT_VPXI420, width, height, 1);
+ img_ = vpx_img_alloc(NULL, VPX_IMG_FMT_I420, width, height, 1);
ASSERT_TRUE(img_ != NULL);
width_ = width;
height_ = height;
diff --git a/libvpx/test/idct8x8_test.cc b/libvpx/test/idct8x8_test.cc
new file mode 100644
index 0000000..67db78b
--- /dev/null
+++ b/libvpx/test/idct8x8_test.cc
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+extern "C" {
+#include "vp9_rtcd.h"
+}
+
+#include "acm_random.h"
+#include "vpx/vpx_integer.h"
+
+using libvpx_test::ACMRandom;
+
+namespace {
+
+#ifdef _MSC_VER
+static int round(double x) {
+  if (x < 0)
+ return (int)ceil(x - 0.5);
+ else
+ return (int)floor(x + 0.5);
+}
+#endif
+
+void reference_dct_1d(double input[8], double output[8]) {
+ const double kPi = 3.141592653589793238462643383279502884;
+ const double kInvSqrt2 = 0.707106781186547524400844362104;
+ for (int k = 0; k < 8; k++) {
+ output[k] = 0.0;
+ for (int n = 0; n < 8; n++)
+ output[k] += input[n]*cos(kPi*(2*n+1)*k/16.0);
+ if (k == 0)
+ output[k] = output[k]*kInvSqrt2;
+ }
+}
+
+void reference_dct_2d(int16_t input[64], double output[64]) {
+ // First transform columns
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = input[j*8 + i];
+ reference_dct_1d(temp_in, temp_out);
+ for (int j = 0; j < 8; ++j)
+ output[j*8 + i] = temp_out[j];
+ }
+ // Then transform rows
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = output[j + i*8];
+ reference_dct_1d(temp_in, temp_out);
+ for (int j = 0; j < 8; ++j)
+ output[j + i*8] = temp_out[j];
+ }
+  // Scale by 2 so the reference matches the gain of the transform under test.
+ for (int i = 0; i < 64; ++i)
+ output[i] *= 2;
+}
+
+void reference_idct_1d(double input[8], double output[8]) {
+ const double kPi = 3.141592653589793238462643383279502884;
+ const double kSqrt2 = 1.414213562373095048801688724209698;
+ for (int k = 0; k < 8; k++) {
+ output[k] = 0.0;
+ for (int n = 0; n < 8; n++) {
+ output[k] += input[n]*cos(kPi*(2*k+1)*n/16.0);
+ if (n == 0)
+ output[k] = output[k]/kSqrt2;
+ }
+ }
+}
+
+void reference_idct_2d(double input[64], int16_t output[64]) {
+ double out[64], out2[64];
+ // First transform rows
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = input[j + i*8];
+ reference_idct_1d(temp_in, temp_out);
+ for (int j = 0; j < 8; ++j)
+ out[j + i*8] = temp_out[j];
+ }
+ // Then transform columns
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = out[j*8 + i];
+ reference_idct_1d(temp_in, temp_out);
+ for (int j = 0; j < 8; ++j)
+ out2[j*8 + i] = temp_out[j];
+ }
+ for (int i = 0; i < 64; ++i)
+ output[i] = round(out2[i]/32);
+}
+
+TEST(VP9Idct8x8Test, AccuracyCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 10000;
+ for (int i = 0; i < count_test_block; ++i) {
+ int16_t input[64], coeff[64];
+ double output_r[64];
+ uint8_t dst[64], src[64];
+
+ for (int j = 0; j < 64; ++j) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ }
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < 64; ++j)
+ input[j] = src[j] - dst[j];
+
+ reference_dct_2d(input, output_r);
+ for (int j = 0; j < 64; ++j)
+ coeff[j] = round(output_r[j]);
+ vp9_short_idct8x8_add_c(coeff, dst, 8);
+ for (int j = 0; j < 64; ++j) {
+ const int diff = dst[j] - src[j];
+ const int error = diff * diff;
+      EXPECT_GE(1, error)
+          << "Error: 8x8 IDCT has error " << error
+          << " at index " << j;
+ }
+ }
+}
+
+} // namespace
diff --git a/libvpx/test/idctllm_test.cc b/libvpx/test/idct_test.cc
index dd42e22..659cce0 100644
--- a/libvpx/test/idctllm_test.cc
+++ b/libvpx/test/idct_test.cc
@@ -10,27 +10,31 @@
extern "C" {
-#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "./vpx_config.h"
+#include "./vp8_rtcd.h"
}
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
#include "third_party/googletest/src/include/gtest/gtest.h"
typedef void (*idct_fn_t)(short *input, unsigned char *pred_ptr,
int pred_stride, unsigned char *dst_ptr,
int dst_stride);
namespace {
-class IDCTTest : public ::testing::TestWithParam<idct_fn_t>
-{
+class IDCTTest : public ::testing::TestWithParam<idct_fn_t> {
protected:
- virtual void SetUp()
- {
+ virtual void SetUp() {
int i;
UUT = GetParam();
memset(input, 0, sizeof(input));
/* Set up guard blocks */
- for(i=0; i<256; i++)
- output[i] = ((i&0xF)<4&&(i<64))?0:-1;
+ for (i = 0; i < 256; i++)
+ output[i] = ((i & 0xF) < 4 && (i < 64)) ? 0 : -1;
+ }
+
+ virtual void TearDown() {
+ libvpx_test::ClearSystemState();
}
idct_fn_t UUT;
@@ -39,78 +43,72 @@ class IDCTTest : public ::testing::TestWithParam<idct_fn_t>
unsigned char predict[256];
};
-TEST_P(IDCTTest, TestGuardBlocks)
-{
+TEST_P(IDCTTest, TestGuardBlocks) {
int i;
- for(i=0; i<256; i++)
- if((i&0xF) < 4 && i<64)
+ for (i = 0; i < 256; i++)
+ if ((i & 0xF) < 4 && i < 64)
EXPECT_EQ(0, output[i]) << i;
else
EXPECT_EQ(255, output[i]);
}
-TEST_P(IDCTTest, TestAllZeros)
-{
+TEST_P(IDCTTest, TestAllZeros) {
int i;
- UUT(input, output, 16, output, 16);
+ REGISTER_STATE_CHECK(UUT(input, output, 16, output, 16));
- for(i=0; i<256; i++)
- if((i&0xF) < 4 && i<64)
+ for (i = 0; i < 256; i++)
+ if ((i & 0xF) < 4 && i < 64)
EXPECT_EQ(0, output[i]) << "i==" << i;
else
EXPECT_EQ(255, output[i]) << "i==" << i;
}
-TEST_P(IDCTTest, TestAllOnes)
-{
+TEST_P(IDCTTest, TestAllOnes) {
int i;
input[0] = 4;
- UUT(input, output, 16, output, 16);
+ REGISTER_STATE_CHECK(UUT(input, output, 16, output, 16));
- for(i=0; i<256; i++)
- if((i&0xF) < 4 && i<64)
+ for (i = 0; i < 256; i++)
+ if ((i & 0xF) < 4 && i < 64)
EXPECT_EQ(1, output[i]) << "i==" << i;
else
EXPECT_EQ(255, output[i]) << "i==" << i;
}
-TEST_P(IDCTTest, TestAddOne)
-{
+TEST_P(IDCTTest, TestAddOne) {
int i;
- for(i=0; i<256; i++)
+ for (i = 0; i < 256; i++)
predict[i] = i;
-
input[0] = 4;
- UUT(input, predict, 16, output, 16);
+ REGISTER_STATE_CHECK(UUT(input, predict, 16, output, 16));
- for(i=0; i<256; i++)
- if((i&0xF) < 4 && i<64)
+ for (i = 0; i < 256; i++)
+ if ((i & 0xF) < 4 && i < 64)
EXPECT_EQ(i+1, output[i]) << "i==" << i;
else
EXPECT_EQ(255, output[i]) << "i==" << i;
}
-TEST_P(IDCTTest, TestWithData)
-{
+TEST_P(IDCTTest, TestWithData) {
int i;
- for(i=0; i<16; i++)
+ for (i = 0; i < 16; i++)
input[i] = i;
- UUT(input, output, 16, output, 16);
+ REGISTER_STATE_CHECK(UUT(input, output, 16, output, 16));
- for(i=0; i<256; i++)
- if((i&0xF) > 3 || i>63)
+ for (i = 0; i < 256; i++)
+ if ((i & 0xF) > 3 || i > 63)
EXPECT_EQ(255, output[i]) << "i==" << i;
- else if(i == 0)
+ else if (i == 0)
EXPECT_EQ(11, output[i]) << "i==" << i;
- else if(i == 34)
+ else if (i == 34)
EXPECT_EQ(1, output[i]) << "i==" << i;
- else if(i == 2 || i == 17 || i == 32)
+ else if (i == 2 || i == 17 || i == 32)
EXPECT_EQ(3, output[i]) << "i==" << i;
else
EXPECT_EQ(0, output[i]) << "i==" << i;
diff --git a/libvpx/test/intrapred_test.cc b/libvpx/test/intrapred_test.cc
index d2e0d61..39ec896 100644
--- a/libvpx/test/intrapred_test.cc
+++ b/libvpx/test/intrapred_test.cc
@@ -11,10 +11,12 @@
#include <string.h>
#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
#include "third_party/googletest/src/include/gtest/gtest.h"
extern "C" {
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vp8/common/blockd.h"
#include "vpx_mem/vpx_mem.h"
}
@@ -24,6 +26,11 @@ namespace {
using libvpx_test::ACMRandom;
class IntraPredBase {
+ public:
+ virtual void TearDown() {
+ libvpx_test::ClearSystemState();
+ }
+
protected:
void SetupMacroblock(uint8_t *data, int block_size, int stride,
int num_planes) {
@@ -246,8 +253,10 @@ class IntraPredYTest : public ::testing::TestWithParam<intra_pred_y_fn_t>,
virtual void Predict(MB_PREDICTION_MODE mode) {
mb_.mode_info_context->mbmi.mode = mode;
- pred_fn_(&mb_, data_ptr_[0] - kStride, data_ptr_[0] - 1, kStride,
- data_ptr_[0], kStride);
+ REGISTER_STATE_CHECK(pred_fn_(&mb_,
+ data_ptr_[0] - kStride,
+ data_ptr_[0] - 1, kStride,
+ data_ptr_[0], kStride));
}
intra_pred_y_fn_t pred_fn_;
diff --git a/libvpx/test/keyframe_test.cc b/libvpx/test/keyframe_test.cc
index d0c81df..85ca0b9 100644
--- a/libvpx/test/keyframe_test.cc
+++ b/libvpx/test/keyframe_test.cc
@@ -9,18 +9,22 @@
*/
#include <climits>
#include <vector>
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
#include "test/i420_video_source.h"
-#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/util.h"
namespace {
class KeyframeTest : public ::libvpx_test::EncoderTest,
- public ::testing::TestWithParam<enum libvpx_test::TestMode> {
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
protected:
+ KeyframeTest() : EncoderTest(GET_PARAM(0)) {}
+
virtual void SetUp() {
InitializeConfig();
- SetMode(GetParam());
+ SetMode(GET_PARAM(1));
kf_count_ = 0;
kf_count_max_ = INT_MAX;
kf_do_force_kf_ = false;
@@ -64,7 +68,7 @@ TEST_P(KeyframeTest, TestRandomVideoSource) {
// In realtime mode - auto placed keyframes are exceedingly rare, don't
   // bother with this check
- if(GetParam() > 0)
+ if (GET_PARAM(1) > 0)
EXPECT_GT(kf_count_, 1);
}
@@ -126,7 +130,7 @@ TEST_P(KeyframeTest, TestAutoKeyframe) {
// In realtime mode - auto placed keyframes are exceedingly rare, don't
// bother with this check
- if(GetParam() > 0)
+ if (GET_PARAM(1) > 0)
EXPECT_EQ(2u, kf_pts_list_.size()) << " Not the right number of keyframes ";
// Verify that keyframes match the file keyframes in the file.
@@ -141,5 +145,5 @@ TEST_P(KeyframeTest, TestAutoKeyframe) {
}
}
-INSTANTIATE_TEST_CASE_P(AllModes, KeyframeTest, ALL_TEST_MODES);
+VP8_INSTANTIATE_TEST_CASE(KeyframeTest, ALL_TEST_MODES);
} // namespace
diff --git a/libvpx/test/md5_helper.h b/libvpx/test/md5_helper.h
new file mode 100644
index 0000000..fc1a974
--- /dev/null
+++ b/libvpx/test/md5_helper.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef LIBVPX_TEST_MD5_HELPER_H_
+#define LIBVPX_TEST_MD5_HELPER_H_
+
+extern "C" {
+#include "./md5_utils.h"
+#include "vpx/vpx_decoder.h"
+}
+
+namespace libvpx_test {
+class MD5 {
+ public:
+ MD5() {
+ MD5Init(&md5_);
+ }
+
+ void Add(const vpx_image_t *img) {
+ for (int plane = 0; plane < 3; ++plane) {
+ uint8_t *buf = img->planes[plane];
+ const int h = plane ? (img->d_h + 1) >> 1 : img->d_h;
+ const int w = plane ? (img->d_w + 1) >> 1 : img->d_w;
+
+ for (int y = 0; y < h; ++y) {
+ MD5Update(&md5_, buf, w);
+ buf += img->stride[plane];
+ }
+ }
+ }
+
+ const char *Get(void) {
+ static const char hex[16] = {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f',
+ };
+ uint8_t tmp[16];
+ MD5Context ctx_tmp = md5_;
+
+ MD5Final(tmp, &ctx_tmp);
+ for (int i = 0; i < 16; i++) {
+ res_[i * 2 + 0] = hex[tmp[i] >> 4];
+ res_[i * 2 + 1] = hex[tmp[i] & 0xf];
+ }
+ res_[32] = 0;
+
+ return res_;
+ }
+
+ protected:
+ char res_[33];
+ MD5Context md5_;
+};
+
+} // namespace libvpx_test
+
+#endif // LIBVPX_TEST_MD5_HELPER_H_
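
Add() walks the three I420 planes, halving width and height (rounding up) for the two chroma planes, and hashes row by row through the plane's stride so alignment padding never enters the digest; because Get() finalizes a copy of the context, it can be called after every frame as well as at the end. A hedged usage sketch, assuming a DxDataIterator named dec_iter from the decode driver:

    libvpx_test::MD5 md5;
    const vpx_image_t *img;
    while ((img = dec_iter.Next()) != NULL)
      md5.Add(img);
    printf("decoded stream MD5: %s\n", md5.Get());
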
diff --git a/libvpx/test/pp_filter_test.cc b/libvpx/test/pp_filter_test.cc
index af2f3bd..79896fe 100644
--- a/libvpx/test/pp_filter_test.cc
+++ b/libvpx/test/pp_filter_test.cc
@@ -7,10 +7,12 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
#include "third_party/googletest/src/include/gtest/gtest.h"
extern "C" {
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_mem/vpx_mem.h"
}
@@ -26,7 +28,12 @@ typedef void (*post_proc_func_t)(unsigned char *src_ptr,
namespace {
class Vp8PostProcessingFilterTest
- : public ::testing::TestWithParam<post_proc_func_t> {};
+ : public ::testing::TestWithParam<post_proc_func_t> {
+ public:
+ virtual void TearDown() {
+ libvpx_test::ClearSystemState();
+ }
+};
// Test routine for the VP8 post-processing function
// vp8_post_proc_down_and_across_mb_row_c.
@@ -74,8 +81,8 @@ TEST_P(Vp8PostProcessingFilterTest, FilterOutputCheck) {
// Initialize pixels in the output to 99.
(void)vpx_memset(dst_image, 99, output_size);
- GetParam()(src_image_ptr, dst_image_ptr, input_stride,
- output_stride, block_width, flimits, 16);
+ REGISTER_STATE_CHECK(GetParam()(src_image_ptr, dst_image_ptr, input_stride,
+ output_stride, block_width, flimits, 16));
static const uint8_t expected_data[block_height] = {
4, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 4
diff --git a/libvpx/test/register_state_check.h b/libvpx/test/register_state_check.h
new file mode 100644
index 0000000..fb3f53b
--- /dev/null
+++ b/libvpx/test/register_state_check.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef LIBVPX_TEST_REGISTER_STATE_CHECK_H_
+#define LIBVPX_TEST_REGISTER_STATE_CHECK_H_
+
+#ifdef _WIN64
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <winnt.h>
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+namespace testing {
+namespace internal {
+
+inline bool operator==(const M128A& lhs, const M128A& rhs) {
+ return (lhs.Low == rhs.Low && lhs.High == rhs.High);
+}
+
+} // namespace internal
+} // namespace testing
+
+namespace libvpx_test {
+
+// Compares the state of xmm[6-15] at construction with their state at
+// destruction. These registers should be preserved by the callee on
+// Windows x64.
+// Usage:
+// {
+// RegisterStateCheck reg_check;
+// FunctionToVerify();
+// }
+class RegisterStateCheck {
+ public:
+ RegisterStateCheck() { initialized_ = StoreRegisters(&pre_context_); }
+ ~RegisterStateCheck() { EXPECT_TRUE(Check()); }
+
+ private:
+ static bool StoreRegisters(CONTEXT* const context) {
+ const HANDLE this_thread = GetCurrentThread();
+ EXPECT_TRUE(this_thread != NULL);
+ context->ContextFlags = CONTEXT_FLOATING_POINT;
+ const bool context_saved = GetThreadContext(this_thread, context) == TRUE;
+ EXPECT_TRUE(context_saved) << "GetLastError: " << GetLastError();
+ return context_saved;
+ }
+
+ // Compares the register state. Returns true if the states match.
+ bool Check() const {
+ if (!initialized_) return false;
+ CONTEXT post_context;
+ if (!StoreRegisters(&post_context)) return false;
+
+ const M128A* xmm_pre = &pre_context_.Xmm6;
+ const M128A* xmm_post = &post_context.Xmm6;
+ for (int i = 6; i <= 15; ++i) {
+ EXPECT_EQ(*xmm_pre, *xmm_post) << "xmm" << i << " has been modified!";
+ ++xmm_pre;
+ ++xmm_post;
+ }
+ return !testing::Test::HasNonfatalFailure();
+ }
+
+ bool initialized_;
+ CONTEXT pre_context_;
+};
+
+#define REGISTER_STATE_CHECK(statement) do { \
+ libvpx_test::RegisterStateCheck reg_check; \
+ statement; \
+} while (false)
+
+} // namespace libvpx_test
+
+#else // !_WIN64
+
+namespace libvpx_test {
+
+class RegisterStateCheck {};
+#define REGISTER_STATE_CHECK(statement) statement
+
+} // namespace libvpx_test
+
+#endif // _WIN64
+
+#endif // LIBVPX_TEST_REGISTER_STATE_CHECK_H_
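
The do { ... } while (false) wrapper makes REGISTER_STATE_CHECK expand to a single statement, so the scoped RegisterStateCheck object is destroyed, and the xmm comparison runs, immediately after the wrapped call, and the macro remains safe under unbraced if/else. An illustrative use, with hypothetical function names:

    if (use_optimized)
      REGISTER_STATE_CHECK(fn_opt(input, output));
    else
      REGISTER_STATE_CHECK(fn_c(input, output));
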
diff --git a/libvpx/test/resize_test.cc b/libvpx/test/resize_test.cc
index c846157..0d591ad 100644
--- a/libvpx/test/resize_test.cc
+++ b/libvpx/test/resize_test.cc
@@ -9,9 +9,12 @@
*/
#include <climits>
#include <vector>
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
#include "test/encode_test_driver.h"
+#include "test/i420_video_source.h"
#include "test/video_source.h"
-#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/util.h"
namespace {
@@ -49,8 +52,10 @@ class ResizingVideoSource : public ::libvpx_test::DummyVideoSource {
};
class ResizeTest : public ::libvpx_test::EncoderTest,
- public ::testing::TestWithParam<enum libvpx_test::TestMode> {
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
protected:
+ ResizeTest() : EncoderTest(GET_PARAM(0)) {}
+
struct FrameInfo {
FrameInfo(vpx_codec_pts_t _pts, unsigned int _w, unsigned int _h)
: pts(_pts), w(_w), h(_h) {}
@@ -62,22 +67,16 @@ class ResizeTest : public ::libvpx_test::EncoderTest,
virtual void SetUp() {
InitializeConfig();
- SetMode(GetParam());
+ SetMode(GET_PARAM(1));
}
virtual bool Continue() const {
return !HasFatalFailure() && !abort_;
}
- virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
- if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
- const unsigned char *buf =
- reinterpret_cast<const unsigned char *>(pkt->data.frame.buf);
- const unsigned int w = (buf[6] | (buf[7] << 8)) & 0x3fff;
- const unsigned int h = (buf[8] | (buf[9] << 8)) & 0x3fff;
-
- frame_info_list_.push_back(FrameInfo(pkt->data.frame.pts, w, h));
- }
+ virtual void DecompressedFrameHook(const vpx_image_t &img,
+ vpx_codec_pts_t pts) {
+ frame_info_list_.push_back(FrameInfo(pts, img.d_w, img.d_h));
}
std::vector< FrameInfo > frame_info_list_;
@@ -100,5 +99,53 @@ TEST_P(ResizeTest, TestExternalResizeWorks) {
}
}
-INSTANTIATE_TEST_CASE_P(OnePass, ResizeTest, ONE_PASS_TEST_MODES);
+class ResizeInternalTest : public ResizeTest {
+ protected:
+ ResizeInternalTest() : ResizeTest(), frame0_psnr_(0.0) {}
+
+ virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
+ libvpx_test::Encoder *encoder) {
+ if (video->frame() == 3) {
+ struct vpx_scaling_mode mode = {VP8E_FOURFIVE, VP8E_THREEFIVE};
+ encoder->Control(VP8E_SET_SCALEMODE, &mode);
+ }
+ if (video->frame() == 6) {
+ struct vpx_scaling_mode mode = {VP8E_NORMAL, VP8E_NORMAL};
+ encoder->Control(VP8E_SET_SCALEMODE, &mode);
+ }
+ }
+
+ virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
+ if (!frame0_psnr_)
+ frame0_psnr_ = pkt->data.psnr.psnr[0];
+ EXPECT_NEAR(pkt->data.psnr.psnr[0], frame0_psnr_, 1.0);
+ }
+
+ double frame0_psnr_;
+};
+
+TEST_P(ResizeInternalTest, TestInternalResizeWorks) {
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 10);
+ init_flags_ = VPX_CODEC_USE_PSNR;
+  // q chosen such that the initial keyframe on this clip is ~30dB PSNR

+ cfg_.rc_min_quantizer = cfg_.rc_max_quantizer = 48;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+
+ for (std::vector<FrameInfo>::iterator info = frame_info_list_.begin();
+ info != frame_info_list_.end(); ++info) {
+ const vpx_codec_pts_t pts = info->pts;
+ if (pts >= 3 && pts < 6) {
+ ASSERT_EQ(282U, info->w) << "Frame " << pts << " had unexpected width";
+ ASSERT_EQ(173U, info->h) << "Frame " << pts << " had unexpected height";
+ } else {
+ EXPECT_EQ(352U, info->w) << "Frame " << pts << " had unexpected width";
+ EXPECT_EQ(288U, info->h) << "Frame " << pts << " had unexpected height";
+ }
+ }
+}
+
+VP8_INSTANTIATE_TEST_CASE(ResizeTest, ONE_PASS_TEST_MODES);
+VP9_INSTANTIATE_TEST_CASE(ResizeInternalTest,
+ ::testing::Values(::libvpx_test::kOnePassBest));
} // namespace
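For reference, the 282x173 expectation above follows directly from the
scaling modes requested at frame 3: VP8E_FOURFIVE scales the width by 4/5 and
VP8E_THREEFIVE the height by 3/5, so

    352 * 4/5 = 281.6 -> 282        288 * 3/5 = 172.8 -> 173

and frames with pts 3 through 5 are encoded at 282x173 until the VP8E_NORMAL
request at frame 6 restores 352x288.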
diff --git a/libvpx/test/sad_test.cc b/libvpx/test/sad_test.cc
index 2b562e6..1f5435f 100644
--- a/libvpx/test/sad_test.cc
+++ b/libvpx/test/sad_test.cc
@@ -15,12 +15,19 @@
extern "C" {
#include "./vpx_config.h"
-#include "./vpx_rtcd.h"
-#include "vp8/common/blockd.h"
+#if CONFIG_VP8_ENCODER
+#include "./vp8_rtcd.h"
+//#include "vp8/common/blockd.h"
+#endif
+#if CONFIG_VP9_ENCODER
+#include "./vp9_rtcd.h"
+#endif
#include "vpx_mem/vpx_mem.h"
}
#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
#include "test/util.h"
#include "third_party/googletest/src/include/gtest/gtest.h"
@@ -30,15 +37,26 @@ typedef unsigned int (*sad_m_by_n_fn_t)(const unsigned char *source_ptr,
const unsigned char *reference_ptr,
int reference_stride,
unsigned int max_sad);
+typedef std::tr1::tuple<int, int, sad_m_by_n_fn_t> sad_m_by_n_test_param_t;
+
+typedef void (*sad_n_by_n_by_4_fn_t)(const uint8_t *src_ptr,
+ int src_stride,
+ const unsigned char * const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array);
+typedef std::tr1::tuple<int, int, sad_n_by_n_by_4_fn_t>
+ sad_n_by_n_by_4_test_param_t;
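+// The x4d variants compute the SAD of one source block against four
+// reference blocks in a single call, writing the four results to sad_array.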
using libvpx_test::ACMRandom;
namespace {
-class SADTest : public PARAMS(int, int, sad_m_by_n_fn_t) {
+class SADTestBase : public ::testing::Test {
public:
+ SADTestBase(int width, int height) : width_(width), height_(height) {}
+
static void SetUpTestCase() {
source_data_ = reinterpret_cast<uint8_t*>(
- vpx_memalign(kDataAlignment, kDataBufferSize));
+ vpx_memalign(kDataAlignment, kDataBlockSize));
reference_data_ = reinterpret_cast<uint8_t*>(
vpx_memalign(kDataAlignment, kDataBufferSize));
}
@@ -50,35 +68,36 @@ class SADTest : public PARAMS(int, int, sad_m_by_n_fn_t) {
reference_data_ = NULL;
}
+ virtual void TearDown() {
+ libvpx_test::ClearSystemState();
+ }
+
protected:
+  // Handle up to four 64x64 blocks, with stride up to 128
static const int kDataAlignment = 16;
- static const int kDataBufferSize = 16 * 32;
+ static const int kDataBlockSize = 64 * 128;
+ static const int kDataBufferSize = 4 * kDataBlockSize;
virtual void SetUp() {
- sad_fn_ = GET_PARAM(2);
- height_ = GET_PARAM(1);
- width_ = GET_PARAM(0);
- source_stride_ = width_ * 2;
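+    // (width_ + 31) & ~31 rounds width_ up to the next multiple of 32,
+    // giving a padded source stride for the widest (64-wide) blocks.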
+ source_stride_ = (width_ + 31) & ~31;
reference_stride_ = width_ * 2;
rnd_.Reset(ACMRandom::DeterministicSeed());
}
- sad_m_by_n_fn_t sad_fn_;
- virtual unsigned int SAD(unsigned int max_sad) {
- return sad_fn_(source_data_, source_stride_,
- reference_data_, reference_stride_,
- max_sad);
+ virtual uint8_t* GetReference(int block_idx) {
+ return reference_data_ + block_idx * kDataBlockSize;
}
// Sum of Absolute Differences. Given two blocks, calculate the absolute
// difference between two pixels in the same relative location; accumulate.
- unsigned int ReferenceSAD(unsigned int max_sad) {
+ unsigned int ReferenceSAD(unsigned int max_sad, int block_idx = 0) {
unsigned int sad = 0;
+ const uint8_t* const reference = GetReference(block_idx);
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
sad += abs(source_data_[h * source_stride_ + w]
- - reference_data_[h * reference_stride_ + w]);
+ - reference[h * reference_stride_ + w]);
}
if (sad > max_sad) {
break;
@@ -103,6 +122,31 @@ class SADTest : public PARAMS(int, int, sad_m_by_n_fn_t) {
}
}
+ int width_, height_;
+ static uint8_t* source_data_;
+ int source_stride_;
+ static uint8_t* reference_data_;
+ int reference_stride_;
+
+ ACMRandom rnd_;
+};
+
+class SADTest : public SADTestBase,
+ public ::testing::WithParamInterface<sad_m_by_n_test_param_t> {
+ public:
+ SADTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1)) {}
+
+ protected:
+ unsigned int SAD(unsigned int max_sad, int block_idx = 0) {
+ unsigned int ret;
+ const uint8_t* const reference = GetReference(block_idx);
+
+ REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_,
+ reference, reference_stride_,
+ max_sad));
+ return ret;
+ }
+
void CheckSad(unsigned int max_sad) {
unsigned int reference_sad, exp_sad;
@@ -116,19 +160,37 @@ class SADTest : public PARAMS(int, int, sad_m_by_n_fn_t) {
ASSERT_GE(exp_sad, reference_sad);
}
}
+};
- // Handle blocks up to 16x16 with stride up to 32
- int height_, width_;
- static uint8_t* source_data_;
- int source_stride_;
- static uint8_t* reference_data_;
- int reference_stride_;
+class SADx4Test : public SADTestBase,
+ public ::testing::WithParamInterface<sad_n_by_n_by_4_test_param_t> {
+ public:
+ SADx4Test() : SADTestBase(GET_PARAM(0), GET_PARAM(1)) {}
- ACMRandom rnd_;
+ protected:
+ void SADs(unsigned int *results) {
+ const uint8_t* refs[] = {GetReference(0), GetReference(1),
+ GetReference(2), GetReference(3)};
+
+ REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
+ refs, reference_stride_,
+ results));
+ }
+
+ void CheckSADs() {
+ unsigned int reference_sad, exp_sad[4];
+
+ SADs(exp_sad);
+ for (int block = 0; block < 4; block++) {
+ reference_sad = ReferenceSAD(UINT_MAX, block);
+
+ EXPECT_EQ(exp_sad[block], reference_sad) << "block " << block;
+ }
+ }
};
-uint8_t* SADTest::source_data_ = NULL;
-uint8_t* SADTest::reference_data_ = NULL;
+uint8_t* SADTestBase::source_data_ = NULL;
+uint8_t* SADTestBase::reference_data_ = NULL;
TEST_P(SADTest, MaxRef) {
FillConstant(source_data_, source_stride_, 0);
@@ -136,12 +198,30 @@ TEST_P(SADTest, MaxRef) {
CheckSad(UINT_MAX);
}
+TEST_P(SADx4Test, MaxRef) {
+ FillConstant(source_data_, source_stride_, 0);
+ FillConstant(GetReference(0), reference_stride_, 255);
+ FillConstant(GetReference(1), reference_stride_, 255);
+ FillConstant(GetReference(2), reference_stride_, 255);
+ FillConstant(GetReference(3), reference_stride_, 255);
+ CheckSADs();
+}
+
TEST_P(SADTest, MaxSrc) {
FillConstant(source_data_, source_stride_, 255);
FillConstant(reference_data_, reference_stride_, 0);
CheckSad(UINT_MAX);
}
+TEST_P(SADx4Test, MaxSrc) {
+ FillConstant(source_data_, source_stride_, 255);
+ FillConstant(GetReference(0), reference_stride_, 0);
+ FillConstant(GetReference(1), reference_stride_, 0);
+ FillConstant(GetReference(2), reference_stride_, 0);
+ FillConstant(GetReference(3), reference_stride_, 0);
+ CheckSADs();
+}
+
TEST_P(SADTest, ShortRef) {
int tmp_stride = reference_stride_;
reference_stride_ >>= 1;
@@ -151,6 +231,18 @@ TEST_P(SADTest, ShortRef) {
reference_stride_ = tmp_stride;
}
+TEST_P(SADx4Test, ShortRef) {
+ int tmp_stride = reference_stride_;
+ reference_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(GetReference(0), reference_stride_);
+ FillRandom(GetReference(1), reference_stride_);
+ FillRandom(GetReference(2), reference_stride_);
+ FillRandom(GetReference(3), reference_stride_);
+ CheckSADs();
+ reference_stride_ = tmp_stride;
+}
+
TEST_P(SADTest, UnalignedRef) {
// The reference frame, but not the source frame, may be unaligned for
// certain types of searches.
@@ -162,6 +254,20 @@ TEST_P(SADTest, UnalignedRef) {
reference_stride_ = tmp_stride;
}
+TEST_P(SADx4Test, UnalignedRef) {
+ // The reference frame, but not the source frame, may be unaligned for
+ // certain types of searches.
+ int tmp_stride = reference_stride_;
+ reference_stride_ -= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(GetReference(0), reference_stride_);
+ FillRandom(GetReference(1), reference_stride_);
+ FillRandom(GetReference(2), reference_stride_);
+ FillRandom(GetReference(3), reference_stride_);
+ CheckSADs();
+ reference_stride_ = tmp_stride;
+}
+
TEST_P(SADTest, ShortSrc) {
int tmp_stride = source_stride_;
source_stride_ >>= 1;
@@ -171,6 +277,18 @@ TEST_P(SADTest, ShortSrc) {
source_stride_ = tmp_stride;
}
+TEST_P(SADx4Test, ShortSrc) {
+ int tmp_stride = source_stride_;
+ source_stride_ >>= 1;
+ FillRandom(source_data_, source_stride_);
+ FillRandom(GetReference(0), reference_stride_);
+ FillRandom(GetReference(1), reference_stride_);
+ FillRandom(GetReference(2), reference_stride_);
+ FillRandom(GetReference(3), reference_stride_);
+ CheckSADs();
+ source_stride_ = tmp_stride;
+}
+
TEST_P(SADTest, MaxSAD) {
// Verify that, when max_sad is set, the implementation does not return a
// value lower than the reference.
@@ -181,17 +299,75 @@ TEST_P(SADTest, MaxSAD) {
using std::tr1::make_tuple;
+#if CONFIG_VP8_ENCODER
const sad_m_by_n_fn_t sad_16x16_c = vp8_sad16x16_c;
const sad_m_by_n_fn_t sad_8x16_c = vp8_sad8x16_c;
const sad_m_by_n_fn_t sad_16x8_c = vp8_sad16x8_c;
const sad_m_by_n_fn_t sad_8x8_c = vp8_sad8x8_c;
const sad_m_by_n_fn_t sad_4x4_c = vp8_sad4x4_c;
-INSTANTIATE_TEST_CASE_P(C, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_c),
- make_tuple(8, 16, sad_8x16_c),
- make_tuple(16, 8, sad_16x8_c),
- make_tuple(8, 8, sad_8x8_c),
- make_tuple(4, 4, sad_4x4_c)));
+#endif
+#if CONFIG_VP9_ENCODER
+const sad_m_by_n_fn_t sad_64x64_c_vp9 = vp9_sad64x64_c;
+const sad_m_by_n_fn_t sad_32x32_c_vp9 = vp9_sad32x32_c;
+const sad_m_by_n_fn_t sad_16x16_c_vp9 = vp9_sad16x16_c;
+const sad_m_by_n_fn_t sad_8x16_c_vp9 = vp9_sad8x16_c;
+const sad_m_by_n_fn_t sad_16x8_c_vp9 = vp9_sad16x8_c;
+const sad_m_by_n_fn_t sad_8x8_c_vp9 = vp9_sad8x8_c;
+const sad_m_by_n_fn_t sad_8x4_c_vp9 = vp9_sad8x4_c;
+const sad_m_by_n_fn_t sad_4x8_c_vp9 = vp9_sad4x8_c;
+const sad_m_by_n_fn_t sad_4x4_c_vp9 = vp9_sad4x4_c;
+#endif
+const sad_m_by_n_test_param_t c_tests[] = {
+#if CONFIG_VP8_ENCODER
+ make_tuple(16, 16, sad_16x16_c),
+ make_tuple(8, 16, sad_8x16_c),
+ make_tuple(16, 8, sad_16x8_c),
+ make_tuple(8, 8, sad_8x8_c),
+ make_tuple(4, 4, sad_4x4_c),
+#endif
+#if CONFIG_VP9_ENCODER
+ make_tuple(64, 64, sad_64x64_c_vp9),
+ make_tuple(32, 32, sad_32x32_c_vp9),
+ make_tuple(16, 16, sad_16x16_c_vp9),
+ make_tuple(8, 16, sad_8x16_c_vp9),
+ make_tuple(16, 8, sad_16x8_c_vp9),
+ make_tuple(8, 8, sad_8x8_c_vp9),
+ make_tuple(8, 4, sad_8x4_c_vp9),
+ make_tuple(4, 8, sad_4x8_c_vp9),
+ make_tuple(4, 4, sad_4x4_c_vp9),
+#endif
+};
+INSTANTIATE_TEST_CASE_P(C, SADTest, ::testing::ValuesIn(c_tests));
+
+#if CONFIG_VP9_ENCODER
+const sad_n_by_n_by_4_fn_t sad_64x64x4d_c = vp9_sad64x64x4d_c;
+const sad_n_by_n_by_4_fn_t sad_64x32x4d_c = vp9_sad64x32x4d_c;
+const sad_n_by_n_by_4_fn_t sad_32x64x4d_c = vp9_sad32x64x4d_c;
+const sad_n_by_n_by_4_fn_t sad_32x32x4d_c = vp9_sad32x32x4d_c;
+const sad_n_by_n_by_4_fn_t sad_32x16x4d_c = vp9_sad32x16x4d_c;
+const sad_n_by_n_by_4_fn_t sad_16x32x4d_c = vp9_sad16x32x4d_c;
+const sad_n_by_n_by_4_fn_t sad_16x16x4d_c = vp9_sad16x16x4d_c;
+const sad_n_by_n_by_4_fn_t sad_16x8x4d_c = vp9_sad16x8x4d_c;
+const sad_n_by_n_by_4_fn_t sad_8x16x4d_c = vp9_sad8x16x4d_c;
+const sad_n_by_n_by_4_fn_t sad_8x8x4d_c = vp9_sad8x8x4d_c;
+const sad_n_by_n_by_4_fn_t sad_8x4x4d_c = vp9_sad8x4x4d_c;
+const sad_n_by_n_by_4_fn_t sad_4x8x4d_c = vp9_sad4x8x4d_c;
+const sad_n_by_n_by_4_fn_t sad_4x4x4d_c = vp9_sad4x4x4d_c;
+INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::Values(
+ make_tuple(64, 64, sad_64x64x4d_c),
+ make_tuple(64, 32, sad_64x32x4d_c),
+ make_tuple(32, 64, sad_32x64x4d_c),
+ make_tuple(32, 32, sad_32x32x4d_c),
+ make_tuple(32, 16, sad_32x16x4d_c),
+ make_tuple(16, 32, sad_16x32x4d_c),
+ make_tuple(16, 16, sad_16x16x4d_c),
+ make_tuple(16, 8, sad_16x8x4d_c),
+ make_tuple(8, 16, sad_8x16x4d_c),
+ make_tuple(8, 8, sad_8x8x4d_c),
+ make_tuple(8, 4, sad_8x4x4d_c),
+ make_tuple(4, 8, sad_4x8x4d_c),
+ make_tuple(4, 4, sad_4x4x4d_c)));
+#endif
// ARM tests
#if HAVE_MEDIA
@@ -216,31 +392,136 @@ INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::Values(
// X86 tests
#if HAVE_MMX
+#if CONFIG_VP8_ENCODER
const sad_m_by_n_fn_t sad_16x16_mmx = vp8_sad16x16_mmx;
const sad_m_by_n_fn_t sad_8x16_mmx = vp8_sad8x16_mmx;
const sad_m_by_n_fn_t sad_16x8_mmx = vp8_sad16x8_mmx;
const sad_m_by_n_fn_t sad_8x8_mmx = vp8_sad8x8_mmx;
const sad_m_by_n_fn_t sad_4x4_mmx = vp8_sad4x4_mmx;
-INSTANTIATE_TEST_CASE_P(MMX, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_mmx),
- make_tuple(8, 16, sad_8x16_mmx),
- make_tuple(16, 8, sad_16x8_mmx),
- make_tuple(8, 8, sad_8x8_mmx),
- make_tuple(4, 4, sad_4x4_mmx)));
#endif
+#if CONFIG_VP9_ENCODER
+const sad_m_by_n_fn_t sad_16x16_mmx_vp9 = vp9_sad16x16_mmx;
+const sad_m_by_n_fn_t sad_8x16_mmx_vp9 = vp9_sad8x16_mmx;
+const sad_m_by_n_fn_t sad_16x8_mmx_vp9 = vp9_sad16x8_mmx;
+const sad_m_by_n_fn_t sad_8x8_mmx_vp9 = vp9_sad8x8_mmx;
+const sad_m_by_n_fn_t sad_4x4_mmx_vp9 = vp9_sad4x4_mmx;
+#endif
+
+const sad_m_by_n_test_param_t mmx_tests[] = {
+#if CONFIG_VP8_ENCODER
+ make_tuple(16, 16, sad_16x16_mmx),
+ make_tuple(8, 16, sad_8x16_mmx),
+ make_tuple(16, 8, sad_16x8_mmx),
+ make_tuple(8, 8, sad_8x8_mmx),
+ make_tuple(4, 4, sad_4x4_mmx),
+#endif
+#if CONFIG_VP9_ENCODER
+ make_tuple(16, 16, sad_16x16_mmx_vp9),
+ make_tuple(8, 16, sad_8x16_mmx_vp9),
+ make_tuple(16, 8, sad_16x8_mmx_vp9),
+ make_tuple(8, 8, sad_8x8_mmx_vp9),
+ make_tuple(4, 4, sad_4x4_mmx_vp9),
+#endif
+};
+INSTANTIATE_TEST_CASE_P(MMX, SADTest, ::testing::ValuesIn(mmx_tests));
+#endif
+
+#if HAVE_SSE
+#if CONFIG_VP9_ENCODER
+const sad_m_by_n_fn_t sad_4x4_sse_vp9 = vp9_sad4x4_sse;
+const sad_m_by_n_fn_t sad_4x8_sse_vp9 = vp9_sad4x8_sse;
+INSTANTIATE_TEST_CASE_P(SSE, SADTest, ::testing::Values(
+ make_tuple(4, 4, sad_4x4_sse_vp9),
+ make_tuple(4, 8, sad_4x8_sse_vp9)));
+
+const sad_n_by_n_by_4_fn_t sad_4x8x4d_sse = vp9_sad4x8x4d_sse;
+const sad_n_by_n_by_4_fn_t sad_4x4x4d_sse = vp9_sad4x4x4d_sse;
+INSTANTIATE_TEST_CASE_P(SSE, SADx4Test, ::testing::Values(
+ make_tuple(4, 8, sad_4x8x4d_sse),
+ make_tuple(4, 4, sad_4x4x4d_sse)));
+#endif
+#endif
+
#if HAVE_SSE2
+#if CONFIG_VP8_ENCODER
const sad_m_by_n_fn_t sad_16x16_wmt = vp8_sad16x16_wmt;
const sad_m_by_n_fn_t sad_8x16_wmt = vp8_sad8x16_wmt;
const sad_m_by_n_fn_t sad_16x8_wmt = vp8_sad16x8_wmt;
const sad_m_by_n_fn_t sad_8x8_wmt = vp8_sad8x8_wmt;
const sad_m_by_n_fn_t sad_4x4_wmt = vp8_sad4x4_wmt;
-INSTANTIATE_TEST_CASE_P(SSE2, SADTest, ::testing::Values(
- make_tuple(16, 16, sad_16x16_wmt),
- make_tuple(8, 16, sad_8x16_wmt),
- make_tuple(16, 8, sad_16x8_wmt),
- make_tuple(8, 8, sad_8x8_wmt),
- make_tuple(4, 4, sad_4x4_wmt)));
#endif
+#if CONFIG_VP9_ENCODER
+const sad_m_by_n_fn_t sad_64x64_sse2_vp9 = vp9_sad64x64_sse2;
+const sad_m_by_n_fn_t sad_32x32_sse2_vp9 = vp9_sad32x32_sse2;
+const sad_m_by_n_fn_t sad_16x16_sse2_vp9 = vp9_sad16x16_sse2;
+const sad_m_by_n_fn_t sad_8x16_sse2_vp9 = vp9_sad8x16_sse2;
+const sad_m_by_n_fn_t sad_16x8_sse2_vp9 = vp9_sad16x8_sse2;
+const sad_m_by_n_fn_t sad_8x8_sse2_vp9 = vp9_sad8x8_sse2;
+const sad_m_by_n_fn_t sad_8x4_sse2_vp9 = vp9_sad8x4_sse2;
+#endif
+const sad_m_by_n_test_param_t sse2_tests[] = {
+#if CONFIG_VP8_ENCODER
+ make_tuple(16, 16, sad_16x16_wmt),
+ make_tuple(8, 16, sad_8x16_wmt),
+ make_tuple(16, 8, sad_16x8_wmt),
+ make_tuple(8, 8, sad_8x8_wmt),
+ make_tuple(4, 4, sad_4x4_wmt),
+#endif
+#if CONFIG_VP9_ENCODER
+ make_tuple(64, 64, sad_64x64_sse2_vp9),
+ make_tuple(32, 32, sad_32x32_sse2_vp9),
+ make_tuple(16, 16, sad_16x16_sse2_vp9),
+ make_tuple(8, 16, sad_8x16_sse2_vp9),
+ make_tuple(16, 8, sad_16x8_sse2_vp9),
+ make_tuple(8, 8, sad_8x8_sse2_vp9),
+ make_tuple(8, 4, sad_8x4_sse2_vp9),
+#endif
+};
+INSTANTIATE_TEST_CASE_P(SSE2, SADTest, ::testing::ValuesIn(sse2_tests));
+
+#if CONFIG_VP9_ENCODER
+const sad_n_by_n_by_4_fn_t sad_64x64x4d_sse2 = vp9_sad64x64x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_64x32x4d_sse2 = vp9_sad64x32x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_32x64x4d_sse2 = vp9_sad32x64x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_32x32x4d_sse2 = vp9_sad32x32x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_32x16x4d_sse2 = vp9_sad32x16x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_16x32x4d_sse2 = vp9_sad16x32x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_16x16x4d_sse2 = vp9_sad16x16x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_16x8x4d_sse2 = vp9_sad16x8x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_8x16x4d_sse2 = vp9_sad8x16x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_8x8x4d_sse2 = vp9_sad8x8x4d_sse2;
+const sad_n_by_n_by_4_fn_t sad_8x4x4d_sse2 = vp9_sad8x4x4d_sse2;
+INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::Values(
+ make_tuple(64, 64, sad_64x64x4d_sse2),
+ make_tuple(64, 32, sad_64x32x4d_sse2),
+ make_tuple(32, 64, sad_32x64x4d_sse2),
+ make_tuple(32, 32, sad_32x32x4d_sse2),
+ make_tuple(32, 16, sad_32x16x4d_sse2),
+ make_tuple(16, 32, sad_16x32x4d_sse2),
+ make_tuple(16, 16, sad_16x16x4d_sse2),
+ make_tuple(16, 8, sad_16x8x4d_sse2),
+ make_tuple(8, 16, sad_8x16x4d_sse2),
+ make_tuple(8, 8, sad_8x8x4d_sse2),
+ make_tuple(8, 4, sad_8x4x4d_sse2)));
+#endif
+#endif
+
+#if HAVE_SSE3
+#if CONFIG_VP8_ENCODER
+const sad_n_by_n_by_4_fn_t sad_16x16x4d_sse3 = vp8_sad16x16x4d_sse3;
+const sad_n_by_n_by_4_fn_t sad_16x8x4d_sse3 = vp8_sad16x8x4d_sse3;
+const sad_n_by_n_by_4_fn_t sad_8x16x4d_sse3 = vp8_sad8x16x4d_sse3;
+const sad_n_by_n_by_4_fn_t sad_8x8x4d_sse3 = vp8_sad8x8x4d_sse3;
+const sad_n_by_n_by_4_fn_t sad_4x4x4d_sse3 = vp8_sad4x4x4d_sse3;
+INSTANTIATE_TEST_CASE_P(SSE3, SADx4Test, ::testing::Values(
+ make_tuple(16, 16, sad_16x16x4d_sse3),
+ make_tuple(16, 8, sad_16x8x4d_sse3),
+ make_tuple(8, 16, sad_8x16x4d_sse3),
+ make_tuple(8, 8, sad_8x8x4d_sse3),
+ make_tuple(4, 4, sad_4x4x4d_sse3)));
+#endif
+#endif
+
#if HAVE_SSSE3
const sad_m_by_n_fn_t sad_16x16_sse3 = vp8_sad16x16_sse3;
INSTANTIATE_TEST_CASE_P(SSE3, SADTest, ::testing::Values(
diff --git a/libvpx/test/sixtap_predict_test.cc b/libvpx/test/sixtap_predict_test.cc
index 06f14a1..ee4faac 100644
--- a/libvpx/test/sixtap_predict_test.cc
+++ b/libvpx/test/sixtap_predict_test.cc
@@ -12,11 +12,13 @@
#include <stdlib.h>
#include <string.h>
#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
#include "test/util.h"
#include "third_party/googletest/src/include/gtest/gtest.h"
extern "C" {
#include "./vpx_config.h"
-#include "./vpx_rtcd.h"
+#include "./vp8_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_mem/vpx_mem.h"
}
@@ -47,6 +49,10 @@ class SixtapPredictTest : public PARAMS(int, int, sixtap_predict_fn_t) {
dst_c_ = NULL;
}
+ virtual void TearDown() {
+ libvpx_test::ClearSystemState();
+ }
+
protected:
// Make test arrays big enough for 16x16 functions. Six-tap filters
// need 5 extra pixels outside of the macroblock.
@@ -60,9 +66,9 @@ class SixtapPredictTest : public PARAMS(int, int, sixtap_predict_fn_t) {
width_ = GET_PARAM(0);
height_ = GET_PARAM(1);
sixtap_predict_ = GET_PARAM(2);
- memset(src_, 0, sizeof(src_));
- memset(dst_, 0, sizeof(dst_));
- memset(dst_c_, 0, sizeof(dst_c_));
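+    // src_, dst_ and dst_c_ point to heap buffers, so sizeof() here would
+    // measure the pointer rather than the buffer; use the buffer sizes.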
+ memset(src_, 0, kSrcSize);
+ memset(dst_, 0, kDstSize);
+ memset(dst_c_, 0, kDstSize);
}
int width_;
@@ -136,8 +142,8 @@ TEST_P(SixtapPredictTest, TestWithPresetData) {
uint8_t *src = const_cast<uint8_t*>(test_data);
- sixtap_predict_(&src[kSrcStride * 2 + 2 + 1], kSrcStride,
- 2, 2, dst_, kDstStride);
+ REGISTER_STATE_CHECK(sixtap_predict_(&src[kSrcStride * 2 + 2 + 1], kSrcStride,
+ 2, 2, dst_, kDstStride));
for (int i = 0; i < height_; ++i)
for (int j = 0; j < width_; ++j)
@@ -162,8 +168,9 @@ TEST_P(SixtapPredictTest, TestWithRandomData) {
xoffset, yoffset, dst_c_, kDstStride);
// Run test.
- sixtap_predict_(&src_[kSrcStride * 2 + 2 + 1], kSrcStride,
- xoffset, yoffset, dst_, kDstStride);
+ REGISTER_STATE_CHECK(
+ sixtap_predict_(&src_[kSrcStride * 2 + 2 + 1], kSrcStride,
+ xoffset, yoffset, dst_, kDstStride));
for (int i = 0; i < height_; ++i)
for (int j = 0; j < width_; ++j)
diff --git a/libvpx/test/subtract_test.cc b/libvpx/test/subtract_test.cc
index 99363de..81bfb66 100644
--- a/libvpx/test/subtract_test.cc
+++ b/libvpx/test/subtract_test.cc
@@ -10,9 +10,11 @@
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
extern "C" {
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vp8/common/blockd.h"
#include "vp8/encoder/block.h"
#include "vpx_mem/vpx_mem.h"
@@ -22,7 +24,12 @@ typedef void (*subtract_b_fn_t)(BLOCK *be, BLOCKD *bd, int pitch);
namespace {
-class SubtractBlockTest : public ::testing::TestWithParam<subtract_b_fn_t> {};
+class SubtractBlockTest : public ::testing::TestWithParam<subtract_b_fn_t> {
+ public:
+ virtual void TearDown() {
+ libvpx_test::ClearSystemState();
+ }
+};
using libvpx_test::ACMRandom;
@@ -77,7 +84,7 @@ TEST_P(SubtractBlockTest, SimpleSubtract) {
predictor += kDiffPredStride;
}
- GetParam()(&be, &bd, kDiffPredStride);
+ REGISTER_STATE_CHECK(GetParam()(&be, &bd, kDiffPredStride));
base_src = *be.base_src;
src_diff = be.src_diff;
diff --git a/libvpx/test/superframe_test.cc b/libvpx/test/superframe_test.cc
new file mode 100644
index 0000000..062ec6c
--- /dev/null
+++ b/libvpx/test/superframe_test.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <climits>
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
+#include "test/encode_test_driver.h"
+#include "test/i420_video_source.h"
+#include "test/util.h"
+
+namespace {
+
+class SuperframeTest : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+ protected:
+ SuperframeTest() : EncoderTest(GET_PARAM(0)), modified_buf_(NULL),
+ last_sf_pts_(0) {}
+
+ virtual void SetUp() {
+ InitializeConfig();
+ SetMode(GET_PARAM(1));
+ sf_count_ = 0;
+ sf_count_max_ = INT_MAX;
+ }
+
+ virtual void TearDown() {
+ delete[] modified_buf_;
+ }
+
+ virtual bool Continue() const {
+ return !HasFatalFailure() && !abort_;
+ }
+
+ virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
+ libvpx_test::Encoder *encoder) {
+ if (video->frame() == 1) {
+ encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
+ }
+ }
+
+ virtual const vpx_codec_cx_pkt_t * MutateEncoderOutputHook(
+ const vpx_codec_cx_pkt_t *pkt) {
+ if (pkt->kind != VPX_CODEC_CX_FRAME_PKT)
+ return pkt;
+
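+    // A VP9 superframe index sits at the end of the packet: its last byte
+    // is a marker whose top three bits are 110, whose bits 3-4 hold the
+    // per-frame size field width minus one (mag), and whose low three bits
+    // hold the frame count minus one. The full index is two marker bytes
+    // plus mag * frames size bytes, with the marker repeated at its start.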
+ const uint8_t *buffer = reinterpret_cast<uint8_t*>(pkt->data.frame.buf);
+ const uint8_t marker = buffer[pkt->data.frame.sz - 1];
+ const int frames = (marker & 0x7) + 1;
+ const int mag = ((marker >> 3) & 3) + 1;
+ const unsigned int index_sz = 2 + mag * frames;
+ if ((marker & 0xe0) == 0xc0 &&
+ pkt->data.frame.sz >= index_sz &&
+ buffer[pkt->data.frame.sz - index_sz] == marker) {
+      // This frame is a superframe; strip off the index.
+ if (modified_buf_)
+ delete[] modified_buf_;
+ modified_buf_ = new uint8_t[pkt->data.frame.sz - index_sz];
+ memcpy(modified_buf_, pkt->data.frame.buf,
+ pkt->data.frame.sz - index_sz);
+ modified_pkt_ = *pkt;
+ modified_pkt_.data.frame.buf = modified_buf_;
+ modified_pkt_.data.frame.sz -= index_sz;
+
+ sf_count_++;
+ last_sf_pts_ = pkt->data.frame.pts;
+ return &modified_pkt_;
+ }
+
+    // Make sure we encode a few frames after the last superframe.
+ abort_ |= sf_count_ > sf_count_max_ &&
+ pkt->data.frame.pts - last_sf_pts_ >= 5;
+ return pkt;
+ }
+
+ int sf_count_;
+ int sf_count_max_;
+ vpx_codec_cx_pkt_t modified_pkt_;
+ uint8_t *modified_buf_;
+ vpx_codec_pts_t last_sf_pts_;
+};
+
+TEST_P(SuperframeTest, TestSuperframeIndexIsOptional) {
+ sf_count_max_ = 0; // early exit on successful test.
+ cfg_.g_lag_in_frames = 25;
+
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 40);
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ EXPECT_EQ(sf_count_, 1);
+}
+
+VP9_INSTANTIATE_TEST_CASE(SuperframeTest, ::testing::Values(
+ ::libvpx_test::kTwoPassGood));
+} // namespace
diff --git a/libvpx/test/test-data.sha1 b/libvpx/test/test-data.sha1
index c1b6a83..1036d7c 100644
--- a/libvpx/test/test-data.sha1
+++ b/libvpx/test/test-data.sha1
@@ -1,4 +1,5 @@
d5dfb0151c9051f8c85999255645d7a23916d3c0 hantro_collage_w352h288.yuv
+b87815bf86020c592ccc7a846ba2e28ec8043902 hantro_odd.yuv
5184c46ddca8b1fadd16742e8500115bc8f749da vp80-00-comprehensive-001.ivf
65bf1bbbced81b97bd030f376d1b7f61a224793f vp80-00-comprehensive-002.ivf
906b4c1e99eb734504c504b3f1ad8052137ce672 vp80-00-comprehensive-003.ivf
@@ -120,4 +121,224 @@ f95eb6214571434f1f73ab7833b9ccdf47588020 vp80-03-segmentation-1437.ivf.md5
41d70bb5fa45bc88da1604a0af466930b8dd77b5 vp80-05-sharpness-1438.ivf.md5
086c56378df81b6cee264d7540a7b8f2b405c7a4 vp80-05-sharpness-1439.ivf.md5
d32dc2c4165eb266ea4c23c14a45459b363def32 vp80-05-sharpness-1440.ivf.md5
-8c69dc3d8e563f56ffab5ad1e400d9e689dd23df vp80-05-sharpness-1443.ivf.md5
\ No newline at end of file
+8c69dc3d8e563f56ffab5ad1e400d9e689dd23df vp80-05-sharpness-1443.ivf.md5
+c5b6fc822d7b4ed97b5a0d69e3a71d9de6cab815 vp90-00-akiyo-100.webm
+1cd8ee73b53f4ecc2511effd233f9af6ecdfac7e vp90-00-akiyo-100.webm.md5
+a854b0f2313efde7767a4465afbcbe35005ffb07 vp90-00-akiyo-200.webm
+b0f53ad309611246821174b642f6808cc1e670de vp90-00-akiyo-200.webm.md5
+38a5c0e5465f884474b1a5a9184685f17f961ba1 vp90-00-akiyo-300.webm
+756a34417fc10dc2a49464eccaa6b7f987227b57 vp90-00-akiyo-300.webm.md5
+1047e6f19dd137ae7bbd5b93d407fc7186f8a98e vp90-00-akiyo-50.webm
+0fa08a76901a6a5b2d4b58a6b20bfa5239409b9d vp90-00-akiyo-50.webm.md5
+767511b25dde2c5926f5284782a9f1e04fe7afda vp90-00-bowing-150.webm
+b259c3c6afb30fd1ae7d3a563c1fe9fe6a4644cd vp90-00-bowing-150.webm.md5
+2ef831c75c021a03176536fb652196e9afc37888 vp90-00-bowing-25.webm
+37d3522cd76b7bab3b5e973e2b2c51edea49ef3f vp90-00-bowing-25.webm.md5
+c1e4639f14914516ca704f38c875d01f4c06be14 vp90-00-bowing-400.webm
+ca35c574512185d5f20f3b81517d6ac3333a1377 vp90-00-bowing-400.webm.md5
+e20fc293db095e52f29b891bc09458e7568e8603 vp90-00-bus-100.webm
+a754ea588cc409546936c09fb1ad06b3014b94f9 vp90-00-bus-100.webm.md5
+da5eb45fa42f55ff70ec7b71999e6fd8489d12f9 vp90-00-bus-2000.webm
+2a7356328eb991175cbddebd51a30018e48632f2 vp90-00-bus-2000.webm.md5
+607169c774664176aca7c7d46dabf04b9c3634e4 vp90-00-bus-300.webm
+c84daa3a0290d73226b243dd630820ac97bf4fbd vp90-00-bus-300.webm.md5
+655902b54b9a8a882c11bc8bce1447f3b2085035 vp90-00-bus-4400.webm
+f719ecd7b53c8e35fae735396629d1915ffc1ff9 vp90-00-bus-4400.webm.md5
+afcdca9763d233dd63fd67165a7b92ea679822af vp90-00-bus-800.webm
+66e2a55560e570cae09520060f1ae315c7ea0a07 vp90-00-bus-800.webm.md5
+390b91c8566d94c3a869af77531585c38f9f78da vp90-00-cheer-1600.webm
+3d47da26375a75afef0cf2123f5c808d0862e25d vp90-00-cheer-1600.webm.md5
+23419784db17a50e129e3bd030c20256cf0d6eb0 vp90-00-cheer-2800.webm
+0df4676171f19e7807d719a9b8a6fadcefc8f1fc vp90-00-cheer-2800.webm.md5
+45ed3c42874d5ec88852798691cf54bfb0cf652a vp90-00-cheer-400.webm
+374fd67ac9ae0e8146051b77963459c54b9eaaa2 vp90-00-cheer-400.webm.md5
+1c9459d824116a297ff0e90bed9be783005f9ac1 vp90-00-cheer-600.webm
+9dc0d43f72c8eb49d51a9748fb9948495529a6b5 vp90-00-cheer-600.webm.md5
+a86c5af1929d2f929a5caf6ef847d0066086223b vp90-00-city-1200.webm
+231c7f0f406e3a8d2328daee4c4466e1b4d47354 vp90-00-city-1200.webm.md5
+be9cf927e6ab517d7876925d21b3193b1373d03d vp90-00-city-2000.webm
+487d60226a3a3039528a049e9c6e8243b07404e6 vp90-00-city-2000.webm.md5
+1f3cd649d5829d52c08da3323baa86b1dcf2d2de vp90-00-city-300.webm
+8e3b38cfa2be757e46ea12cff11762cb50134615 vp90-00-city-300.webm.md5
+286f6ea64c33ce735b5b7806aac4ca5ee331af66 vp90-00-city-600.webm
+7c51ead147ef4029094a2b455239090c1999d8fe vp90-00-city-600.webm.md5
+f7ecbd63bed06ed15afe0ba2a192f2cf7943714c vp90-00-coastguard-1200.webm
+8c8fed2c64cc8fb330e9200e1e0f58a79b953b79 vp90-00-coastguard-1200.webm.md5
+2e63178e5b2c2cc84226df2b514c4dde46c32d70 vp90-00-coastguard-200.webm
+128f2b22fdcfd02bc50e63b1cd6d40c0cc4998d6 vp90-00-coastguard-200.webm.md5
+97b779617d3c1ca8f50beda7126be5df913d071d vp90-00-coastguard-3600.webm
+0da0ab4794439e6b8ab9ced41239e1307686be69 vp90-00-coastguard-3600.webm.md5
+5e060d66573a40f7f0a46ae9b6acb51b0afb2e3c vp90-00-coastguard-5200.webm
+4ba526d4bb895c4794dc20edeb38b102a9b1bd92 vp90-00-coastguard-5200.webm.md5
+17810fa737f29d5b032836e38243bbb666f06636 vp90-00-container-1000.webm
+7e0fd7e93c5a16394818f844aa5f2d5fa7a73ee2 vp90-00-container-1000.webm.md5
+38deb4f59cec9e62715dec2f3670ffe7b1cf493e vp90-00-container-200.webm
+aa3229017f920750bd5d919e19ea6127ea05adc0 vp90-00-container-200.webm.md5
+8b1a67ef35d3f00981d23c41b56a0a2e09976312 vp90-00-container-50.webm
+0a6f1a793b936ff1287326882f1165065a2dcea0 vp90-00-container-50.webm.md5
+4c724db691b7202b60b56107ec7b0abc6cc52bdc vp90-00-deadline-1000.webm
+5903bd89be457be681a6c6c8fd8c19f4570173db vp90-00-deadline-1000.webm.md5
+ee5e19a8fe14d3e72b1314a012b49a3bc0586375 vp90-00-deadline-200.webm
+77095f98406fa27a2da8661f21664c00292dcefc vp90-00-deadline-200.webm.md5
+8230b07aa0ee7adf3caabae4e3bef997929001eb vp90-00-deadline-50.webm
+fc47a159b2d2b0bed93d4e2c35408243e70b6d24 vp90-00-deadline-50.webm.md5
+244d12cda51235dcc421fedbe12422b326f539e7 vp90-00-flower-100.webm
+dfeca236450b5ff19c1558ad33fba7ab7ff75f27 vp90-00-flower-100.webm.md5
+d5b7057564f670f7bf82017e2abc3aed5656b810 vp90-00-flower-2000.webm
+65118811f4d46ef1e911d520296731536d3a507e vp90-00-flower-2000.webm.md5
+a9c226643365f0c8ae03e780d55aa6c6fa9cc0e7 vp90-00-flower-300.webm
+fa5193d1a6e6b9e8bb91f75e91a3a377f00fa42e vp90-00-flower-300.webm.md5
+b206284b51dec6219c46e9b03def38a94d91bf89 vp90-00-flower-4400.webm
+c8a73acd8234b287e86465d03fbf4f886d1fefb2 vp90-00-flower-4400.webm.md5
+faff83d7b6aa89f5d9518ffc5d4b145eb02b6800 vp90-00-flower-800.webm
+328dd1969804afc094d010f54f350bd05390d6a9 vp90-00-flower-800.webm.md5
+42caa40d3b76b8ae5e7573b95e09bc4e57bea835 vp90-00-football-1600.webm
+167b8f58a85d83050d4c56391d6b2d9a9a205b9a vp90-00-football-1600.webm.md5
+4c4f93f594a8ef89a9ba903bbcff914022a5ad9d vp90-00-football-2800.webm
+7995f7f91b13d4ab5badcd3f9282bd1fceba38f3 vp90-00-football-2800.webm.md5
+c3ff724e79b4ae0202929f3ed1a1a5b67d10901f vp90-00-football-400.webm
+19164a0e58ca5d407282a867866e8ec4a0a08fea vp90-00-football-400.webm.md5
+95de1c4abceab3706f0225e3b9c5dc719901a6cf vp90-00-football-600.webm
+4a4454ae4d65748a45eaa3decb783bbe0ba190dc vp90-00-football-600.webm.md5
+80eebcdae76459c00d14b6c50f7529377e53a1c2 vp90-00-foreman-1200.webm
+8228cc5a7cc83970b3a65f9b49bc74733255b09c vp90-00-foreman-1200.webm.md5
+601d0ff4f058a3da3af4409e4117795f7c231fda vp90-00-foreman-2000.webm
+e0c0b0aa6f9597984a2d78e799a00e0052710b2c vp90-00-foreman-2000.webm.md5
+30ebc327645d68bcc83eab72610bba22f877fb4c vp90-00-foreman-300.webm
+080fc2adf29a84f02a3e4b5508fc2f8dc32f1440 vp90-00-foreman-300.webm.md5
+6b1a6be0f7bd7605b565750b3080be397d4c48a0 vp90-00-foreman-600.webm
+f7713d3eba8d34d511ba1c9585a5a3f34e133ba5 vp90-00-foreman-600.webm.md5
+b080d9786abc89b4be59bffc5baba7b42fbc286a vp90-00-hallmonitor-1200.webm
+77be47800b58001eb7a854d4d4a9b9823bbbe158 vp90-00-hallmonitor-1200.webm.md5
+05cd8e8d58ab8311ad528c27b4c89cdf268e749b vp90-00-hallmonitor-2000.webm
+de1aa35c7172e78e07d6b197280214bbd362cc4e vp90-00-hallmonitor-2000.webm.md5
+908676b32b190e956518bb742d1415efceeb8c75 vp90-00-hallmonitor-300.webm
+f9d39866db341d18256339e9fd2c0ec296f47702 vp90-00-hallmonitor-300.webm.md5
+1307c7f7558de34a6230912e684ff9571a05db5f vp90-00-hallmonitor-600.webm
+954b292dd56be5c1bf153df440b132e1b1fbcb68 vp90-00-hallmonitor-600.webm.md5
+05f556288c5c4211420f7c332daded816f9b31b7 vp90-00-harbour-1200.webm
+399481f93cc252f20ad5141dd402cf5363673578 vp90-00-harbour-1200.webm.md5
+fa62e449485c544c281030c5ccff32c60d4dd169 vp90-00-harbour-200.webm
+3d0e1885befb2493c477384917797164d4fe58e4 vp90-00-harbour-200.webm.md5
+fa3a5e563c3d2215703c1a68f71fbe2168a42468 vp90-00-harbour-3600.webm
+9af392f6b2cb5ec5c9446b7262206773df535319 vp90-00-harbour-3600.webm.md5
+476db4b15989a5a078f1d2fc5f9734d1d24f1da1 vp90-00-harbour-5200.webm
+352a05b179dc1f86cf6ce27494a4a8fb42379d72 vp90-00-harbour-5200.webm.md5
+0ea17a4892383a2fd0be9f88f213f5f48f2a61f4 vp90-00-highway-100.webm
+a2fe942955bafa83295d1381c9a25264764924c5 vp90-00-highway-100.webm.md5
+7ab80485670a5343a74c4a2454761ed3bed7ceef vp90-00-highway-1600.webm
+fda9c82cb5d28a5ff5f7dae7c537e9187dfbd4cc vp90-00-highway-1600.webm.md5
+162d42e033dad04fd7ae3bf9d39e9e204c022edc vp90-00-highway-2800.webm
+b882c93a2dc89feb6090b0f72e67ac8a59fc0986 vp90-00-highway-2800.webm.md5
+79b9a0e6fa6cdd2367228e9ac8d6a369a8d647e6 vp90-00-highway-50.webm
+80ecf926372dbe8c1b4bcd68ea2101f78a93b02e vp90-00-highway-50.webm.md5
+a67fd02cbb75c1a757b5ea56b9eee46069bfadbf vp90-00-husky-100.webm
+12cd583e791c8e5b40b5dffe4a9dbcc1929dc645 vp90-00-husky-100.webm.md5
+1a8b4302eb6f88b14a9acd4a6cbe62d0b380f2e4 vp90-00-husky-2000.webm
+a9c2532e5d867d7627bb6767008b43b653cce904 vp90-00-husky-2000.webm.md5
+f56f66afd4d4512a49904275a1c942ba7379fec4 vp90-00-husky-300.webm
+196dc386f104b7b9ed2ec6c6a1f104ce0319c2eb vp90-00-husky-300.webm.md5
+6ba3c16fd98d37a8de7023419682a3595778b9bc vp90-00-husky-4400.webm
+2f4815ba97e352fcd0089d1a5883a0aff1e5394a vp90-00-husky-4400.webm.md5
+db04a296c377693dd6e974bea36256f4b14cddef vp90-00-husky-800.webm
+7658473ad17ee689a37fda558c5a23816131cfc3 vp90-00-husky-800.webm.md5
+50cf9e34b61e1cf32c9dde2ebcc5f5703c379a41 vp90-00-ice-150.webm
+806ceba91dc40c45eafc4d7ee61df9346c6fe5f9 vp90-00-ice-150.webm.md5
+4cfca1bea7aae6e4405abfca603cfbded13ded1a vp90-00-ice-400.webm
+e4298abf05419973da89c0bfcdf0006b1606ebcd vp90-00-ice-400.webm.md5
+12e3ccfdf96c3f4eebeed8106c5daef6c2b28d83 vp90-00-ice-800.webm
+6fb2aacb4d8131dcabaa61a9cd2497cd09854377 vp90-00-ice-800.webm.md5
+124977938c47ba739e918533bc5d6d73e41ce2ec vp90-00-mobile-1600.webm
+603b2b523c8ed5922121d285567a845bb6693d35 vp90-00-mobile-1600.webm.md5
+93f204b90250791b884479be5da534a5bc6304ff vp90-00-mobile-2800.webm
+21ec8735b774c66e192f7270c12075f598f700d5 vp90-00-mobile-2800.webm.md5
+fe9cdbfdeee2b7554efb532f646703cff55c2d2c vp90-00-mobile-400.webm
+4def63c78ee09e90e6385d3122ada95343246102 vp90-00-mobile-400.webm.md5
+2a042aa8a06c45770dcb52c56a7f5cea6d51b8dd vp90-00-mobile-600.webm
+03169f031dece0db3d89ce16cc3e0ee3eca21065 vp90-00-mobile-600.webm.md5
+7fc5b0b0c684d63e161c9c5932e1374327e15dd4 vp90-00-motherdaughter-100.webm
+290ac7722caf4b15136b307a239c9b903113b9c4 vp90-00-motherdaughter-100.webm.md5
+67ddfce82bff083a1ceb108a7dcfb801791102f1 vp90-00-motherdaughter-300.webm
+7696698d38e32f0afeb3a3e9a45b7fe3f237aaba vp90-00-motherdaughter-300.webm.md5
+ff65a1bee2fe384728017c5148df61379043d5b6 vp90-00-motherdaughter-600.webm
+f0b167000bf40877d1ba7ba52a08b4310011c032 vp90-00-motherdaughter-600.webm.md5
+d73c54e676bd63424fc9ad8d0cef64e929081cf4 vp90-00-news-100.webm
+71821b71a97823e9ba58563efc841dc6beefe9df vp90-00-news-100.webm.md5
+2937238d094863951eb8f218438b966d2b7b5430 vp90-00-news-300.webm
+2587d0859a330cf6d8e0a135d1f586bb2a5033fc vp90-00-news-300.webm.md5
+65afdd4fc411951115b48435b8b65155594b5c99 vp90-00-news-600.webm
+5815bb341db976f44dab97bb9cfba8ea0ca55502 vp90-00-news-600.webm.md5
+de5dd99ac04d3a937fc0951d06fb8f533fdc393a vp90-00-pamphlet-150.webm
+0381d705fa490f35c772e3048b423b382088d546 vp90-00-pamphlet-150.webm.md5
+46f283284cb64b79243b2ea6aad709a526c26393 vp90-00-pamphlet-25.webm
+f100fbebcad96f27ed8f340414b939bc738d49d0 vp90-00-pamphlet-25.webm.md5
+8df04ece12455c5c40f14cb089348260798c5f2b vp90-00-pamphlet-400.webm
+66a2c87cd4194368d3477e9a334880b76c87e991 vp90-00-pamphlet-400.webm.md5
+a00e97e4a71f5e24f194c59cde7d41bc2c3af325 vp90-00-paris-1000.webm
+53ef896e16d1b83aa5166945d149c7133401b3f0 vp90-00-paris-1000.webm.md5
+6b03388e0236f6171e20c73834858e3c87b441b2 vp90-00-paris-200.webm
+55a324b0153c5d54cd0c0492fed8755c441fa18c vp90-00-paris-200.webm.md5
+429ec362a9600c8822652cf7e122e22bca033d69 vp90-00-paris-50.webm
+4406226b7bddb11ede8ee0c442d52e5d3bbbde78 vp90-00-paris-50.webm.md5
+a7996d4e757ea484aa72e14f623d6c9e72537888 vp90-00-signirene-1000.webm
+f65a1ac6e1ce77102e63fb363dbca361b8108c02 vp90-00-signirene-1000.webm.md5
+8c2f686179bc3e87a18b48bcb5058f3cd61e1b4c vp90-00-signirene-200.webm
+b8ab16cba9392e49169c374eb1e0c1b763ccaefb vp90-00-signirene-200.webm.md5
+5f8f99c386dce64931bbd4fc42a59a78dc6fdba1 vp90-00-signirene-50.webm
+fdb8c4bc302884d413a256634d3e2fbd92867c90 vp90-00-signirene-50.webm.md5
+d5074f0a5bcefe9fd651afbbebf0e0f3fedb965b vp90-00-silent-1000.webm
+9c075894fbfb84791fcc7dbd3fcab15b0a9bd64e vp90-00-silent-1000.webm.md5
+32101f334f675715a8f411638dfda80afacc37a6 vp90-00-silent-200.webm
+fb0dac37f31ca711443832046a6aaf868e69b357 vp90-00-silent-200.webm.md5
+0aaef50d7f94873e99ec7e39f59a6b74e92ad946 vp90-00-silent-50.webm
+be9fc41965b5b63f7c7bbd6c91191e940903e012 vp90-00-silent-50.webm.md5
+5e22ad14c562733d4d4a3ce163b580ed4a64e6fe vp90-00-soccer-100.webm
+1ca9a0016910cfca26def9944568749a168131d8 vp90-00-soccer-100.webm.md5
+2d9b2a0fa5ac210f8d7c646578698e045733ad4a vp90-00-soccer-2000.webm
+f979078650057606ca770b3f03be4c509efb40a9 vp90-00-soccer-2000.webm.md5
+7b789360ffc1eb5a3735f8a1f8d248a24ca4267c vp90-00-soccer-300.webm
+195d33b23ca8304519bd6e38e9657e53a04779d8 vp90-00-soccer-300.webm.md5
+3907318ef35573e4efc5c150d3aff271c7157501 vp90-00-soccer-4400.webm
+4b43ceecae9a9a7d39a47347f9e20af3613827d1 vp90-00-soccer-4400.webm.md5
+c89920aa89194cb6a36f77dff8722573f0df7241 vp90-00-soccer-800.webm
+1da71751009afa483a03e274a538df24c9f5e513 vp90-00-soccer-800.webm.md5
+efca14e8e0515a8f8ed3ded11fdbff24b09a7f9d vp90-00-stefan-1600.webm
+6f103270ce03cc85b28dd1c86d0447922d810671 vp90-00-stefan-1600.webm.md5
+b99ab6a983d48c15aa3a9160d06286fca0074193 vp90-00-stefan-2800.webm
+986a72dd9988c6bf4246cd5bd966ce991ba55319 vp90-00-stefan-2800.webm.md5
+eb962244ca51a101ad8f585df6be8f5f96691f18 vp90-00-stefan-400.webm
+2747cfd8f74aedc370767f08129b35ace70e1fe7 vp90-00-stefan-400.webm.md5
+b507b8cedd0147c5316db8f84f35ace768c25069 vp90-00-stefan-600.webm
+daeb369046c2dc27ecfde978b87fd8b49d83789f vp90-00-stefan-600.webm.md5
+c5c2dd891c2b5fe4a70845858ccb859df3455ee7 vp90-00-students-100.webm
+d1be06dc636ece0c34ab8c17399888aaf19e0c19 vp90-00-students-100.webm.md5
+c9e4da3a8b455aa690d89338f32f9d76773cdd18 vp90-00-students-300.webm
+a9aa72e1ee27063f8e9f13b4647cec01c8efb2d6 vp90-00-students-300.webm.md5
+e9e5072cd944a8994e50fce367975e3ce526bd67 vp90-00-students-600.webm
+86525ce188a98a51f86fad27341729bb61d1ca8b vp90-00-students-600.webm.md5
+58deb053aeafefdfdf13741accf9fcbe4584ea94 vp90-00-tempete-1200.webm
+ec395a2ec76b4c1e64e243366a8840da22ee3a65 vp90-00-tempete-1200.webm.md5
+5d35232eaa8ee149a917ff94536968fb37dad50e vp90-00-tempete-200.webm
+7f8c7529f40d6b6d6de8e89dbf9697623d27c234 vp90-00-tempete-200.webm.md5
+c44eb147bc3f8682b96096fccef8beb4380c40db vp90-00-tempete-3600.webm
+01fd23e412530fa2d5319a22886161957a747ee0 vp90-00-tempete-3600.webm.md5
+56ab322b34a750e16dcc8ccfb735a5b9270cedc4 vp90-00-tempete-5200.webm
+1cf803409ae53b991bff10079af4ab07aaa2853d vp90-00-tempete-5200.webm.md5
+ffe48d52019c228e919f4b123028664b8d0c2f4b vp90-00-tennis-100.webm
+406fda3367899995d4e37170063495832e2be372 vp90-00-tennis-100.webm.md5
+6c030f8142b1932fbe8eb5c2b39b3452a5eea3aa vp90-00-tennis-2000.webm
+dcf20921e2a8ab0dcd09f7f6bdcdd35f979205ae vp90-00-tennis-2000.webm.md5
+3fe0df7b74f301b39e1b21e6926c69a8418b9b70 vp90-00-tennis-300.webm
+80c8301d3a37b33ca50318ba000066a6ae9929dc vp90-00-tennis-300.webm.md5
+82a2497083b8dce6b1c73bcdf16323ea69d1cca9 vp90-00-tennis-4400.webm
+83ce97bc09a7e1b2f2c3437195a8931d7608a62b vp90-00-tennis-4400.webm.md5
+2c8bd3a29bbd1085169bfcba9fdf65a37f4a16bb vp90-00-tennis-800.webm
+9920a65e06d2e7025f13f3d8bf35670503875aed vp90-00-tennis-800.webm.md5
+26469062c5724c2cc4914436ef032bb55373f843 vp90-00-waterfall-150.webm
+9b86373ce15302a9b22cef8f808ce0e37e6d2b65 vp90-00-waterfall-150.webm.md5
+410ba6af2ddca5110fa7a4c383dc8b28f38cf565 vp90-00-waterfall-200.webm
+251892d3fdcbc9d7a20c22ba202ed4935222e5b8 vp90-00-waterfall-200.webm.md5
+40b643aff88aed3764c5b58c446a8fbbc5fb36d7 vp90-00-waterfall-400.webm
+51f31a6b6408f8af4d107e0f2a3c1a274d4da6bb vp90-00-waterfall-400.webm.md5
+bd421141e01f53dc15ced790f9a96ab70a613260 vp90-00-waterfall-800.webm
+1366efe772fccaa2b8a6ac3ce45255b312a2ef6c vp90-00-waterfall-800.webm.md5
diff --git a/libvpx/test/test.mk b/libvpx/test/test.mk
index 7a11a27..806901d 100644
--- a/libvpx/test/test.mk
+++ b/libvpx/test/test.mk
@@ -1,5 +1,9 @@
-LIBVPX_TEST_SRCS-yes += acm_random.h
+LIBVPX_TEST_SRCS-yes += clear_system_state.h
+LIBVPX_TEST_SRCS-yes += register_state_check.h
LIBVPX_TEST_SRCS-yes += test.mk
+LIBVPX_TEST_SRCS-yes += acm_random.h
+LIBVPX_TEST_SRCS-yes += md5_helper.h
+LIBVPX_TEST_SRCS-yes += codec_factory.h
LIBVPX_TEST_SRCS-yes += test_libvpx.cc
LIBVPX_TEST_SRCS-yes += util.h
LIBVPX_TEST_SRCS-yes += video_source.h
@@ -13,18 +17,32 @@ LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += altref_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += config_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += cq_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += datarate_test.cc
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += encode_test_driver.cc
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += encode_test_driver.h
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += error_resilience_test.cc
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += i420_video_source.h
+
+LIBVPX_TEST_SRCS-yes += encode_test_driver.cc
+LIBVPX_TEST_SRCS-yes += encode_test_driver.h
+LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += error_resilience_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += i420_video_source.h
LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += keyframe_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += borders_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += resize_test.cc
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_DECODER) += ../md5_utils.h ../md5_utils.c
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_DECODER) += decode_test_driver.cc
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_DECODER) += decode_test_driver.h
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_DECODER) += ivf_video_source.h
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_DECODER) += test_vector_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_DECODERS) += ../md5_utils.h ../md5_utils.c
+LIBVPX_TEST_SRCS-yes += decode_test_driver.cc
+LIBVPX_TEST_SRCS-yes += decode_test_driver.h
+LIBVPX_TEST_SRCS-$(CONFIG_DECODERS) += ivf_video_source.h
+
+## WebM Parsing
+NESTEGG_SRCS += ../nestegg/halloc/halloc.h
+NESTEGG_SRCS += ../nestegg/halloc/src/align.h
+NESTEGG_SRCS += ../nestegg/halloc/src/halloc.c
+NESTEGG_SRCS += ../nestegg/halloc/src/hlist.h
+NESTEGG_SRCS += ../nestegg/include/nestegg/nestegg.h
+NESTEGG_SRCS += ../nestegg/src/nestegg.c
+LIBVPX_TEST_SRCS-$(CONFIG_DECODERS) += $(NESTEGG_SRCS)
+LIBVPX_TEST_SRCS-$(CONFIG_DECODERS) += webm_video_source.h
+
+LIBVPX_TEST_SRCS-$(CONFIG_DECODERS) += test_vector_test.cc
+
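+  // Expect every frame, including the internally resized ones, to stay
+  // within 1 dB of the first frame's PSNR.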
##
## WHITE BOX TESTS
##
@@ -33,19 +51,50 @@ LIBVPX_TEST_SRCS-$(CONFIG_VP8_DECODER) += test_vector_test.cc
##
ifeq ($(CONFIG_SHARED),)
+## VP8
+ifneq ($(CONFIG_VP8_ENCODER)$(CONFIG_VP8_DECODER),)
+
# These tests require both the encoder and decoder to be built.
ifeq ($(CONFIG_VP8_ENCODER)$(CONFIG_VP8_DECODER),yesyes)
-LIBVPX_TEST_SRCS-yes += boolcoder_test.cc
+LIBVPX_TEST_SRCS-yes += vp8_boolcoder_test.cc
endif
-LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += fdct4x4_test.cc
-LIBVPX_TEST_SRCS-yes += idctllm_test.cc
+LIBVPX_TEST_SRCS-yes += idct_test.cc
LIBVPX_TEST_SRCS-yes += intrapred_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_POSTPROC) += pp_filter_test.cc
-LIBVPX_TEST_SRCS-yes += sad_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += sad_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += set_roi.cc
LIBVPX_TEST_SRCS-yes += sixtap_predict_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += subtract_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += variance_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP8_DECODER) += vp8_decrypt_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += vp8_fdct4x4_test.cc
+
+endif # VP8
+
+## VP9
+ifneq ($(CONFIG_VP9_ENCODER)$(CONFIG_VP9_DECODER),)
+
+# These tests require both the encoder and decoder to be built.
+ifeq ($(CONFIG_VP9_ENCODER)$(CONFIG_VP9_DECODER),yesyes)
+LIBVPX_TEST_SRCS-yes += vp9_boolcoder_test.cc
+
+# IDCT test currently depends on FDCT function
+LIBVPX_TEST_SRCS-yes += idct8x8_test.cc
+LIBVPX_TEST_SRCS-yes += superframe_test.cc
+LIBVPX_TEST_SRCS-yes += tile_independence_test.cc
+endif
+
+LIBVPX_TEST_SRCS-$(CONFIG_VP9) += convolve_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += fdct4x4_test.cc
+
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += fdct8x8_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += dct16x16_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += variance_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += dct32x32_test.cc
+
+endif # VP9
+
endif
@@ -53,7 +102,9 @@ endif
##
## TEST DATA
##
-LIBVPX_TEST_DATA-$(CONFIG_VP8_ENCODER) += hantro_collage_w352h288.yuv
+LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += hantro_collage_w352h288.yuv
+LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += hantro_odd.yuv
+
LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-001.ivf
LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-002.ivf
LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-003.ivf
@@ -176,3 +227,223 @@ LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1438.ivf.md5
LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1439.ivf.md5
LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1440.ivf.md5
LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1443.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-akiyo-100.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-akiyo-100.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-akiyo-200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-akiyo-200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-akiyo-300.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-akiyo-300.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-akiyo-50.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-akiyo-50.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-bowing-150.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-bowing-150.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-bowing-25.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-bowing-25.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-bowing-400.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-bowing-400.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-bus-100.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-bus-100.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-bus-2000.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-bus-2000.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-bus-300.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-bus-300.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-bus-4400.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-bus-4400.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-bus-800.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-bus-800.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-cheer-1600.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-cheer-1600.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-cheer-2800.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-cheer-2800.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-cheer-400.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-cheer-400.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-cheer-600.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-cheer-600.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-city-1200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-city-1200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-city-2000.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-city-2000.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-city-300.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-city-300.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-city-600.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-city-600.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-coastguard-1200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-coastguard-1200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-coastguard-200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-coastguard-200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-coastguard-3600.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-coastguard-3600.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-coastguard-5200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-coastguard-5200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-container-1000.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-container-1000.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-container-200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-container-200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-container-50.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-container-50.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-deadline-1000.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-deadline-1000.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-deadline-200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-deadline-200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-deadline-50.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-deadline-50.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-flower-100.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-flower-100.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-flower-2000.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-flower-2000.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-flower-300.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-flower-300.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-flower-4400.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-flower-4400.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-flower-800.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-flower-800.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-football-1600.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-football-1600.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-football-2800.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-football-2800.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-football-400.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-football-400.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-football-600.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-football-600.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-foreman-1200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-foreman-1200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-foreman-2000.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-foreman-2000.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-foreman-300.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-foreman-300.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-foreman-600.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-foreman-600.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-hallmonitor-1200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-hallmonitor-1200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-hallmonitor-2000.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-hallmonitor-2000.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-hallmonitor-300.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-hallmonitor-300.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-hallmonitor-600.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-hallmonitor-600.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-harbour-1200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-harbour-1200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-harbour-200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-harbour-200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-harbour-3600.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-harbour-3600.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-harbour-5200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-harbour-5200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-highway-100.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-highway-100.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-highway-1600.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-highway-1600.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-highway-2800.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-highway-2800.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-highway-50.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-highway-50.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-husky-100.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-husky-100.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-husky-2000.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-husky-2000.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-husky-300.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-husky-300.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-husky-4400.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-husky-4400.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-husky-800.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-husky-800.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-ice-150.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-ice-150.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-ice-400.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-ice-400.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-ice-800.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-ice-800.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-mobile-1600.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-mobile-1600.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-mobile-2800.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-mobile-2800.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-mobile-400.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-mobile-400.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-mobile-600.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-mobile-600.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-motherdaughter-100.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-motherdaughter-100.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-motherdaughter-300.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-motherdaughter-300.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-motherdaughter-600.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-motherdaughter-600.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-news-100.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-news-100.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-news-300.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-news-300.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-news-600.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-news-600.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-pamphlet-150.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-pamphlet-150.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-pamphlet-25.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-pamphlet-25.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-pamphlet-400.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-pamphlet-400.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-paris-1000.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-paris-1000.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-paris-200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-paris-200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-paris-50.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-paris-50.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-signirene-1000.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-signirene-1000.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-signirene-200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-signirene-200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-signirene-50.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-signirene-50.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-silent-1000.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-silent-1000.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-silent-200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-silent-200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-silent-50.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-silent-50.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-soccer-100.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-soccer-100.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-soccer-2000.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-soccer-2000.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-soccer-300.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-soccer-300.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-soccer-4400.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-soccer-4400.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-soccer-800.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-soccer-800.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-stefan-1600.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-stefan-1600.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-stefan-2800.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-stefan-2800.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-stefan-400.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-stefan-400.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-stefan-600.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-stefan-600.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-students-100.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-students-100.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-students-300.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-students-300.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-students-600.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-students-600.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-tempete-1200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-tempete-1200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-tempete-200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-tempete-200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-tempete-3600.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-tempete-3600.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-tempete-5200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-tempete-5200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-tennis-100.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-tennis-100.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-tennis-2000.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-tennis-2000.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-tennis-300.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-tennis-300.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-tennis-4400.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-tennis-4400.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-tennis-800.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-tennis-800.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-waterfall-150.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-waterfall-150.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-waterfall-200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-waterfall-200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-waterfall-400.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-waterfall-400.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-waterfall-800.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-00-waterfall-800.webm.md5
diff --git a/libvpx/test/test_libvpx.cc b/libvpx/test/test_libvpx.cc
index cfd5d28..5610c26 100644
--- a/libvpx/test/test_libvpx.cc
+++ b/libvpx/test/test_libvpx.cc
@@ -9,11 +9,17 @@
*/
#include <string>
#include "vpx_config.h"
-#if ARCH_X86 || ARCH_X86_64
extern "C" {
+#if ARCH_X86 || ARCH_X86_64
#include "vpx_ports/x86.h"
-}
#endif
+#if CONFIG_VP8
+extern void vp8_rtcd();
+#endif
+#if CONFIG_VP9
+extern void vp9_rtcd();
+#endif
+}
#include "third_party/googletest/src/include/gtest/gtest.h"
static void append_gtest_filter(const char *str) {
@@ -27,19 +33,29 @@ int main(int argc, char **argv) {
#if ARCH_X86 || ARCH_X86_64
const int simd_caps = x86_simd_caps();
- if(!(simd_caps & HAS_MMX))
+ if (!(simd_caps & HAS_MMX))
append_gtest_filter(":-MMX/*");
- if(!(simd_caps & HAS_SSE))
+ if (!(simd_caps & HAS_SSE))
append_gtest_filter(":-SSE/*");
- if(!(simd_caps & HAS_SSE2))
+ if (!(simd_caps & HAS_SSE2))
append_gtest_filter(":-SSE2/*");
- if(!(simd_caps & HAS_SSE3))
+ if (!(simd_caps & HAS_SSE3))
append_gtest_filter(":-SSE3/*");
- if(!(simd_caps & HAS_SSSE3))
+ if (!(simd_caps & HAS_SSSE3))
append_gtest_filter(":-SSSE3/*");
- if(!(simd_caps & HAS_SSE4_1))
+ if (!(simd_caps & HAS_SSE4_1))
append_gtest_filter(":-SSE4_1/*");
#endif
+#if !CONFIG_SHARED
+ /* Shared library builds don't support whitebox tests that exercise internal symbols. */
+#if CONFIG_VP8
+ vp8_rtcd();
+#endif
+#if CONFIG_VP9
+ vp9_rtcd();
+#endif
+#endif
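+  /* Note (editorial assumption, not part of the original change): calling
+     vp8_rtcd()/vp9_rtcd() here presumably fills in the run-time CPU
+     dispatch tables up front, so tests that invoke codec-internal
+     functions directly see fully initialized function pointers. */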
+
return RUN_ALL_TESTS();
}
diff --git a/libvpx/test/test_vector_test.cc b/libvpx/test/test_vector_test.cc
index 938457b..d7bd184 100644
--- a/libvpx/test/test_vector_test.cc
+++ b/libvpx/test/test_vector_test.cc
@@ -12,20 +12,19 @@
#include <cstdlib>
#include <string>
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
#include "test/ivf_video_source.h"
+#include "test/webm_video_source.h"
+#include "test/util.h"
+#include "test/md5_helper.h"
extern "C" {
-#include "./md5_utils.h"
#include "vpx_mem/vpx_mem.h"
}
-#if defined(_MSC_VER)
-#define snprintf sprintf_s
-#endif
-
namespace {
-// There are 61 test vectors in total.
-const char *kTestVectors[] = {
+#if CONFIG_VP8_DECODER
+const char *kVP8TestVectors[] = {
"vp80-00-comprehensive-001.ivf",
"vp80-00-comprehensive-002.ivf", "vp80-00-comprehensive-003.ivf",
"vp80-00-comprehensive-004.ivf", "vp80-00-comprehensive-005.ivf",
@@ -58,11 +57,71 @@ const char *kTestVectors[] = {
"vp80-05-sharpness-1438.ivf", "vp80-05-sharpness-1439.ivf",
"vp80-05-sharpness-1440.ivf", "vp80-05-sharpness-1443.ivf"
};
+#endif
+#if CONFIG_VP9_DECODER
+const char *kVP9TestVectors[] = {
+ "vp90-00-akiyo-200.webm", "vp90-00-akiyo-300.webm",
+ "vp90-00-akiyo-50.webm", "vp90-00-bowing-150.webm",
+ "vp90-00-bowing-25.webm", "vp90-00-bowing-400.webm",
+ "vp90-00-bus-100.webm", "vp90-00-bus-2000.webm",
+ "vp90-00-bus-300.webm", "vp90-00-bus-4400.webm",
+ "vp90-00-bus-800.webm", "vp90-00-cheer-1600.webm",
+ "vp90-00-cheer-2800.webm", "vp90-00-cheer-400.webm",
+ "vp90-00-cheer-600.webm", "vp90-00-city-1200.webm",
+ "vp90-00-city-2000.webm", "vp90-00-city-300.webm",
+ "vp90-00-city-600.webm", "vp90-00-coastguard-1200.webm",
+ "vp90-00-coastguard-200.webm", "vp90-00-coastguard-3600.webm",
+ "vp90-00-coastguard-5200.webm", "vp90-00-container-1000.webm",
+ "vp90-00-container-200.webm", "vp90-00-container-50.webm",
+ "vp90-00-deadline-1000.webm", "vp90-00-deadline-200.webm",
+ "vp90-00-deadline-50.webm", "vp90-00-flower-100.webm",
+ "vp90-00-flower-2000.webm", "vp90-00-flower-300.webm",
+ "vp90-00-flower-4400.webm", "vp90-00-flower-800.webm",
+ "vp90-00-football-1600.webm", "vp90-00-football-2800.webm",
+ "vp90-00-football-400.webm", "vp90-00-football-600.webm",
+ "vp90-00-foreman-1200.webm", "vp90-00-foreman-2000.webm",
+ "vp90-00-foreman-300.webm", "vp90-00-foreman-600.webm",
+ "vp90-00-hallmonitor-1200.webm", "vp90-00-hallmonitor-2000.webm",
+ "vp90-00-hallmonitor-300.webm", "vp90-00-hallmonitor-600.webm",
+ "vp90-00-harbour-1200.webm", "vp90-00-harbour-200.webm",
+ "vp90-00-harbour-3600.webm", "vp90-00-harbour-5200.webm",
+ "vp90-00-highway-100.webm", "vp90-00-highway-1600.webm",
+ "vp90-00-highway-2800.webm", "vp90-00-highway-50.webm",
+ "vp90-00-husky-100.webm", "vp90-00-husky-2000.webm",
+ "vp90-00-husky-300.webm", "vp90-00-husky-4400.webm",
+ "vp90-00-husky-800.webm", "vp90-00-ice-150.webm",
+ "vp90-00-ice-400.webm", "vp90-00-ice-800.webm",
+ "vp90-00-mobile-1600.webm", "vp90-00-mobile-2800.webm",
+ "vp90-00-mobile-400.webm", "vp90-00-mobile-600.webm",
+ "vp90-00-motherdaughter-100.webm", "vp90-00-motherdaughter-300.webm",
+ "vp90-00-motherdaughter-600.webm", "vp90-00-news-100.webm",
+ "vp90-00-news-300.webm", "vp90-00-news-600.webm",
+ "vp90-00-pamphlet-150.webm", "vp90-00-pamphlet-25.webm",
+ "vp90-00-pamphlet-400.webm", "vp90-00-paris-1000.webm",
+ "vp90-00-paris-200.webm", "vp90-00-paris-50.webm",
+ "vp90-00-signirene-1000.webm", "vp90-00-signirene-200.webm",
+ "vp90-00-signirene-50.webm", "vp90-00-silent-1000.webm",
+ "vp90-00-silent-200.webm", "vp90-00-silent-50.webm",
+ "vp90-00-soccer-100.webm", "vp90-00-soccer-2000.webm",
+ "vp90-00-soccer-300.webm", "vp90-00-soccer-4400.webm",
+ "vp90-00-soccer-800.webm", "vp90-00-stefan-1600.webm",
+ "vp90-00-stefan-2800.webm", "vp90-00-stefan-400.webm",
+ "vp90-00-stefan-600.webm", "vp90-00-students-100.webm",
+ "vp90-00-students-300.webm", "vp90-00-students-600.webm",
+ "vp90-00-tempete-1200.webm", "vp90-00-tempete-200.webm",
+ "vp90-00-tempete-3600.webm", "vp90-00-tempete-5200.webm",
+ "vp90-00-tennis-100.webm", "vp90-00-tennis-2000.webm",
+ "vp90-00-tennis-300.webm", "vp90-00-tennis-4400.webm",
+ "vp90-00-tennis-800.webm", "vp90-00-waterfall-150.webm",
+ "vp90-00-waterfall-200.webm", "vp90-00-waterfall-400.webm",
+ "vp90-00-waterfall-800.webm",
+};
+#endif
-class TestVectorTest : public libvpx_test::DecoderTest,
- public ::testing::TestWithParam<const char*> {
+class TestVectorTest : public ::libvpx_test::DecoderTest,
+ public ::libvpx_test::CodecTestWithParam<const char*> {
protected:
- TestVectorTest() : md5_file_(NULL) {}
+ TestVectorTest() : DecoderTest(GET_PARAM(0)), md5_file_(NULL) {}
virtual ~TestVectorTest() {
if (md5_file_)
@@ -85,30 +144,9 @@ class TestVectorTest : public libvpx_test::DecoderTest,
ASSERT_NE(res, EOF) << "Read md5 data failed";
expected_md5[32] = '\0';
- MD5Context md5;
- MD5Init(&md5);
-
- // Compute and update md5 for each raw in decompressed data.
- for (int plane = 0; plane < 3; ++plane) {
- uint8_t *buf = img.planes[plane];
-
- for (unsigned int y = 0; y < (plane ? (img.d_h + 1) >> 1 : img.d_h);
- ++y) {
- MD5Update(&md5, buf, (plane ? (img.d_w + 1) >> 1 : img.d_w));
- buf += img.stride[plane];
- }
- }
-
- uint8_t md5_sum[16];
- MD5Final(md5_sum, &md5);
-
- char actual_md5[33];
- // Convert to get the actual md5.
- for (int i = 0; i < 16; i++) {
- snprintf(&actual_md5[i * 2], sizeof(actual_md5) - i * 2, "%02x",
- md5_sum[i]);
- }
- actual_md5[32] = '\0';
+ ::libvpx_test::MD5 md5_res;
+ md5_res.Add(&img);
+ const char *actual_md5 = md5_res.Get();
// Check md5 match.
ASSERT_STREQ(expected_md5, actual_md5)
@@ -124,21 +162,29 @@ class TestVectorTest : public libvpx_test::DecoderTest,
// checksums match the correct md5 data, then the test is passed. Otherwise,
// the test failed.
TEST_P(TestVectorTest, MD5Match) {
- const std::string filename = GetParam();
- // Open compressed video file.
- libvpx_test::IVFVideoSource video(filename);
+ const std::string filename = GET_PARAM(1);
+ libvpx_test::CompressedVideoSource *video = NULL;
- video.Init();
+ // Open compressed video file.
+ if (filename.substr(filename.length() - 3, 3) == "ivf") {
+ video = new libvpx_test::IVFVideoSource(filename);
+ } else if (filename.substr(filename.length() - 4, 4) == "webm") {
+ video = new libvpx_test::WebMVideoSource(filename);
+  }
+  ASSERT_TRUE(video != NULL) << "Unsupported extension: " << filename;
+  video->Init();
// Construct md5 file name.
const std::string md5_filename = filename + ".md5";
OpenMD5File(md5_filename);
// Decode frame, and check the md5 matching.
- ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_NO_FATAL_FAILURE(RunLoop(video));
+ delete video;
}
-INSTANTIATE_TEST_CASE_P(TestVectorSequence, TestVectorTest,
- ::testing::ValuesIn(kTestVectors));
+VP8_INSTANTIATE_TEST_CASE(TestVectorTest,
+ ::testing::ValuesIn(kVP8TestVectors));
+VP9_INSTANTIATE_TEST_CASE(TestVectorTest,
+ ::testing::ValuesIn(kVP9TestVectors));
} // namespace
diff --git a/libvpx/test/tile_independence_test.cc b/libvpx/test/tile_independence_test.cc
new file mode 100644
index 0000000..9633ed7
--- /dev/null
+++ b/libvpx/test/tile_independence_test.cc
@@ -0,0 +1,108 @@
+/*
+ Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+
+ Use of this source code is governed by a BSD-style license
+ that can be found in the LICENSE file in the root of the source
+ tree. An additional intellectual property rights grant can be found
+ in the file PATENTS. All contributing project authors may
+ be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cstdio>
+#include <cstdlib>
+#include <string>
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
+#include "test/encode_test_driver.h"
+#include "test/i420_video_source.h"
+#include "test/util.h"
+#include "test/md5_helper.h"
+extern "C" {
+#include "vpx_mem/vpx_mem.h"
+}
+
+namespace {
+class TileIndependenceTest : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<int> {
+ protected:
+ TileIndependenceTest() : EncoderTest(GET_PARAM(0)), n_tiles_(GET_PARAM(1)),
+ md5_fw_order_(), md5_inv_order_() {
+ init_flags_ = VPX_CODEC_USE_PSNR;
+ vpx_codec_dec_cfg_t cfg;
+ cfg.w = 704;
+ cfg.h = 144;
+ cfg.threads = 1;
+ fw_dec_ = codec_->CreateDecoder(cfg, 0);
+ inv_dec_ = codec_->CreateDecoder(cfg, 0);
+ inv_dec_->Control(VP9_INVERT_TILE_DECODE_ORDER, 1);
+ }
+
+ virtual ~TileIndependenceTest() {
+ delete fw_dec_;
+ delete inv_dec_;
+ }
+
+ virtual void SetUp() {
+ InitializeConfig();
+ SetMode(libvpx_test::kTwoPassGood);
+ }
+
+ virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
+ libvpx_test::Encoder *encoder) {
+ if (video->frame() == 1) {
+ encoder->Control(VP9E_SET_TILE_COLUMNS, n_tiles_);
+ }
+ }
+
+ void UpdateMD5(::libvpx_test::Decoder *dec, const vpx_codec_cx_pkt_t *pkt,
+ ::libvpx_test::MD5 *md5) {
+ const vpx_codec_err_t res =
+ dec->DecodeFrame(reinterpret_cast<uint8_t*>(pkt->data.frame.buf),
+ pkt->data.frame.sz);
+ if (res != VPX_CODEC_OK) {
+ abort_ = true;
+ ASSERT_EQ(VPX_CODEC_OK, res);
+ }
+ const vpx_image_t *img = dec->GetDxData().Next();
+ md5->Add(img);
+ }
+
+ virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+ UpdateMD5(fw_dec_, pkt, &md5_fw_order_);
+ UpdateMD5(inv_dec_, pkt, &md5_inv_order_);
+ }
+
+ private:
+ int n_tiles_;
+ protected:
+ ::libvpx_test::MD5 md5_fw_order_, md5_inv_order_;
+ ::libvpx_test::Decoder *fw_dec_, *inv_dec_;
+};
+
+// Run an encode with one or two tile columns (VP9E_SET_TILE_COLUMNS takes
+// the log2 of the column count, and the test is instantiated with 0 and 1),
+// then decode the output in both normal and inverted tile order. If both
+// decodes produce identical MD5s, the tiles are independent and the test
+// passes.
+TEST_P(TileIndependenceTest, MD5Match) {
+ const vpx_rational timebase = { 33333333, 1000000000 };
+ cfg_.g_timebase = timebase;
+ cfg_.rc_target_bitrate = 500;
+ cfg_.g_lag_in_frames = 25;
+ cfg_.rc_end_usage = VPX_VBR;
+
+ libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 704, 144,
+ timebase.den, timebase.num, 0, 30);
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+
+ const char *md5_fw_str = md5_fw_order_.Get();
+ const char *md5_inv_str = md5_inv_order_.Get();
+
+  // We could use ASSERT_EQ(0, memcmp(..., ..., 16)) here, but ASSERT_STREQ()
+  // gives nicer output when it fails. That may not matter much, since the
+  // compared strings are just MD5 digests.
+ ASSERT_STREQ(md5_fw_str, md5_inv_str);
+}
+
+VP9_INSTANTIATE_TEST_CASE(TileIndependenceTest,
+ ::testing::Range(0, 2, 1));
+
+} // namespace
diff --git a/libvpx/test/util.h b/libvpx/test/util.h
index 06a70cc..533a1db 100644
--- a/libvpx/test/util.h
+++ b/libvpx/test/util.h
@@ -11,8 +11,38 @@
#ifndef TEST_UTIL_H_
#define TEST_UTIL_H_
+#include <stdio.h>
+#include <math.h>
+#include <assert.h>  // for the format/size assert in compute_psnr()
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "vpx/vpx_image.h"
+
// Macros
#define PARAMS(...) ::testing::TestWithParam< std::tr1::tuple< __VA_ARGS__ > >
#define GET_PARAM(k) std::tr1::get< k >(GetParam())
+static double compute_psnr(const vpx_image_t *img1,
+ const vpx_image_t *img2) {
+ assert((img1->fmt == img2->fmt) &&
+ (img1->d_w == img2->d_w) &&
+ (img1->d_h == img2->d_h));
+
+ const unsigned int width_y = img1->d_w;
+ const unsigned int height_y = img1->d_h;
+ unsigned int i, j;
+
+ int64_t sqrerr = 0;
+ for (i = 0; i < height_y; ++i)
+ for (j = 0; j < width_y; ++j) {
+ int64_t d = img1->planes[VPX_PLANE_Y][i * img1->stride[VPX_PLANE_Y] + j] -
+ img2->planes[VPX_PLANE_Y][i * img2->stride[VPX_PLANE_Y] + j];
+ sqrerr += d * d;
+ }
+  // Cast so the division happens in floating point rather than truncating.
+  const double mse = static_cast<double>(sqrerr) / (width_y * height_y);
+ double psnr = 100.0;
+ if (mse > 0.0) {
+ psnr = 10 * log10(255.0 * 255.0 / mse);
+ }
+ return psnr;
+}
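+
+// Worked example (illustrative values only): a uniform per-pixel error of 1
+// gives mse == 1.0, so psnr == 10 * log10(255.0 * 255.0 / 1.0), roughly
+// 48.13 dB; identical images take the mse == 0 branch and report the
+// 100.0 dB cap.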
+
#endif // TEST_UTIL_H_
diff --git a/libvpx/test/variance_test.cc b/libvpx/test/variance_test.cc
new file mode 100644
index 0000000..dfa1a07
--- /dev/null
+++ b/libvpx/test/variance_test.cc
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <stdlib.h>
+#include <new>
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "test/clear_system_state.h"
+
+#include "vpx/vpx_integer.h"
+#include "vpx_config.h"
+extern "C" {
+#if CONFIG_VP8_ENCODER
+# include "vp8/common/variance.h"
+# include "vp8_rtcd.h"
+#endif
+#if CONFIG_VP9_ENCODER
+# include "vp9/encoder/vp9_variance.h"
+# include "vp9_rtcd.h"
+#endif
+}
+
+namespace {
+
+using ::std::tr1::get;
+using ::std::tr1::make_tuple;
+using ::std::tr1::tuple;
+
+template<typename VarianceFunctionType>
+class VarianceTest :
+ public ::testing::TestWithParam<tuple<int, int, VarianceFunctionType> > {
+ public:
+ virtual void SetUp() {
+ const tuple<int, int, VarianceFunctionType>& params = this->GetParam();
+ width_ = get<0>(params);
+ height_ = get<1>(params);
+ variance_ = get<2>(params);
+
+ block_size_ = width_ * height_;
+    src_ = new (std::nothrow) uint8_t[block_size_];
+    ref_ = new (std::nothrow) uint8_t[block_size_];
+ ASSERT_TRUE(src_ != NULL);
+ ASSERT_TRUE(ref_ != NULL);
+ }
+
+ virtual void TearDown() {
+ delete[] src_;
+ delete[] ref_;
+ libvpx_test::ClearSystemState();
+ }
+
+ protected:
+ void ZeroTest();
+ void OneQuarterTest();
+
+ uint8_t* src_;
+ uint8_t* ref_;
+ int width_;
+ int height_;
+ int block_size_;
+ VarianceFunctionType variance_;
+};
+
+template<typename VarianceFunctionType>
+void VarianceTest<VarianceFunctionType>::ZeroTest() {
+ for (int i = 0; i <= 255; ++i) {
+ memset(src_, i, block_size_);
+ for (int j = 0; j <= 255; ++j) {
+ memset(ref_, j, block_size_);
+ unsigned int sse;
+ const unsigned int var = variance_(src_, width_, ref_, width_, &sse);
+      EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j;
+ }
+ }
+}
+
+template<typename VarianceFunctionType>
+void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
+ memset(src_, 255, block_size_);
+ const int half = block_size_ / 2;
+ memset(ref_, 255, half);
+ memset(ref_ + half, 0, half);
+ unsigned int sse;
+ const unsigned int var = variance_(src_, width_, ref_, width_, &sse);
+ const unsigned int expected = block_size_ * 255 * 255 / 4;
+ EXPECT_EQ(expected, var);
+}
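+
+// Why block_size_ * 255 * 255 / 4 (a sketch, assuming the usual
+// var = sse - sum^2 / n definition): half the diffs are 0 and half are 255,
+// so sse = n/2 * 255^2 and sum = n/2 * 255, hence
+// var = n/2 * 255^2 - (n/2 * 255)^2 / n = n * 255^2 / 4.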
+
+// -----------------------------------------------------------------------------
+// VP8 test cases.
+
+namespace vp8 {
+
+#if CONFIG_VP8_ENCODER
+typedef VarianceTest<vp8_variance_fn_t> VP8VarianceTest;
+
+TEST_P(VP8VarianceTest, Zero) { ZeroTest(); }
+TEST_P(VP8VarianceTest, OneQuarter) { OneQuarterTest(); }
+
+const vp8_variance_fn_t variance4x4_c = vp8_variance4x4_c;
+const vp8_variance_fn_t variance8x8_c = vp8_variance8x8_c;
+const vp8_variance_fn_t variance8x16_c = vp8_variance8x16_c;
+const vp8_variance_fn_t variance16x8_c = vp8_variance16x8_c;
+const vp8_variance_fn_t variance16x16_c = vp8_variance16x16_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VP8VarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance4x4_c),
+ make_tuple(8, 8, variance8x8_c),
+ make_tuple(8, 16, variance8x16_c),
+ make_tuple(16, 8, variance16x8_c),
+ make_tuple(16, 16, variance16x16_c)));
+
+#if HAVE_MMX
+const vp8_variance_fn_t variance4x4_mmx = vp8_variance4x4_mmx;
+const vp8_variance_fn_t variance8x8_mmx = vp8_variance8x8_mmx;
+const vp8_variance_fn_t variance8x16_mmx = vp8_variance8x16_mmx;
+const vp8_variance_fn_t variance16x8_mmx = vp8_variance16x8_mmx;
+const vp8_variance_fn_t variance16x16_mmx = vp8_variance16x16_mmx;
+INSTANTIATE_TEST_CASE_P(
+ MMX, VP8VarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance4x4_mmx),
+ make_tuple(8, 8, variance8x8_mmx),
+ make_tuple(8, 16, variance8x16_mmx),
+ make_tuple(16, 8, variance16x8_mmx),
+ make_tuple(16, 16, variance16x16_mmx)));
+#endif
+
+#if HAVE_SSE2
+const vp8_variance_fn_t variance4x4_wmt = vp8_variance4x4_wmt;
+const vp8_variance_fn_t variance8x8_wmt = vp8_variance8x8_wmt;
+const vp8_variance_fn_t variance8x16_wmt = vp8_variance8x16_wmt;
+const vp8_variance_fn_t variance16x8_wmt = vp8_variance16x8_wmt;
+const vp8_variance_fn_t variance16x16_wmt = vp8_variance16x16_wmt;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VP8VarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance4x4_wmt),
+ make_tuple(8, 8, variance8x8_wmt),
+ make_tuple(8, 16, variance8x16_wmt),
+ make_tuple(16, 8, variance16x8_wmt),
+ make_tuple(16, 16, variance16x16_wmt)));
+#endif
+#endif // CONFIG_VP8_ENCODER
+
+} // namespace vp8
+
+// -----------------------------------------------------------------------------
+// VP9 test cases.
+
+namespace vp9 {
+
+#if CONFIG_VP9_ENCODER
+typedef VarianceTest<vp9_variance_fn_t> VP9VarianceTest;
+
+TEST_P(VP9VarianceTest, Zero) { ZeroTest(); }
+TEST_P(VP9VarianceTest, OneQuarter) { OneQuarterTest(); }
+
+const vp9_variance_fn_t variance4x4_c = vp9_variance4x4_c;
+const vp9_variance_fn_t variance8x8_c = vp9_variance8x8_c;
+const vp9_variance_fn_t variance8x16_c = vp9_variance8x16_c;
+const vp9_variance_fn_t variance16x8_c = vp9_variance16x8_c;
+const vp9_variance_fn_t variance16x16_c = vp9_variance16x16_c;
+INSTANTIATE_TEST_CASE_P(
+ C, VP9VarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance4x4_c),
+ make_tuple(8, 8, variance8x8_c),
+ make_tuple(8, 16, variance8x16_c),
+ make_tuple(16, 8, variance16x8_c),
+ make_tuple(16, 16, variance16x16_c)));
+
+#if HAVE_MMX
+const vp9_variance_fn_t variance4x4_mmx = vp9_variance4x4_mmx;
+const vp9_variance_fn_t variance8x8_mmx = vp9_variance8x8_mmx;
+const vp9_variance_fn_t variance8x16_mmx = vp9_variance8x16_mmx;
+const vp9_variance_fn_t variance16x8_mmx = vp9_variance16x8_mmx;
+const vp9_variance_fn_t variance16x16_mmx = vp9_variance16x16_mmx;
+INSTANTIATE_TEST_CASE_P(
+ MMX, VP9VarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance4x4_mmx),
+ make_tuple(8, 8, variance8x8_mmx),
+ make_tuple(8, 16, variance8x16_mmx),
+ make_tuple(16, 8, variance16x8_mmx),
+ make_tuple(16, 16, variance16x16_mmx)));
+#endif
+
+#if HAVE_SSE2
+const vp9_variance_fn_t variance4x4_wmt = vp9_variance4x4_sse2;
+const vp9_variance_fn_t variance8x8_wmt = vp9_variance8x8_sse2;
+const vp9_variance_fn_t variance8x16_wmt = vp9_variance8x16_sse2;
+const vp9_variance_fn_t variance16x8_wmt = vp9_variance16x8_sse2;
+const vp9_variance_fn_t variance16x16_wmt = vp9_variance16x16_sse2;
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VP9VarianceTest,
+ ::testing::Values(make_tuple(4, 4, variance4x4_wmt),
+ make_tuple(8, 8, variance8x8_wmt),
+ make_tuple(8, 16, variance8x16_wmt),
+ make_tuple(16, 8, variance16x8_wmt),
+ make_tuple(16, 16, variance16x16_wmt)));
+#endif
+#endif // CONFIG_VP9_ENCODER
+
+} // namespace vp9
+
+} // namespace
diff --git a/libvpx/test/video_source.h b/libvpx/test/video_source.h
index 9772657..26d5328 100644
--- a/libvpx/test/video_source.h
+++ b/libvpx/test/video_source.h
@@ -103,7 +103,7 @@ class DummyVideoSource : public VideoSource {
if (width != width_ || height != height_) {
vpx_img_free(img_);
raw_sz_ = ((width + 31)&~31) * height * 3 / 2;
- img_ = vpx_img_alloc(NULL, VPX_IMG_FMT_VPXI420, width, height, 32);
+ img_ = vpx_img_alloc(NULL, VPX_IMG_FMT_I420, width, height, 32);
width_ = width;
height_ = height;
}
diff --git a/libvpx/test/boolcoder_test.cc b/libvpx/test/vp8_boolcoder_test.cc
index 4e21be8..c3a8d12 100644
--- a/libvpx/test/boolcoder_test.cc
+++ b/libvpx/test/vp8_boolcoder_test.cc
@@ -26,6 +26,30 @@ extern "C" {
namespace {
const int num_tests = 10;
+
+// In a real use the 'decrypt_state' parameter will be a pointer to a struct
+// with whatever internal state the decryptor uses. For testing we'll just
+// xor with a constant key, and decrypt_state will point to the start of
+// the original buffer.
+const uint8_t secret_key[16] = {
+ 0x01, 0x12, 0x23, 0x34, 0x45, 0x56, 0x67, 0x78,
+ 0x89, 0x9a, 0xab, 0xbc, 0xcd, 0xde, 0xef, 0xf0
+};
+
+void encrypt_buffer(uint8_t *buffer, int size) {
+ for (int i = 0; i < size; ++i) {
+ buffer[i] ^= secret_key[i & 15];
+ }
+}
+
+void test_decrypt_cb(void *decrypt_state, const uint8_t *input,
+ uint8_t *output, int count) {
+ int offset = input - (uint8_t *)decrypt_state;
+ for (int i = 0; i < count; i++) {
+ output[i] = input[i] ^ secret_key[(offset + i) & 15];
+ }
+}
+
} // namespace
using libvpx_test::ACMRandom;
@@ -71,7 +95,13 @@ TEST(VP8, TestBitIO) {
vp8_stop_encode(&bw);
BOOL_DECODER br;
- vp8dx_start_decode(&br, bw_buffer, buffer_size);
+#if CONFIG_DECRYPT
+ encrypt_buffer(bw_buffer, buffer_size);
+ vp8dx_start_decode(&br, bw_buffer, buffer_size,
+ test_decrypt_cb, (void *)bw_buffer);
+#else
+ vp8dx_start_decode(&br, bw_buffer, buffer_size, NULL, NULL);
+#endif
bit_rnd.Reset(random_seed);
for (int i = 0; i < bits_to_test; ++i) {
if (bit_method == 2) {
diff --git a/libvpx/test/vp8_decrypt_test.cc b/libvpx/test/vp8_decrypt_test.cc
new file mode 100644
index 0000000..d850f00
--- /dev/null
+++ b/libvpx/test/vp8_decrypt_test.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cstdio>
+#include <cstdlib>
+#include <string>
+#include <vector>
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
+#include "test/ivf_video_source.h"
+
+namespace {
+// In a real use the 'decrypt_state' parameter will be a pointer to a struct
+// with whatever internal state the decryptor uses. For testing we'll just
+// xor with a constant key, and decrypt_state will point to the start of
+// the original buffer.
+const uint8_t test_key[16] = {
+ 0x01, 0x12, 0x23, 0x34, 0x45, 0x56, 0x67, 0x78,
+ 0x89, 0x9a, 0xab, 0xbc, 0xcd, 0xde, 0xef, 0xf0
+};
+
+void encrypt_buffer(const uint8_t *src, uint8_t *dst, int size, int offset = 0) {
+ for (int i = 0; i < size; ++i) {
+ dst[i] = src[i] ^ test_key[(offset + i) & 15];
+ }
+}
+
+void test_decrypt_cb(void *decrypt_state, const uint8_t *input,
+ uint8_t *output, int count) {
+ encrypt_buffer(input, output, count, input - (uint8_t *)decrypt_state);
+}
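+
+// Note (editorial): the key-stream offset is recovered with pointer
+// arithmetic, input - decrypt_state, so the callback stays correct even
+// when the decoder asks for an arbitrary slice of the encrypted buffer.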
+
+} // namespace
+
+namespace libvpx_test {
+
+TEST(TestDecrypt, DecryptWorks) {
+ libvpx_test::IVFVideoSource video("vp80-00-comprehensive-001.ivf");
+ video.Init();
+
+ vpx_codec_dec_cfg_t dec_cfg = {0};
+ VP8Decoder decoder(dec_cfg, 0);
+
+ video.Begin();
+
+ // no decryption
+ vpx_codec_err_t res = decoder.DecodeFrame(video.cxdata(), video.frame_size());
+ ASSERT_EQ(VPX_CODEC_OK, res) << decoder.DecodeError();
+
+ // decrypt frame
+ video.Next();
+
+#if CONFIG_DECRYPT
+ std::vector<uint8_t> encrypted(video.frame_size());
+ encrypt_buffer(video.cxdata(), &encrypted[0], video.frame_size());
+ vp8_decrypt_init di = { test_decrypt_cb, &encrypted[0] };
+ decoder.Control(VP8D_SET_DECRYPTOR, &di);
+#endif // CONFIG_DECRYPT
+
+ res = decoder.DecodeFrame(video.cxdata(), video.frame_size());
+ ASSERT_EQ(VPX_CODEC_OK, res) << decoder.DecodeError();
+}
+
+} // namespace libvpx_test
diff --git a/libvpx/test/vp8_fdct4x4_test.cc b/libvpx/test/vp8_fdct4x4_test.cc
new file mode 100644
index 0000000..3c60011
--- /dev/null
+++ b/libvpx/test/vp8_fdct4x4_test.cc
@@ -0,0 +1,169 @@
+/*
+* Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+*
+* Use of this source code is governed by a BSD-style license
+* that can be found in the LICENSE file in the root of the source
+* tree. An additional intellectual property rights grant can be found
+* in the file PATENTS. All contributing project authors may
+* be found in the AUTHORS file in the root of the source tree.
+*/
+
+
+#include <math.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
+
+extern "C" {
+#include "vp8_rtcd.h"
+}
+
+#include "test/acm_random.h"
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "vpx/vpx_integer.h"
+
+
+namespace {
+
+const int cospi8sqrt2minus1 = 20091;
+const int sinpi8sqrt2 = 35468;
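+// Editorial note: these appear to be Q16 fixed-point constants,
+// 20091 ~= (sqrt(2) * cos(pi/8) - 1) * 65536 and
+// 35468 ~= sqrt(2) * sin(pi/8) * 65536, so the "* c >> 16" expressions in
+// reference_idct4x4() approximate multiplication by the real coefficients.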
+
+void reference_idct4x4(const int16_t *input, int16_t *output) {
+ const int16_t *ip = input;
+ int16_t *op = output;
+
+ for (int i = 0; i < 4; ++i) {
+ const int a1 = ip[0] + ip[8];
+ const int b1 = ip[0] - ip[8];
+ const int temp1 = (ip[4] * sinpi8sqrt2) >> 16;
+ const int temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16);
+ const int c1 = temp1 - temp2;
+ const int temp3 = ip[4] + ((ip[4] * cospi8sqrt2minus1) >> 16);
+ const int temp4 = (ip[12] * sinpi8sqrt2) >> 16;
+ const int d1 = temp3 + temp4;
+ op[0] = a1 + d1;
+ op[12] = a1 - d1;
+ op[4] = b1 + c1;
+ op[8] = b1 - c1;
+ ++ip;
+ ++op;
+ }
+ ip = output;
+ op = output;
+ for (int i = 0; i < 4; ++i) {
+ const int a1 = ip[0] + ip[2];
+ const int b1 = ip[0] - ip[2];
+ const int temp1 = (ip[1] * sinpi8sqrt2) >> 16;
+ const int temp2 = ip[3] + ((ip[3] * cospi8sqrt2minus1) >> 16);
+ const int c1 = temp1 - temp2;
+ const int temp3 = ip[1] + ((ip[1] * cospi8sqrt2minus1) >> 16);
+ const int temp4 = (ip[3] * sinpi8sqrt2) >> 16;
+ const int d1 = temp3 + temp4;
+ op[0] = (a1 + d1 + 4) >> 3;
+ op[3] = (a1 - d1 + 4) >> 3;
+ op[1] = (b1 + c1 + 4) >> 3;
+ op[2] = (b1 - c1 + 4) >> 3;
+ ip += 4;
+ op += 4;
+ }
+}
+
+using libvpx_test::ACMRandom;
+
+TEST(Vp8FdctTest, SignBiasCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ int16_t test_input_block[16];
+ int16_t test_output_block[16];
+ const int pitch = 8;
+ int count_sign_block[16][2];
+ const int count_test_block = 1000000;
+
+ memset(count_sign_block, 0, sizeof(count_sign_block));
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < 16; ++j)
+ test_input_block[j] = rnd.Rand8() - rnd.Rand8();
+
+ vp8_short_fdct4x4_c(test_input_block, test_output_block, pitch);
+
+ for (int j = 0; j < 16; ++j) {
+ if (test_output_block[j] < 0)
+ ++count_sign_block[j][0];
+ else if (test_output_block[j] > 0)
+ ++count_sign_block[j][1];
+ }
+ }
+
+ bool bias_acceptable = true;
+ for (int j = 0; j < 16; ++j)
+ bias_acceptable = bias_acceptable &&
+ (abs(count_sign_block[j][0] - count_sign_block[j][1]) < 10000);
+
+ EXPECT_EQ(true, bias_acceptable)
+ << "Error: 4x4 FDCT has a sign bias > 1% for input range [-255, 255]";
+
+ memset(count_sign_block, 0, sizeof(count_sign_block));
+
+ for (int i = 0; i < count_test_block; ++i) {
+ // Initialize a test block with input range [-15, 15].
+ for (int j = 0; j < 16; ++j)
+ test_input_block[j] = (rnd.Rand8() >> 4) - (rnd.Rand8() >> 4);
+
+ vp8_short_fdct4x4_c(test_input_block, test_output_block, pitch);
+
+ for (int j = 0; j < 16; ++j) {
+ if (test_output_block[j] < 0)
+ ++count_sign_block[j][0];
+ else if (test_output_block[j] > 0)
+ ++count_sign_block[j][1];
+ }
+ }
+
+ bias_acceptable = true;
+ for (int j = 0; j < 16; ++j)
+ bias_acceptable = bias_acceptable &&
+ (abs(count_sign_block[j][0] - count_sign_block[j][1]) < 100000);
+
+ EXPECT_EQ(true, bias_acceptable)
+ << "Error: 4x4 FDCT has a sign bias > 10% for input range [-15, 15]";
+}
+
+TEST(Vp8FdctTest, RoundTripErrorCheck) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ int max_error = 0;
+ double total_error = 0;
+ const int count_test_block = 1000000;
+ for (int i = 0; i < count_test_block; ++i) {
+ int16_t test_input_block[16];
+ int16_t test_temp_block[16];
+ int16_t test_output_block[16];
+
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < 16; ++j)
+ test_input_block[j] = rnd.Rand8() - rnd.Rand8();
+
+ const int pitch = 8;
+ vp8_short_fdct4x4_c(test_input_block, test_temp_block, pitch);
+ reference_idct4x4(test_temp_block, test_output_block);
+
+ for (int j = 0; j < 16; ++j) {
+ const int diff = test_input_block[j] - test_output_block[j];
+ const int error = diff * diff;
+ if (max_error < error)
+ max_error = error;
+ total_error += error;
+ }
+ }
+
+  EXPECT_GE(1, max_error)
+ << "Error: FDCT/IDCT has an individual roundtrip error > 1";
+
+ EXPECT_GE(count_test_block, total_error)
+ << "Error: FDCT/IDCT has average roundtrip error > 1 per block";
+}
+
+} // namespace
diff --git a/libvpx/test/vp9_boolcoder_test.cc b/libvpx/test/vp9_boolcoder_test.cc
new file mode 100644
index 0000000..42b2229
--- /dev/null
+++ b/libvpx/test/vp9_boolcoder_test.cc
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+extern "C" {
+#include "vp9/encoder/vp9_boolhuff.h"
+#include "vp9/decoder/vp9_dboolhuff.h"
+}
+
+#include "acm_random.h"
+#include "vpx/vpx_integer.h"
+
+using libvpx_test::ACMRandom;
+
+namespace {
+const int num_tests = 10;
+} // namespace
+
+TEST(VP9, TestBitIO) {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ for (int n = 0; n < num_tests; ++n) {
+    for (int method = 0; method <= 7; ++method) {  // various probabilities
+ const int bits_to_test = 1000;
+ uint8_t probas[bits_to_test];
+
+ for (int i = 0; i < bits_to_test; ++i) {
+ const int parity = i & 1;
+ probas[i] =
+ (method == 0) ? 0 : (method == 1) ? 255 :
+ (method == 2) ? 128 :
+ (method == 3) ? rnd.Rand8() :
+ (method == 4) ? (parity ? 0 : 255) :
+              // alternate between low and high probabilities:
+ (method == 5) ? (parity ? rnd(128) : 255 - rnd(128)) :
+ (method == 6) ?
+ (parity ? rnd(64) : 255 - rnd(64)) :
+ (parity ? rnd(32) : 255 - rnd(32));
+ }
+ for (int bit_method = 0; bit_method <= 3; ++bit_method) {
+ const int random_seed = 6432;
+ const int buffer_size = 10000;
+ ACMRandom bit_rnd(random_seed);
+ vp9_writer bw;
+ uint8_t bw_buffer[buffer_size];
+ vp9_start_encode(&bw, bw_buffer);
+
+ int bit = (bit_method == 0) ? 0 : (bit_method == 1) ? 1 : 0;
+ for (int i = 0; i < bits_to_test; ++i) {
+ if (bit_method == 2) {
+ bit = (i & 1);
+ } else if (bit_method == 3) {
+ bit = bit_rnd(2);
+ }
+ vp9_write(&bw, bit, static_cast<int>(probas[i]));
+ }
+
+ vp9_stop_encode(&bw);
+
+ // First bit should be zero
+ GTEST_ASSERT_EQ(bw_buffer[0] & 0x80, 0);
+
+ vp9_reader br;
+ vp9_reader_init(&br, bw_buffer, buffer_size);
+ bit_rnd.Reset(random_seed);
+ for (int i = 0; i < bits_to_test; ++i) {
+ if (bit_method == 2) {
+ bit = (i & 1);
+ } else if (bit_method == 3) {
+ bit = bit_rnd(2);
+ }
+ GTEST_ASSERT_EQ(vp9_read(&br, probas[i]), bit)
+ << "pos: " << i << " / " << bits_to_test
+ << " bit_method: " << bit_method
+ << " method: " << method;
+ }
+ }
+ }
+ }
+}
diff --git a/libvpx/test/webm_video_source.h b/libvpx/test/webm_video_source.h
new file mode 100644
index 0000000..c7919a9
--- /dev/null
+++ b/libvpx/test/webm_video_source.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef TEST_WEBM_VIDEO_SOURCE_H_
+#define TEST_WEBM_VIDEO_SOURCE_H_
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+#include <new>
+#include <string>
+#include "nestegg/include/nestegg/nestegg.h"
+#include "test/video_source.h"
+
+namespace libvpx_test {
+
+static int
+nestegg_read_cb(void *buffer, size_t length, void *userdata) {
+ FILE *f = reinterpret_cast<FILE *>(userdata);
+
+ if (fread(buffer, 1, length, f) < length) {
+ if (ferror(f))
+ return -1;
+ if (feof(f))
+ return 0;
+ }
+ return 1;
+}
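+
+// (The returns above follow nestegg's read-callback convention:
+// 1 = success, 0 = end of stream, -1 = read error.)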
+
+
+static int
+nestegg_seek_cb(int64_t offset, int whence, void *userdata) {
+ FILE *f = reinterpret_cast<FILE *>(userdata);
+ switch (whence) {
+ case NESTEGG_SEEK_SET:
+ whence = SEEK_SET;
+ break;
+ case NESTEGG_SEEK_CUR:
+ whence = SEEK_CUR;
+ break;
+ case NESTEGG_SEEK_END:
+ whence = SEEK_END;
+ break;
+  }
+ return fseek(f, (long)offset, whence) ? -1 : 0;
+}
+
+
+static int64_t
+nestegg_tell_cb(void *userdata) {
+ FILE *f = reinterpret_cast<FILE *>(userdata);
+ return ftell(f);
+}
+
+
+static void
+nestegg_log_cb(nestegg *context, unsigned int severity, char const *format,
+ ...) {
+ va_list ap;
+
+ va_start(ap, format);
+ vfprintf(stderr, format, ap);
+ fprintf(stderr, "\n");
+ va_end(ap);
+}
+
+// This class extends VideoSource to allow parsing of WebM files,
+// so that we can do actual file decodes.
+class WebMVideoSource : public CompressedVideoSource {
+ public:
+ explicit WebMVideoSource(const std::string &file_name)
+ : file_name_(file_name),
+ input_file_(NULL),
+ nestegg_ctx_(NULL),
+ pkt_(NULL),
+ video_track_(0),
+ chunk_(0),
+ chunks_(0),
+ buf_(NULL),
+ buf_sz_(0),
+ frame_(0),
+ end_of_file_(false) {
+ }
+
+ virtual ~WebMVideoSource() {
+ if (input_file_)
+ fclose(input_file_);
+ if (nestegg_ctx_)
+ nestegg_destroy(nestegg_ctx_);
+ }
+
+ virtual void Init() {
+ }
+
+ virtual void Begin() {
+ input_file_ = OpenTestDataFile(file_name_);
+ ASSERT_TRUE(input_file_) << "Input file open failed. Filename: "
+ << file_name_;
+
+ nestegg_io io = {nestegg_read_cb, nestegg_seek_cb, nestegg_tell_cb,
+ input_file_};
+ ASSERT_FALSE(nestegg_init(&nestegg_ctx_, io, NULL))
+ << "nestegg_init failed";
+
+ unsigned int n;
+ ASSERT_FALSE(nestegg_track_count(nestegg_ctx_, &n))
+ << "failed to get track count";
+
+ for (unsigned int i = 0; i < n; i++) {
+ int track_type = nestegg_track_type(nestegg_ctx_, i);
+ ASSERT_GE(track_type, 0) << "failed to get track type";
+
+ if (track_type == NESTEGG_TRACK_VIDEO) {
+ video_track_ = i;
+ break;
+ }
+ }
+
+ FillFrame();
+ }
+
+ virtual void Next() {
+ ++frame_;
+ FillFrame();
+ }
+
+ void FillFrame() {
+ if (chunk_ >= chunks_) {
+ unsigned int track;
+
+ do {
+ /* End of this packet, get another. */
+ if (pkt_)
+ nestegg_free_packet(pkt_);
+
+ int again = nestegg_read_packet(nestegg_ctx_, &pkt_);
+ ASSERT_GE(again, 0) << "nestegg_read_packet failed";
+ if (!again) {
+ end_of_file_ = true;
+ return;
+ }
+
+ ASSERT_FALSE(nestegg_packet_track(pkt_, &track))
+ << "nestegg_packet_track failed";
+ } while (track != video_track_);
+
+ ASSERT_FALSE(nestegg_packet_count(pkt_, &chunks_))
+ << "nestegg_packet_count failed";
+ chunk_ = 0;
+ }
+
+ ASSERT_FALSE(nestegg_packet_data(pkt_, chunk_, &buf_, &buf_sz_))
+ << "nestegg_packet_data failed";
+ chunk_++;
+ }
+
+ virtual const uint8_t *cxdata() const {
+ return end_of_file_ ? NULL : buf_;
+ }
+ virtual const unsigned int frame_size() const { return buf_sz_; }
+ virtual const unsigned int frame_number() const { return frame_; }
+
+ protected:
+ std::string file_name_;
+ FILE *input_file_;
+ nestegg *nestegg_ctx_;
+ nestegg_packet *pkt_;
+ unsigned int video_track_;
+ unsigned int chunk_;
+ unsigned int chunks_;
+ uint8_t *buf_;
+ size_t buf_sz_;
+ unsigned int frame_;
+ bool end_of_file_;
+};
+
+} // namespace libvpx_test
+
+#endif // TEST_WEBM_VIDEO_SOURCE_H_
diff --git a/libvpx/third_party/libyuv/source/scale.c b/libvpx/third_party/libyuv/source/scale.c
index c142a17..72a817d 100644
--- a/libvpx/third_party/libyuv/source/scale.c
+++ b/libvpx/third_party/libyuv/source/scale.c
@@ -632,7 +632,7 @@ TALIGN16(const uint16, scaleab2[8]) =
{ 65536 / 3, 65536 / 3, 65536 / 2, 65536 / 3, 65536 / 3, 65536 / 2, 0, 0 };
#endif
-#if defined(_M_IX86) && !defined(YUV_DISABLE_ASM)
+#if defined(_M_IX86) && !defined(YUV_DISABLE_ASM) && defined(_MSC_VER)
#define HAS_SCALEROWDOWN2_SSE2
// Reads 32 pixels, throws half away and writes 16 pixels.
diff --git a/libvpx/third_party/x86inc/LICENSE b/libvpx/third_party/x86inc/LICENSE
new file mode 100644
index 0000000..7d07645
--- /dev/null
+++ b/libvpx/third_party/x86inc/LICENSE
@@ -0,0 +1,18 @@
+Copyright (C) 2005-2012 x264 project
+
+Authors: Loren Merritt <lorenm@u.washington.edu>
+ Anton Mitrofanov <BugMaster@narod.ru>
+ Jason Garrett-Glaser <darkshikari@gmail.com>
+ Henrik Gramner <hengar-6@student.ltu.se>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/libvpx/third_party/x86inc/README.webm b/libvpx/third_party/x86inc/README.webm
new file mode 100644
index 0000000..02cd9ab
--- /dev/null
+++ b/libvpx/third_party/x86inc/README.webm
@@ -0,0 +1,11 @@
+URL: http://git.videolan.org/?p=x264.git
+Version: 999b753ff0f4dc872077f4fa90d465e948cbe656
+License: ISC
+License File: LICENSE
+
+Description:
+x264/libav's framework for x86 assembly. Contains a variety of macros and
+defines that help automatically allow assembly to work cross-platform.
+
+Local Modifications:
+Some modifications to allow PIC to work with x86inc.
diff --git a/libvpx/third_party/x86inc/x86inc.asm b/libvpx/third_party/x86inc/x86inc.asm
new file mode 100644
index 0000000..a66a96b
--- /dev/null
+++ b/libvpx/third_party/x86inc/x86inc.asm
@@ -0,0 +1,1125 @@
+;*****************************************************************************
+;* x86inc.asm: x264asm abstraction layer
+;*****************************************************************************
+;* Copyright (C) 2005-2012 x264 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;* Anton Mitrofanov <BugMaster@narod.ru>
+;* Jason Garrett-Glaser <darkshikari@gmail.com>
+;* Henrik Gramner <hengar-6@student.ltu.se>
+;*
+;* Permission to use, copy, modify, and/or distribute this software for any
+;* purpose with or without fee is hereby granted, provided that the above
+;* copyright notice and this permission notice appear in all copies.
+;*
+;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+;*****************************************************************************
+
+; This is a header file for the x264ASM assembly language, which uses
+; NASM/YASM syntax combined with a large number of macros to provide easy
+; abstraction between different calling conventions (x86_32, win64, linux64).
+; It also has various other useful features to simplify writing the kind of
+; DSP functions that are most often used in x264.
+
+; Unlike the rest of x264, this file is available under an ISC license, as it
+; has significant usefulness outside of x264 and we want it to be available
+; to the largest audience possible. Of course, if you modify it for your own
+; purposes to add a new feature, we strongly encourage contributing a patch
+; as this feature might be useful for others as well. Send patches or ideas
+; to x264-devel@videolan.org .
+
+%include "vpx_config.asm"
+
+%define program_name vp9
+
+
+%define UNIX64 0
+%define WIN64 0
+%if ARCH_X86_64
+ %ifidn __OUTPUT_FORMAT__,win32
+ %define WIN64 1
+ %elifidn __OUTPUT_FORMAT__,win64
+ %define WIN64 1
+ %elifidn __OUTPUT_FORMAT__,x64
+ %define WIN64 1
+ %else
+ %define UNIX64 1
+ %endif
+%endif
+
+%ifidn __OUTPUT_FORMAT__,elf32
+ %define mangle(x) x
+%elifidn __OUTPUT_FORMAT__,elf64
+ %define mangle(x) x
+%elifidn __OUTPUT_FORMAT__,elf
+ %define mangle(x) x
+%elifidn __OUTPUT_FORMAT__,x64
+ %define mangle(x) x
+%elifidn __OUTPUT_FORMAT__,win64
+ %define mangle(x) x
+%else
+ %define mangle(x) _ %+ x
+%endif
+
+; FIXME: All of the 64bit asm functions that take a stride as an argument
+; via register, assume that the high dword of that register is filled with 0.
+; This is true in practice (since we never do any 64bit arithmetic on strides,
+; and x264's strides are all positive), but is not guaranteed by the ABI.
+
+; Name of the .rodata section.
+; Kludge: Something on OS X fails to align .rodata even given an align attribute,
+; so use a different read-only section.
+%macro SECTION_RODATA 0-1 16
+ %ifidn __OUTPUT_FORMAT__,macho64
+ SECTION .text align=%1
+ %elifidn __OUTPUT_FORMAT__,macho
+ SECTION .text align=%1
+ fakegot:
+ %elifidn __OUTPUT_FORMAT__,aout
+ section .text
+ %else
+ SECTION .rodata align=%1
+ %endif
+%endmacro
+
+; aout does not support align=
+%macro SECTION_TEXT 0-1 16
+ %ifidn __OUTPUT_FORMAT__,aout
+ SECTION .text
+ %else
+ SECTION .text align=%1
+ %endif
+%endmacro
+
+%if WIN64
+ %define PIC
+%elifidn __OUTPUT_FORMAT__,macho64
+ %define PIC
+%elif ARCH_X86_64 == 0
+; x86_32 doesn't require PIC.
+; Some distros prefer shared objects to be PIC, but nothing breaks if
+; the code contains a few textrels, so we'll skip that complexity.
+ %undef PIC
+%elif CONFIG_PIC
+ %define PIC
+%endif
+%ifdef PIC
+ default rel
+%endif
+
+; Always use long nops (reduces 0x90 spam in disassembly on x86_32)
+%ifndef __NASM_VER__
+CPU amdnop
+%else
+%use smartalign
+ALIGNMODE k7
+%endif
+
+; Macros to eliminate most code duplication between x86_32 and x86_64:
+; Currently this works only for leaf functions which load all their arguments
+; into registers at the start, and make no other use of the stack. Luckily that
+; covers most of x264's asm.
+
+; PROLOGUE:
+; %1 = number of arguments. loads them from stack if needed.
+; %2 = number of registers used. pushes callee-saved regs if needed.
+; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
+; %4 = list of names to define to registers
+; PROLOGUE can also be invoked by adding the same options to cglobal
+
+; e.g.
+; cglobal foo, 2,3,0, dst, src, tmp
+; declares a function (foo), taking two args (dst and src) and one local variable (tmp)
+
+; TODO Some functions can use some args directly from the stack. If they're the
+; last args then you can just not declare them, but if they're in the middle
+; we need a more flexible macro.
+
+; RET:
+; Pops anything that was pushed by PROLOGUE, and returns.
+
+; REP_RET:
+; Same, but if it doesn't pop anything it becomes a 2-byte ret, for athlons
+; which are slow when a normal ret follows a branch.
+
+; registers:
+; rN and rNq are the native-size register holding function argument N
+; rNd, rNw, rNb are dword, word, and byte size
+; rNm is the original location of arg N (a register or on the stack), dword
+; rNmp is native size
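+;
+; Example (illustrative, matching the UNIX64 table below): r0q/r0d/r0w/r0b
+; name rdi/edi/di/dil, so "mov r0d, 1" writes the low dword of the first
+; argument register.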
+
+%macro DECLARE_REG 5-6
+ %define r%1q %2
+ %define r%1d %3
+ %define r%1w %4
+ %define r%1b %5
+ %if %0 == 5
+ %define r%1m %3
+ %define r%1mp %2
+ %elif ARCH_X86_64 ; memory
+ %define r%1m [rsp + stack_offset + %6]
+ %define r%1mp qword r %+ %1m
+ %else
+ %define r%1m [esp + stack_offset + %6]
+ %define r%1mp dword r %+ %1m
+ %endif
+ %define r%1 %2
+%endmacro
+
+%macro DECLARE_REG_SIZE 2
+ %define r%1q r%1
+ %define e%1q r%1
+ %define r%1d e%1
+ %define e%1d e%1
+ %define r%1w %1
+ %define e%1w %1
+ %define r%1b %2
+ %define e%1b %2
+%if ARCH_X86_64 == 0
+ %define r%1 e%1
+%endif
+%endmacro
+
+DECLARE_REG_SIZE ax, al
+DECLARE_REG_SIZE bx, bl
+DECLARE_REG_SIZE cx, cl
+DECLARE_REG_SIZE dx, dl
+DECLARE_REG_SIZE si, sil
+DECLARE_REG_SIZE di, dil
+DECLARE_REG_SIZE bp, bpl
+
+; t# defines for when per-arch register allocation is more complex than just function arguments
+
+%macro DECLARE_REG_TMP 1-*
+ %assign %%i 0
+ %rep %0
+ CAT_XDEFINE t, %%i, r%1
+ %assign %%i %%i+1
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro DECLARE_REG_TMP_SIZE 0-*
+ %rep %0
+ %define t%1q t%1 %+ q
+ %define t%1d t%1 %+ d
+ %define t%1w t%1 %+ w
+ %define t%1b t%1 %+ b
+ %rotate 1
+ %endrep
+%endmacro
+
+DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
+
+%if ARCH_X86_64
+ %define gprsize 8
+%else
+ %define gprsize 4
+%endif
+
+%macro PUSH 1
+ push %1
+ %assign stack_offset stack_offset+gprsize
+%endmacro
+
+%macro POP 1
+ pop %1
+ %assign stack_offset stack_offset-gprsize
+%endmacro
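+
+; e.g. "PUSH r3" both emits a push and grows stack_offset by gprsize, so the
+; [rsp + stack_offset + N] argument addresses defined via DECLARE_REG keep
+; pointing at the same caller-owned slots.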
+
+%macro PUSH_IF_USED 1-*
+ %rep %0
+ %if %1 < regs_used
+ PUSH r%1
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro POP_IF_USED 1-*
+ %rep %0
+ %if %1 < regs_used
+ pop r%1
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro LOAD_IF_USED 1-*
+ %rep %0
+ %if %1 < num_args
+ mov r%1, r %+ %1 %+ mp
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro SUB 2
+ sub %1, %2
+ %ifidn %1, rsp
+ %assign stack_offset stack_offset+(%2)
+ %endif
+%endmacro
+
+%macro ADD 2
+ add %1, %2
+ %ifidn %1, rsp
+ %assign stack_offset stack_offset-(%2)
+ %endif
+%endmacro
+
+%macro movifnidn 2
+ %ifnidn %1, %2
+ mov %1, %2
+ %endif
+%endmacro
+
+%macro movsxdifnidn 2
+ %ifnidn %1, %2
+ movsxd %1, %2
+ %endif
+%endmacro
+
+%macro ASSERT 1
+ %if (%1) == 0
+ %error assert failed
+ %endif
+%endmacro
+
+%macro DEFINE_ARGS 0-*
+ %ifdef n_arg_names
+ %assign %%i 0
+ %rep n_arg_names
+ CAT_UNDEF arg_name %+ %%i, q
+ CAT_UNDEF arg_name %+ %%i, d
+ CAT_UNDEF arg_name %+ %%i, w
+ CAT_UNDEF arg_name %+ %%i, b
+ CAT_UNDEF arg_name %+ %%i, m
+ CAT_UNDEF arg_name %+ %%i, mp
+ CAT_UNDEF arg_name, %%i
+ %assign %%i %%i+1
+ %endrep
+ %endif
+
+ %xdefine %%stack_offset stack_offset
+ %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
+ %assign %%i 0
+ %rep %0
+ %xdefine %1q r %+ %%i %+ q
+ %xdefine %1d r %+ %%i %+ d
+ %xdefine %1w r %+ %%i %+ w
+ %xdefine %1b r %+ %%i %+ b
+ %xdefine %1m r %+ %%i %+ m
+ %xdefine %1mp r %+ %%i %+ mp
+ CAT_XDEFINE arg_name, %%i, %1
+ %assign %%i %%i+1
+ %rotate 1
+ %endrep
+ %xdefine stack_offset %%stack_offset
+ %assign n_arg_names %0
+%endmacro
+
+%if WIN64 ; Windows x64 ;=================================================
+
+DECLARE_REG 0, rcx, ecx, cx, cl
+DECLARE_REG 1, rdx, edx, dx, dl
+DECLARE_REG 2, R8, R8D, R8W, R8B
+DECLARE_REG 3, R9, R9D, R9W, R9B
+DECLARE_REG 4, R10, R10D, R10W, R10B, 40
+DECLARE_REG 5, R11, R11D, R11W, R11B, 48
+DECLARE_REG 6, rax, eax, ax, al, 56
+DECLARE_REG 7, rdi, edi, di, dil, 64
+DECLARE_REG 8, rsi, esi, si, sil, 72
+DECLARE_REG 9, rbx, ebx, bx, bl, 80
+DECLARE_REG 10, rbp, ebp, bp, bpl, 88
+DECLARE_REG 11, R12, R12D, R12W, R12B, 96
+DECLARE_REG 12, R13, R13D, R13W, R13B, 104
+DECLARE_REG 13, R14, R14D, R14W, R14B, 112
+DECLARE_REG 14, R15, R15D, R15W, R15B, 120
+
+%macro PROLOGUE 2-4+ 0 ; #args, #regs, #xmm_regs, arg_names...
+ %assign num_args %1
+ %assign regs_used %2
+ ASSERT regs_used >= num_args
+ ASSERT regs_used <= 15
+ PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
+ %if mmsize == 8
+ %assign xmm_regs_used 0
+ %else
+ WIN64_SPILL_XMM %3
+ %endif
+ LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ DEFINE_ARGS %4
+%endmacro
+
+%macro WIN64_SPILL_XMM 1
+ %assign xmm_regs_used %1
+ ASSERT xmm_regs_used <= 16
+ %if xmm_regs_used > 6
+ SUB rsp, (xmm_regs_used-6)*16+16
+ %assign %%i xmm_regs_used
+ %rep (xmm_regs_used-6)
+ %assign %%i %%i-1
+ movdqa [rsp + (%%i-6)*16+(~stack_offset&8)], xmm %+ %%i
+ %endrep
+ %endif
+%endmacro
+
+%macro WIN64_RESTORE_XMM_INTERNAL 1
+ %if xmm_regs_used > 6
+ %assign %%i xmm_regs_used
+ %rep (xmm_regs_used-6)
+ %assign %%i %%i-1
+ movdqa xmm %+ %%i, [%1 + (%%i-6)*16+(~stack_offset&8)]
+ %endrep
+ add %1, (xmm_regs_used-6)*16+16
+ %endif
+%endmacro
+
+%macro WIN64_RESTORE_XMM 1
+ WIN64_RESTORE_XMM_INTERNAL %1
+    %assign stack_offset stack_offset-(xmm_regs_used-6)*16-16
+ %assign xmm_regs_used 0
+%endmacro
+
+%macro RET 0
+ WIN64_RESTORE_XMM_INTERNAL rsp
+ POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
+ ret
+%endmacro
+
+%macro REP_RET 0
+ %if regs_used > 7 || xmm_regs_used > 6
+ RET
+ %else
+ rep ret
+ %endif
+%endmacro
+
+%elif ARCH_X86_64 ; *nix x64 ;=============================================
+
+DECLARE_REG 0, rdi, edi, di, dil
+DECLARE_REG 1, rsi, esi, si, sil
+DECLARE_REG 2, rdx, edx, dx, dl
+DECLARE_REG 3, rcx, ecx, cx, cl
+DECLARE_REG 4, R8, R8D, R8W, R8B
+DECLARE_REG 5, R9, R9D, R9W, R9B
+DECLARE_REG 6, rax, eax, ax, al, 8
+DECLARE_REG 7, R10, R10D, R10W, R10B, 16
+DECLARE_REG 8, R11, R11D, R11W, R11B, 24
+DECLARE_REG 9, rbx, ebx, bx, bl, 32
+DECLARE_REG 10, rbp, ebp, bp, bpl, 40
+DECLARE_REG 11, R12, R12D, R12W, R12B, 48
+DECLARE_REG 12, R13, R13D, R13W, R13B, 56
+DECLARE_REG 13, R14, R14D, R14W, R14B, 64
+DECLARE_REG 14, R15, R15D, R15W, R15B, 72
+
+%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
+ %assign num_args %1
+ %assign regs_used %2
+ ASSERT regs_used >= num_args
+ ASSERT regs_used <= 15
+ PUSH_IF_USED 9, 10, 11, 12, 13, 14
+ LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
+ DEFINE_ARGS %4
+%endmacro
+
+%macro RET 0
+ POP_IF_USED 14, 13, 12, 11, 10, 9
+ ret
+%endmacro
+
+%macro REP_RET 0
+ %if regs_used > 9
+ RET
+ %else
+ rep ret
+ %endif
+%endmacro
+
+%else ; X86_32 ;==============================================================
+
+DECLARE_REG 0, eax, eax, ax, al, 4
+DECLARE_REG 1, ecx, ecx, cx, cl, 8
+DECLARE_REG 2, edx, edx, dx, dl, 12
+DECLARE_REG 3, ebx, ebx, bx, bl, 16
+DECLARE_REG 4, esi, esi, si, null, 20
+DECLARE_REG 5, edi, edi, di, null, 24
+DECLARE_REG 6, ebp, ebp, bp, null, 28
+%define rsp esp
+
+%macro DECLARE_ARG 1-*
+ %rep %0
+ %define r%1m [esp + stack_offset + 4*%1 + 4]
+ %define r%1mp dword r%1m
+ %rotate 1
+ %endrep
+%endmacro
+
+DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
+
+%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
+ %assign num_args %1
+ %assign regs_used %2
+ %if regs_used > 7
+ %assign regs_used 7
+ %endif
+ ASSERT regs_used >= num_args
+ PUSH_IF_USED 3, 4, 5, 6
+ LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
+ DEFINE_ARGS %4
+%endmacro
+
+%macro RET 0
+ POP_IF_USED 6, 5, 4, 3
+ ret
+%endmacro
+
+%macro REP_RET 0
+ %if regs_used > 3
+ RET
+ %else
+ rep ret
+ %endif
+%endmacro
+
+%endif ;======================================================================
+
+%if WIN64 == 0
+%macro WIN64_SPILL_XMM 1
+%endmacro
+%macro WIN64_RESTORE_XMM 1
+%endmacro
+%endif
+
+;=============================================================================
+; arch-independent part
+;=============================================================================
+
+%assign function_align 16
+
+; Begin a function.
+; Applies any symbol mangling needed for C linkage, and sets up a define such that
+; subsequent uses of the function name automatically refer to the mangled version.
+; Appends cpuflags to the function name if cpuflags has been specified.
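+; Illustrative (hypothetical) usage: after "INIT_XMM sse2",
+;   cglobal sad_16x16, 4, 5
+; emits a hidden, 16-byte-aligned label whose mangled name ends in
+; _sad_16x16_sse2 (the prefix depends on program_name), then runs PROLOGUE
+; with 4 arguments and 5 general-purpose registers reserved.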
+%macro cglobal 1-2+ ; name, [PROLOGUE args]
+%if %0 == 1
+ cglobal_internal %1 %+ SUFFIX
+%else
+ cglobal_internal %1 %+ SUFFIX, %2
+%endif
+%endmacro
+%macro cglobal_internal 1-2+
+ %ifndef cglobaled_%1
+ %xdefine %1 mangle(program_name %+ _ %+ %1)
+ %xdefine %1.skip_prologue %1 %+ .skip_prologue
+ CAT_XDEFINE cglobaled_, %1, 1
+ %endif
+ %xdefine current_function %1
+ %ifidn __OUTPUT_FORMAT__,elf
+ global %1:function hidden
+ %elifidn __OUTPUT_FORMAT__,elf32
+ global %1:function hidden
+ %elifidn __OUTPUT_FORMAT__,elf64
+ global %1:function hidden
+ %else
+ global %1
+ %endif
+ align function_align
+ %1:
+ RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
+ %assign stack_offset 0
+ %if %0 > 1
+ PROLOGUE %2
+ %endif
+%endmacro
+
+%macro cextern 1
+ %xdefine %1 mangle(program_name %+ _ %+ %1)
+ CAT_XDEFINE cglobaled_, %1, 1
+ extern %1
+%endmacro
+
+; like cextern, but without the prefix
+%macro cextern_naked 1
+ %xdefine %1 mangle(%1)
+ CAT_XDEFINE cglobaled_, %1, 1
+ extern %1
+%endmacro
+
+%macro const 2+
+ %xdefine %1 mangle(program_name %+ _ %+ %1)
+ global %1
+ %1: %2
+%endmacro
+
+; This is needed for ELF, otherwise the GNU linker assumes the stack is
+; executable by default.
+%ifidn __OUTPUT_FORMAT__,elf
+SECTION .note.GNU-stack noalloc noexec nowrite progbits
+%elifidn __OUTPUT_FORMAT__,elf32
+SECTION .note.GNU-stack noalloc noexec nowrite progbits
+%elifidn __OUTPUT_FORMAT__,elf64
+SECTION .note.GNU-stack noalloc noexec nowrite progbits
+%endif
+
+; cpuflags
+
+%assign cpuflags_mmx (1<<0)
+%assign cpuflags_mmx2 (1<<1) | cpuflags_mmx
+%assign cpuflags_3dnow (1<<2) | cpuflags_mmx
+%assign cpuflags_3dnow2 (1<<3) | cpuflags_3dnow
+%assign cpuflags_sse (1<<4) | cpuflags_mmx2
+%assign cpuflags_sse2 (1<<5) | cpuflags_sse
+%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
+%assign cpuflags_sse3 (1<<7) | cpuflags_sse2
+%assign cpuflags_ssse3 (1<<8) | cpuflags_sse3
+%assign cpuflags_sse4 (1<<9) | cpuflags_ssse3
+%assign cpuflags_sse42 (1<<10)| cpuflags_sse4
+%assign cpuflags_avx (1<<11)| cpuflags_sse42
+%assign cpuflags_xop (1<<12)| cpuflags_avx
+%assign cpuflags_fma4 (1<<13)| cpuflags_avx
+
+%assign cpuflags_cache32 (1<<16)
+%assign cpuflags_cache64 (1<<17)
+%assign cpuflags_slowctz (1<<18)
+%assign cpuflags_lzcnt (1<<19)
+%assign cpuflags_misalign (1<<20)
+%assign cpuflags_aligned (1<<21) ; not a cpu feature, but a function variant
+%assign cpuflags_atom (1<<22)
+
+%define cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x))
+%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))
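+; e.g. after "INIT_XMM ssse3", cpuflag(sse2) evaluates to 1 (sse2 is implied
+; by ssse3) and cpuflag(sse4) to 0; notcpuflag(x) is the complement.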
+
+; Takes up to 2 cpuflags from the above list.
+; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
+; You shouldn't need to invoke this macro directly; it's a subroutine for INIT_MMX & co.
+%macro INIT_CPUFLAGS 0-2
+ %if %0 >= 1
+ %xdefine cpuname %1
+ %assign cpuflags cpuflags_%1
+ %if %0 >= 2
+ %xdefine cpuname %1_%2
+ %assign cpuflags cpuflags | cpuflags_%2
+ %endif
+ %xdefine SUFFIX _ %+ cpuname
+ %if cpuflag(avx)
+ %assign avx_enabled 1
+ %endif
+ %if mmsize == 16 && notcpuflag(sse2)
+ %define mova movaps
+ %define movu movups
+ %define movnta movntps
+ %endif
+ %if cpuflag(aligned)
+ %define movu mova
+ %elifidn %1, sse3
+ %define movu lddqu
+ %endif
+ %else
+ %xdefine SUFFIX
+ %undef cpuname
+ %undef cpuflags
+ %endif
+%endmacro
+
+; merge mmx and sse*
+
+%macro CAT_XDEFINE 3
+ %xdefine %1%2 %3
+%endmacro
+
+%macro CAT_UNDEF 2
+ %undef %1%2
+%endmacro
+
+%macro INIT_MMX 0-1+
+ %assign avx_enabled 0
+ %define RESET_MM_PERMUTATION INIT_MMX %1
+ %define mmsize 8
+ %define num_mmregs 8
+ %define mova movq
+ %define movu movq
+ %define movh movd
+ %define movnta movntq
+ %assign %%i 0
+ %rep 8
+ CAT_XDEFINE m, %%i, mm %+ %%i
+ CAT_XDEFINE nmm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ %rep 8
+ CAT_UNDEF m, %%i
+ CAT_UNDEF nmm, %%i
+ %assign %%i %%i+1
+ %endrep
+ INIT_CPUFLAGS %1
+%endmacro
+
+%macro INIT_XMM 0-1+
+ %assign avx_enabled 0
+ %define RESET_MM_PERMUTATION INIT_XMM %1
+ %define mmsize 16
+ %define num_mmregs 8
+ %if ARCH_X86_64
+ %define num_mmregs 16
+ %endif
+ %define mova movdqa
+ %define movu movdqu
+ %define movh movq
+ %define movnta movntdq
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, xmm %+ %%i
+ CAT_XDEFINE nxmm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ INIT_CPUFLAGS %1
+%endmacro
+
+; FIXME: INIT_AVX can be replaced by INIT_XMM avx
+%macro INIT_AVX 0
+ INIT_XMM
+ %assign avx_enabled 1
+ %define PALIGNR PALIGNR_SSSE3
+ %define RESET_MM_PERMUTATION INIT_AVX
+%endmacro
+
+%macro INIT_YMM 0-1+
+ %assign avx_enabled 1
+ %define RESET_MM_PERMUTATION INIT_YMM %1
+ %define mmsize 32
+ %define num_mmregs 8
+ %if ARCH_X86_64
+ %define num_mmregs 16
+ %endif
+ %define mova vmovaps
+ %define movu vmovups
+ %undef movh
+ %define movnta vmovntps
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, ymm %+ %%i
+ CAT_XDEFINE nymm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ INIT_CPUFLAGS %1
+%endmacro
+
+INIT_XMM
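+; The bare INIT_XMM above selects plain SSE-sized registers with an empty
+; SUFFIX; e.g. "INIT_XMM sse2" would instead append _sse2 to cglobal names.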
+
+; I often want to use macros that permute their arguments. e.g. there's no
+; efficient way to implement butterfly or transpose or dct without swapping some
+; arguments.
+;
+; I would like to not have to manually keep track of the permutations:
+; If I insert a permutation in the middle of a function, it should automatically
+; change everything that follows. For more complex macros I may also have multiple
+; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
+;
+; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
+; permutes its arguments. It's equivalent to exchanging the contents of the
+; registers, except that this way you exchange the register names instead, so it
+; doesn't cost any cycles.
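+; Hypothetical example: after "SWAP 0, 2", code that writes "mova m0, [r0]"
+; assembles using the physical register previously named m2; no instructions
+; are emitted for the exchange itself.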
+
+%macro PERMUTE 2-* ; takes a list of pairs to swap
+%rep %0/2
+ %xdefine tmp%2 m%2
+ %xdefine ntmp%2 nm%2
+ %rotate 2
+%endrep
+%rep %0/2
+ %xdefine m%1 tmp%2
+ %xdefine nm%1 ntmp%2
+ %undef tmp%2
+ %undef ntmp%2
+ %rotate 2
+%endrep
+%endmacro
+
+%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs)
+%rep %0-1
+%ifdef m%1
+ %xdefine tmp m%1
+ %xdefine m%1 m%2
+ %xdefine m%2 tmp
+ CAT_XDEFINE n, m%1, %1
+ CAT_XDEFINE n, m%2, %2
+%else
+ ; If we were called as "SWAP m0,m1" rather than "SWAP 0,1", infer the original numbers here.
+ ; Be careful using this mode in nested macros though, as in some cases there may be
+ ; other copies of m# that have already been dereferenced and don't get updated correctly.
+ %xdefine %%n1 n %+ %1
+ %xdefine %%n2 n %+ %2
+ %xdefine tmp m %+ %%n1
+ CAT_XDEFINE m, %%n1, m %+ %%n2
+ CAT_XDEFINE m, %%n2, tmp
+ CAT_XDEFINE n, m %+ %%n1, %%n1
+ CAT_XDEFINE n, m %+ %%n2, %%n2
+%endif
+ %undef tmp
+ %rotate 1
+%endrep
+%endmacro
+
+; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
+; calls to that function will automatically load the permutation, so values can
+; be returned in mmregs.
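+; For instance (illustrative), ending a helper with SAVE_MM_PERMUTATION lets
+; a later "call helper" pick the helper's register names back up via
+; LOAD_MM_PERMUTATION, so a value left in m0 keeps that name at the call site.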
+%macro SAVE_MM_PERMUTATION 0-1
+ %if %0
+ %xdefine %%f %1_m
+ %else
+ %xdefine %%f current_function %+ _m
+ %endif
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE %%f, %%i, m %+ %%i
+ %assign %%i %%i+1
+ %endrep
+%endmacro
+
+%macro LOAD_MM_PERMUTATION 1 ; name to load from
+ %ifdef %1_m0
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, %1_m %+ %%i
+ CAT_XDEFINE n, m %+ %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ %endif
+%endmacro
+
+; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
+%macro call 1
+ call_internal %1, %1 %+ SUFFIX
+%endmacro
+%macro call_internal 2
+ %xdefine %%i %1
+ %ifndef cglobaled_%1
+ %ifdef cglobaled_%2
+ %xdefine %%i %2
+ %endif
+ %endif
+ call %%i
+ LOAD_MM_PERMUTATION %%i
+%endmacro
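+; e.g. inside an _sse2 function, "call helper" resolves to helper_sse2 when
+; helper_sse2 was declared via cglobal but plain helper was not (names are
+; illustrative).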
+
+; Substitutions that reduce instruction size but are functionally equivalent
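+; e.g. "add r0, 128" needs a 4-byte immediate, whereas the substituted
+; "sub r0, -128" fits a sign-extended 1-byte immediate, saving 3 bytes.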
+%macro add 2
+ %ifnum %2
+ %if %2==128
+ sub %1, -128
+ %else
+ add %1, %2
+ %endif
+ %else
+ add %1, %2
+ %endif
+%endmacro
+
+%macro sub 2
+ %ifnum %2
+ %if %2==128
+ add %1, -128
+ %else
+ sub %1, %2
+ %endif
+ %else
+ sub %1, %2
+ %endif
+%endmacro
+
+;=============================================================================
+; AVX abstraction layer
+;=============================================================================
+
+%assign i 0
+%rep 16
+ %if i < 8
+ CAT_XDEFINE sizeofmm, i, 8
+ %endif
+ CAT_XDEFINE sizeofxmm, i, 16
+ CAT_XDEFINE sizeofymm, i, 32
+%assign i i+1
+%endrep
+%undef i
+
+;%1 == instruction
+;%2 == 1 if float, 0 if int
+;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm)
+;%4 == number of operands given
+;%5+: operands
+%macro RUN_AVX_INSTR 6-7+
+ %ifid %5
+ %define %%size sizeof%5
+ %else
+ %define %%size mmsize
+ %endif
+ %if %%size==32
+ %if %0 >= 7
+ v%1 %5, %6, %7
+ %else
+ v%1 %5, %6
+ %endif
+ %else
+ %if %%size==8
+ %define %%regmov movq
+ %elif %2
+ %define %%regmov movaps
+ %else
+ %define %%regmov movdqa
+ %endif
+
+ %if %4>=3+%3
+ %ifnidn %5, %6
+ %if avx_enabled && sizeof%5==16
+ v%1 %5, %6, %7
+ %else
+ %%regmov %5, %6
+ %1 %5, %7
+ %endif
+ %else
+ %1 %5, %7
+ %endif
+ %elif %3
+ %1 %5, %6, %7
+ %else
+ %1 %5, %6
+ %endif
+ %endif
+%endmacro
+
+; 3arg AVX ops with a memory arg can only have it in src2,
+; whereas SSE emulation of 3arg prefers to have it in src1 (i.e. the mov).
+; So, if the op is symmetric and the wrong one is memory, swap them.
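+; e.g. a symmetric "paddw m0, [r0], m1" is reordered to "paddw m0, m1, [r0]"
+; so the memory operand ends up in src2 (operands are illustrative).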
+%macro RUN_AVX_INSTR1 8
+ %assign %%swap 0
+ %if avx_enabled
+ %ifnid %6
+ %assign %%swap 1
+ %endif
+ %elifnidn %5, %6
+ %ifnid %7
+ %assign %%swap 1
+ %endif
+ %endif
+ %if %%swap && %3 == 0 && %8 == 1
+ RUN_AVX_INSTR %1, %2, %3, %4, %5, %7, %6
+ %else
+ RUN_AVX_INSTR %1, %2, %3, %4, %5, %6, %7
+ %endif
+%endmacro
+
+;%1 == instruction
+;%2 == 1 if float, 0 if int
+;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 3-operand (xmm, xmm, xmm)
+;%4 == 1 if symmetric (i.e. doesn't matter which src arg is which), 0 if not
+%macro AVX_INSTR 4
+ %macro %1 2-9 fnord, fnord, fnord, %1, %2, %3, %4
+ %ifidn %3, fnord
+ RUN_AVX_INSTR %6, %7, %8, 2, %1, %2
+ %elifidn %4, fnord
+ RUN_AVX_INSTR1 %6, %7, %8, 3, %1, %2, %3, %9
+ %elifidn %5, fnord
+ RUN_AVX_INSTR %6, %7, %8, 4, %1, %2, %3, %4
+ %else
+ RUN_AVX_INSTR %6, %7, %8, 5, %1, %2, %3, %4, %5
+ %endif
+ %endmacro
+%endmacro
+
+AVX_INSTR addpd, 1, 0, 1
+AVX_INSTR addps, 1, 0, 1
+AVX_INSTR addsd, 1, 0, 1
+AVX_INSTR addss, 1, 0, 1
+AVX_INSTR addsubpd, 1, 0, 0
+AVX_INSTR addsubps, 1, 0, 0
+AVX_INSTR andpd, 1, 0, 1
+AVX_INSTR andps, 1, 0, 1
+AVX_INSTR andnpd, 1, 0, 0
+AVX_INSTR andnps, 1, 0, 0
+AVX_INSTR blendpd, 1, 0, 0
+AVX_INSTR blendps, 1, 0, 0
+AVX_INSTR blendvpd, 1, 0, 0
+AVX_INSTR blendvps, 1, 0, 0
+AVX_INSTR cmppd, 1, 0, 0
+AVX_INSTR cmpps, 1, 0, 0
+AVX_INSTR cmpsd, 1, 0, 0
+AVX_INSTR cmpss, 1, 0, 0
+AVX_INSTR cvtdq2ps, 1, 0, 0
+AVX_INSTR cvtps2dq, 1, 0, 0
+AVX_INSTR divpd, 1, 0, 0
+AVX_INSTR divps, 1, 0, 0
+AVX_INSTR divsd, 1, 0, 0
+AVX_INSTR divss, 1, 0, 0
+AVX_INSTR dppd, 1, 1, 0
+AVX_INSTR dpps, 1, 1, 0
+AVX_INSTR haddpd, 1, 0, 0
+AVX_INSTR haddps, 1, 0, 0
+AVX_INSTR hsubpd, 1, 0, 0
+AVX_INSTR hsubps, 1, 0, 0
+AVX_INSTR maxpd, 1, 0, 1
+AVX_INSTR maxps, 1, 0, 1
+AVX_INSTR maxsd, 1, 0, 1
+AVX_INSTR maxss, 1, 0, 1
+AVX_INSTR minpd, 1, 0, 1
+AVX_INSTR minps, 1, 0, 1
+AVX_INSTR minsd, 1, 0, 1
+AVX_INSTR minss, 1, 0, 1
+AVX_INSTR movhlps, 1, 0, 0
+AVX_INSTR movlhps, 1, 0, 0
+AVX_INSTR movsd, 1, 0, 0
+AVX_INSTR movss, 1, 0, 0
+AVX_INSTR mpsadbw, 0, 1, 0
+AVX_INSTR mulpd, 1, 0, 1
+AVX_INSTR mulps, 1, 0, 1
+AVX_INSTR mulsd, 1, 0, 1
+AVX_INSTR mulss, 1, 0, 1
+AVX_INSTR orpd, 1, 0, 1
+AVX_INSTR orps, 1, 0, 1
+AVX_INSTR packsswb, 0, 0, 0
+AVX_INSTR packssdw, 0, 0, 0
+AVX_INSTR packuswb, 0, 0, 0
+AVX_INSTR packusdw, 0, 0, 0
+AVX_INSTR paddb, 0, 0, 1
+AVX_INSTR paddw, 0, 0, 1
+AVX_INSTR paddd, 0, 0, 1
+AVX_INSTR paddq, 0, 0, 1
+AVX_INSTR paddsb, 0, 0, 1
+AVX_INSTR paddsw, 0, 0, 1
+AVX_INSTR paddusb, 0, 0, 1
+AVX_INSTR paddusw, 0, 0, 1
+AVX_INSTR palignr, 0, 1, 0
+AVX_INSTR pand, 0, 0, 1
+AVX_INSTR pandn, 0, 0, 0
+AVX_INSTR pavgb, 0, 0, 1
+AVX_INSTR pavgw, 0, 0, 1
+AVX_INSTR pblendvb, 0, 0, 0
+AVX_INSTR pblendw, 0, 1, 0
+AVX_INSTR pcmpestri, 0, 0, 0
+AVX_INSTR pcmpestrm, 0, 0, 0
+AVX_INSTR pcmpistri, 0, 0, 0
+AVX_INSTR pcmpistrm, 0, 0, 0
+AVX_INSTR pcmpeqb, 0, 0, 1
+AVX_INSTR pcmpeqw, 0, 0, 1
+AVX_INSTR pcmpeqd, 0, 0, 1
+AVX_INSTR pcmpeqq, 0, 0, 1
+AVX_INSTR pcmpgtb, 0, 0, 0
+AVX_INSTR pcmpgtw, 0, 0, 0
+AVX_INSTR pcmpgtd, 0, 0, 0
+AVX_INSTR pcmpgtq, 0, 0, 0
+AVX_INSTR phaddw, 0, 0, 0
+AVX_INSTR phaddd, 0, 0, 0
+AVX_INSTR phaddsw, 0, 0, 0
+AVX_INSTR phsubw, 0, 0, 0
+AVX_INSTR phsubd, 0, 0, 0
+AVX_INSTR phsubsw, 0, 0, 0
+AVX_INSTR pmaddwd, 0, 0, 1
+AVX_INSTR pmaddubsw, 0, 0, 0
+AVX_INSTR pmaxsb, 0, 0, 1
+AVX_INSTR pmaxsw, 0, 0, 1
+AVX_INSTR pmaxsd, 0, 0, 1
+AVX_INSTR pmaxub, 0, 0, 1
+AVX_INSTR pmaxuw, 0, 0, 1
+AVX_INSTR pmaxud, 0, 0, 1
+AVX_INSTR pminsb, 0, 0, 1
+AVX_INSTR pminsw, 0, 0, 1
+AVX_INSTR pminsd, 0, 0, 1
+AVX_INSTR pminub, 0, 0, 1
+AVX_INSTR pminuw, 0, 0, 1
+AVX_INSTR pminud, 0, 0, 1
+AVX_INSTR pmulhuw, 0, 0, 1
+AVX_INSTR pmulhrsw, 0, 0, 1
+AVX_INSTR pmulhw, 0, 0, 1
+AVX_INSTR pmullw, 0, 0, 1
+AVX_INSTR pmulld, 0, 0, 1
+AVX_INSTR pmuludq, 0, 0, 1
+AVX_INSTR pmuldq, 0, 0, 1
+AVX_INSTR por, 0, 0, 1
+AVX_INSTR psadbw, 0, 0, 1
+AVX_INSTR pshufb, 0, 0, 0
+AVX_INSTR psignb, 0, 0, 0
+AVX_INSTR psignw, 0, 0, 0
+AVX_INSTR psignd, 0, 0, 0
+AVX_INSTR psllw, 0, 0, 0
+AVX_INSTR pslld, 0, 0, 0
+AVX_INSTR psllq, 0, 0, 0
+AVX_INSTR pslldq, 0, 0, 0
+AVX_INSTR psraw, 0, 0, 0
+AVX_INSTR psrad, 0, 0, 0
+AVX_INSTR psrlw, 0, 0, 0
+AVX_INSTR psrld, 0, 0, 0
+AVX_INSTR psrlq, 0, 0, 0
+AVX_INSTR psrldq, 0, 0, 0
+AVX_INSTR psubb, 0, 0, 0
+AVX_INSTR psubw, 0, 0, 0
+AVX_INSTR psubd, 0, 0, 0
+AVX_INSTR psubq, 0, 0, 0
+AVX_INSTR psubsb, 0, 0, 0
+AVX_INSTR psubsw, 0, 0, 0
+AVX_INSTR psubusb, 0, 0, 0
+AVX_INSTR psubusw, 0, 0, 0
+AVX_INSTR punpckhbw, 0, 0, 0
+AVX_INSTR punpckhwd, 0, 0, 0
+AVX_INSTR punpckhdq, 0, 0, 0
+AVX_INSTR punpckhqdq, 0, 0, 0
+AVX_INSTR punpcklbw, 0, 0, 0
+AVX_INSTR punpcklwd, 0, 0, 0
+AVX_INSTR punpckldq, 0, 0, 0
+AVX_INSTR punpcklqdq, 0, 0, 0
+AVX_INSTR pxor, 0, 0, 1
+AVX_INSTR shufps, 1, 1, 0
+AVX_INSTR subpd, 1, 0, 0
+AVX_INSTR subps, 1, 0, 0
+AVX_INSTR subsd, 1, 0, 0
+AVX_INSTR subss, 1, 0, 0
+AVX_INSTR unpckhpd, 1, 0, 0
+AVX_INSTR unpckhps, 1, 0, 0
+AVX_INSTR unpcklpd, 1, 0, 0
+AVX_INSTR unpcklps, 1, 0, 0
+AVX_INSTR xorpd, 1, 0, 1
+AVX_INSTR xorps, 1, 0, 1
+
+; 3DNow! instructions, for sharing code between AVX, SSE and 3DNow!
+AVX_INSTR pfadd, 1, 0, 1
+AVX_INSTR pfsub, 1, 0, 0
+AVX_INSTR pfmul, 1, 0, 1
+
+; base-4 constants for shuffles
+%assign i 0
+%rep 256
+ %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
+ %if j < 10
+ CAT_XDEFINE q000, j, i
+ %elif j < 100
+ CAT_XDEFINE q00, j, i
+ %elif j < 1000
+ CAT_XDEFINE q0, j, i
+ %else
+ CAT_XDEFINE q, j, i
+ %endif
+%assign i i+1
+%endrep
+%undef i
+%undef j
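+; e.g. q3120 expands to 0xD8 (binary 11_01_10_00), so "pshufd m0, m1, q3120"
+; picks source elements 0, 2, 1, 3 for destination elements 0 through 3.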
+
+%macro FMA_INSTR 3
+ %macro %1 4-7 %1, %2, %3
+ %if cpuflag(xop)
+ v%5 %1, %2, %3, %4
+ %else
+ %6 %1, %2, %3
+ %7 %1, %4
+ %endif
+ %endmacro
+%endmacro
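+; e.g. "pmacsdd m0, m1, m2, m3" computes m0 = m1*m2 + m3: a single vpmacsdd
+; on XOP hardware, otherwise emulated as pmulld followed by paddd.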
+
+FMA_INSTR pmacsdd, pmulld, paddd
+FMA_INSTR pmacsww, pmullw, paddw
+FMA_INSTR pmadcswd, pmaddwd, paddd
diff --git a/libvpx/tools/all_builds.py b/libvpx/tools/all_builds.py
new file mode 100755
index 0000000..d1f0c80
--- /dev/null
+++ b/libvpx/tools/all_builds.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+
+import getopt
+import subprocess
+import sys
+
+LONG_OPTIONS = ["shard=", "shards="]
+BASE_COMMAND = "./configure --enable-internal-stats --enable-experimental"
+
+def RunCommand(command):
+ run = subprocess.Popen(command, shell=True)
+ output = run.communicate()
+ if run.returncode:
+ print "Non-zero return code: " + str(run.returncode) + " => exiting!"
+ sys.exit(1)
+
+def list_of_experiments():
+ experiments = []
+ configure_file = open("configure")
+ list_start = False
+ for line in configure_file.read().split("\n"):
+ if line == 'EXPERIMENT_LIST="':
+ list_start = True
+ elif line == '"':
+ list_start = False
+ elif list_start:
+ currently_broken = ["csm"]
+ experiment = line[4:]
+ if experiment not in currently_broken:
+ experiments.append(experiment)
+ return experiments
+
+def main(argv):
+ # Parse arguments
+ options = {"--shard": 0, "--shards": 1}
+ if "--" in argv:
+ opt_end_index = argv.index("--")
+ else:
+ opt_end_index = len(argv)
+ try:
+ o, _ = getopt.getopt(argv[1:opt_end_index], None, LONG_OPTIONS)
+ except getopt.GetoptError, err:
+ print str(err)
+ print "Usage: %s [--shard=<n> --shards=<n>] -- [configure flag ...]"%argv[0]
+ sys.exit(2)
+
+ options.update(o)
+ extra_args = argv[opt_end_index + 1:]
+
+ # Shard experiment list
+ shard = int(options["--shard"])
+ shards = int(options["--shards"])
+ experiments = list_of_experiments()
+ base_command = " ".join([BASE_COMMAND] + extra_args)
+ configs = [base_command]
+ configs += ["%s --enable-%s" % (base_command, e) for e in experiments]
+ my_configs = zip(configs, range(len(configs)))
+ my_configs = filter(lambda x: x[1] % shards == shard, my_configs)
+ my_configs = [e[0] for e in my_configs]
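+ # Illustration: with --shards=4 --shard=1, this process builds configs
+ # 1, 5, 9, and so on, while the other shards cover the rest.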
+
+ # Run configs for this shard
+ for config in my_configs:
+ test_build(config)
+
+def test_build(configure_command):
+ print "\033[34m\033[47mTesting %s\033[0m" % (configure_command)
+ RunCommand(configure_command)
+ RunCommand("make clean")
+ RunCommand("make")
+
+if __name__ == "__main__":
+ main(sys.argv)
diff --git a/libvpx/tools/cpplint.py b/libvpx/tools/cpplint.py
new file mode 100755
index 0000000..159dbbb
--- /dev/null
+++ b/libvpx/tools/cpplint.py
@@ -0,0 +1,4020 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Here are some issues that I've had people identify in my code during reviews,
+# that I think are possible to flag automatically in a lint tool. If these were
+# caught by lint, it would save time both for myself and that of my reviewers.
+# Most likely, some of these are beyond the scope of the current lint framework,
+# but I think it is valuable to retain these wish-list items even if they cannot
+# be immediately implemented.
+#
+# Suggestions
+# -----------
+# - Check for no 'explicit' for multi-arg ctor
+# - Check for boolean assign RHS in parens
+# - Check for ctor initializer-list colon position and spacing
+# - Check that if there's a ctor, there should be a dtor
+# - Check accessors that return non-pointer member variables are
+# declared const
+# - Check accessors that return non-const pointer member vars are
+# *not* declared const
+# - Check for using public includes for testing
+# - Check for spaces between brackets in one-line inline method
+# - Check for no assert()
+# - Check for spaces surrounding operators
+# - Check for 0 in pointer context (should be NULL)
+# - Check for 0 in char context (should be '\0')
+# - Check for camel-case method name conventions for methods
+# that are not simple inline getters and setters
+# - Do not indent namespace contents
+# - Avoid inlining non-trivial constructors in header files
+# - Check for old-school (void) cast for call-sites of functions
+# ignored return value
+# - Check gUnit usage of anonymous namespace
+# - Check for class declaration order (typedefs, consts, enums,
+# ctor(s?), dtor, friend declarations, methods, member vars)
+#
+
+"""Does google-lint on c++ files.
+
+The goal of this script is to identify places in the code that *may*
+be in non-compliance with google style. It does not attempt to fix
+up these problems -- the point is to educate. It also does not
+attempt to find all problems, or to ensure that everything it does
+find is legitimately a problem.
+
+In particular, we can get very confused by /* and // inside strings!
+We do a small hack, which is to ignore //'s with "'s after them on the
+same line, but it is far from perfect (in either direction).
+"""
+
+import codecs
+import copy
+import getopt
+import math # for log
+import os
+import re
+import sre_compile
+import string
+import sys
+import unicodedata
+
+
+_USAGE = """
+Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
+ [--counting=total|toplevel|detailed]
+ <file> [file] ...
+
+ The style guidelines this tries to follow are those in
+ http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
+
+ Every problem is given a confidence score from 1-5, with 5 meaning we are
+ certain of the problem, and 1 meaning it could be a legitimate construct.
+ This will miss some errors, and is not a substitute for a code review.
+
+ To suppress false-positive errors of a certain category, add a
+ 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
+ suppresses errors of all categories on that line.
+
+ The files passed in will be linted; at least one file must be provided.
+ Linted extensions are .cc, .cpp, and .h. Other file types will be ignored.
+
+ Flags:
+
+ output=vs7
+ By default, the output is formatted to ease emacs parsing. Visual Studio
+ compatible output (vs7) may also be used. Other formats are unsupported.
+
+ verbose=#
+ Specify a number 0-5 to restrict errors to certain verbosity levels.
+
+ filter=-x,+y,...
+ Specify a comma-separated list of category-filters to apply: only
+ error messages whose category names pass the filters will be printed.
+ (Category names are printed with the message and look like
+ "[whitespace/indent]".) Filters are evaluated left to right.
+ "-FOO" and "FOO" means "do not print categories that start with FOO".
+ "+FOO" means "do print categories that start with FOO".
+
+ Examples: --filter=-whitespace,+whitespace/braces
+ --filter=whitespace,runtime/printf,+runtime/printf_format
+ --filter=-,+build/include_what_you_use
+
+ To see a list of all the categories used in cpplint, pass no arg:
+ --filter=
+
+ counting=total|toplevel|detailed
+ The total number of errors found is always printed. If
+ 'toplevel' is provided, then the count of errors in each of
+ the top-level categories like 'build' and 'whitespace' will
+ also be printed. If 'detailed' is provided, then a count
+ is provided for each category like 'build/class'.
+
+ root=subdir
+ The root directory used for deriving header guard CPP variable.
+ By default, the header guard CPP variable is calculated as the relative
+ path to the directory that contains .git, .hg, or .svn. When this flag
+ is specified, the relative path is calculated from the specified
+ directory. If the specified directory does not exist, this flag is
+ ignored.
+
+ Examples:
+ Assuming that src/.git exists, the header guard CPP variables for
+ src/chrome/browser/ui/browser.h are:
+
+ No flag => CHROME_BROWSER_UI_BROWSER_H_
+ --root=chrome => BROWSER_UI_BROWSER_H_
+ --root=chrome/browser => UI_BROWSER_H_
+"""
+
+# We categorize each error message we print. Here are the categories.
+# We want an explicit list so we can list them all in cpplint --filter=.
+# If you add a new error message with a new category, add it to the list
+# here! cpplint_unittest.py should tell you if you forget to do this.
+# \ used for clearer layout -- pylint: disable-msg=C6013
+_ERROR_CATEGORIES = [
+ 'build/class',
+ 'build/deprecated',
+ 'build/endif_comment',
+ 'build/explicit_make_pair',
+ 'build/forward_decl',
+ 'build/header_guard',
+ 'build/include',
+ 'build/include_alpha',
+ 'build/include_order',
+ 'build/include_what_you_use',
+ 'build/namespaces',
+ 'build/printf_format',
+ 'build/storage_class',
+ 'legal/copyright',
+ 'readability/alt_tokens',
+ 'readability/braces',
+ 'readability/casting',
+ 'readability/check',
+ 'readability/constructors',
+ 'readability/fn_size',
+ 'readability/function',
+ 'readability/multiline_comment',
+ 'readability/multiline_string',
+ 'readability/namespace',
+ 'readability/nolint',
+ 'readability/streams',
+ 'readability/todo',
+ 'readability/utf8',
+ 'runtime/arrays',
+ 'runtime/casting',
+ 'runtime/explicit',
+ 'runtime/int',
+ 'runtime/init',
+ 'runtime/invalid_increment',
+ 'runtime/member_string_references',
+ 'runtime/memset',
+ 'runtime/operator',
+ 'runtime/printf',
+ 'runtime/printf_format',
+ 'runtime/references',
+ 'runtime/rtti',
+ 'runtime/sizeof',
+ 'runtime/string',
+ 'runtime/threadsafe_fn',
+ 'whitespace/blank_line',
+ 'whitespace/braces',
+ 'whitespace/comma',
+ 'whitespace/comments',
+ 'whitespace/empty_loop_body',
+ 'whitespace/end_of_line',
+ 'whitespace/ending_newline',
+ 'whitespace/forcolon',
+ 'whitespace/indent',
+ 'whitespace/labels',
+ 'whitespace/line_length',
+ 'whitespace/newline',
+ 'whitespace/operators',
+ 'whitespace/parens',
+ 'whitespace/semicolon',
+ 'whitespace/tab',
+ 'whitespace/todo'
+ ]
+
+# The default state of the category filter. This is overridden by the --filter=
+# flag. By default all errors are on, so only add here categories that should be
+# off by default (i.e., categories that must be enabled by the --filter= flags).
+# All entries here should start with a '-' or '+', as in the --filter= flag.
+_DEFAULT_FILTERS = ['-build/include_alpha']
+
+# We used to check for high-bit characters, but after much discussion we
+# decided those were OK, as long as they were in UTF-8 and didn't represent
+# hard-coded international strings, which belong in a separate i18n file.
+
+# Headers that we consider STL headers.
+_STL_HEADERS = frozenset([
+ 'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception',
+ 'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set',
+ 'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'new',
+ 'pair.h', 'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack',
+ 'stl_alloc.h', 'stl_relops.h', 'type_traits.h',
+ 'utility', 'vector', 'vector.h',
+ ])
+
+
+# Non-STL C++ system headers.
+_CPP_HEADERS = frozenset([
+ 'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype',
+ 'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath',
+ 'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
+ 'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype',
+ 'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream',
+ 'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip',
+ 'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream',
+ 'istream.h', 'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h',
+ 'numeric', 'ostream', 'ostream.h', 'parsestream.h', 'pfstream.h',
+ 'PlotFile.h', 'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h',
+ 'ropeimpl.h', 'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept',
+ 'stdiostream.h', 'streambuf.h', 'stream.h', 'strfile.h', 'string',
+ 'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo', 'valarray',
+ ])
+
+
+# Assertion macros. These are defined in base/logging.h and
+# testing/base/gunit.h. Note that the _M versions need to come first
+# for substring matching to work.
+_CHECK_MACROS = [
+ 'DCHECK', 'CHECK',
+ 'EXPECT_TRUE_M', 'EXPECT_TRUE',
+ 'ASSERT_TRUE_M', 'ASSERT_TRUE',
+ 'EXPECT_FALSE_M', 'EXPECT_FALSE',
+ 'ASSERT_FALSE_M', 'ASSERT_FALSE',
+ ]
+
+# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
+_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
+
+for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
+ ('>=', 'GE'), ('>', 'GT'),
+ ('<=', 'LE'), ('<', 'LT')]:
+ _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
+ _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
+ _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
+ _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
+ _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
+ _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
+
+for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
+ ('>=', 'LT'), ('>', 'LE'),
+ ('<=', 'GT'), ('<', 'GE')]:
+ _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
+ _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
+ _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
+ _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
+
+# Alternative tokens and their replacements. For full list, see section 2.5
+# Alternative tokens [lex.digraph] in the C++ standard.
+#
+# Digraphs (such as '%:') are not included here since it's a mess to
+# match those on a word boundary.
+_ALT_TOKEN_REPLACEMENT = {
+ 'and': '&&',
+ 'bitor': '|',
+ 'or': '||',
+ 'xor': '^',
+ 'compl': '~',
+ 'bitand': '&',
+ 'and_eq': '&=',
+ 'or_eq': '|=',
+ 'xor_eq': '^=',
+ 'not': '!',
+ 'not_eq': '!='
+ }
+
+# Compile regular expression that matches all the above keywords. The "[ =()]"
+# bit is meant to avoid matching these keywords outside of boolean expressions.
+#
+# False positives include C-style multi-line comments (http://go/nsiut )
+# and multi-line strings (http://go/beujw ), but those have always been
+# troublesome for cpplint.
+_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
+ r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
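+# For example, this matches the "and" in "if (x and y)" but not the "and"
+# inside "android", because of the boundary classes on either side.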
+
+
+# These constants define types of headers for use with
+# _IncludeState.CheckNextIncludeOrder().
+_C_SYS_HEADER = 1
+_CPP_SYS_HEADER = 2
+_LIKELY_MY_HEADER = 3
+_POSSIBLE_MY_HEADER = 4
+_OTHER_HEADER = 5
+
+# These constants define the current inline assembly state
+_NO_ASM = 0 # Outside of inline assembly block
+_INSIDE_ASM = 1 # Inside inline assembly block
+_END_ASM = 2 # Last line of inline assembly block
+_BLOCK_ASM = 3 # The whole block is an inline assembly block
+
+# Match start of assembly blocks
+_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
+ r'(?:\s+(volatile|__volatile__))?'
+ r'\s*[{(]')
+
+
+_regexp_compile_cache = {}
+
+# Finds occurrences of NOLINT or NOLINT(...).
+_RE_SUPPRESSION = re.compile(r'\bNOLINT\b(\([^)]*\))?')
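+# For example, "int x;  // NOLINT(runtime/int)" suppresses only that
+# category on its line, while a bare "// NOLINT" suppresses all of them.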
+
+# {str, set(int)}: a map from error categories to sets of line numbers
+# on which those errors are expected and should be suppressed.
+_error_suppressions = {}
+
+# The root directory used for deriving header guard CPP variable.
+# This is set by --root flag.
+_root = None
+
+def ParseNolintSuppressions(filename, raw_line, linenum, error):
+ """Updates the global list of error-suppressions.
+
+ Parses any NOLINT comments on the current line, updating the global
+ error_suppressions store. Reports an error if the NOLINT comment
+ was malformed.
+
+ Args:
+ filename: str, the name of the input file.
+ raw_line: str, the line of input text, with comments.
+ linenum: int, the number of the current line.
+ error: function, an error handler.
+ """
+ # FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
+ matched = _RE_SUPPRESSION.search(raw_line)
+ if matched:
+ category = matched.group(1)
+ if category in (None, '(*)'): # => "suppress all"
+ _error_suppressions.setdefault(None, set()).add(linenum)
+ else:
+ if category.startswith('(') and category.endswith(')'):
+ category = category[1:-1]
+ if category in _ERROR_CATEGORIES:
+ _error_suppressions.setdefault(category, set()).add(linenum)
+ else:
+ error(filename, linenum, 'readability/nolint', 5,
+ 'Unknown NOLINT error category: %s' % category)
+
+
+def ResetNolintSuppressions():
+ "Resets the set of NOLINT suppressions to empty."
+ _error_suppressions.clear()
+
+
+def IsErrorSuppressedByNolint(category, linenum):
+ """Returns true if the specified error category is suppressed on this line.
+
+ Consults the global error_suppressions map populated by
+ ParseNolintSuppressions/ResetNolintSuppressions.
+
+ Args:
+ category: str, the category of the error.
+ linenum: int, the current line number.
+ Returns:
+ bool, True iff the error should be suppressed due to a NOLINT comment.
+ """
+ return (linenum in _error_suppressions.get(category, set()) or
+ linenum in _error_suppressions.get(None, set()))
+
+def Match(pattern, s):
+ """Matches the string with the pattern, caching the compiled regexp."""
+ # The regexp compilation caching is inlined in both Match and Search for
+ # performance reasons; factoring it out into a separate function turns out
+ # to be noticeably expensive.
+ if not pattern in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].match(s)
+
+
+def Search(pattern, s):
+ """Searches the string for the pattern, caching the compiled regexp."""
+ if not pattern in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].search(s)
+
+
+class _IncludeState(dict):
+ """Tracks line numbers for includes, and the order in which includes appear.
+
+ As a dict, an _IncludeState object serves as a mapping between include
+ filename and line number on which that file was included.
+
+ Call CheckNextIncludeOrder() once for each header in the file, passing
+ in the type constants defined above. Calls in an illegal order will
+ raise an _IncludeError with an appropriate error message.
+
+ """
+ # self._section will move monotonically through this set. If it ever
+ # needs to move backwards, CheckNextIncludeOrder will raise an error.
+ _INITIAL_SECTION = 0
+ _MY_H_SECTION = 1
+ _C_SECTION = 2
+ _CPP_SECTION = 3
+ _OTHER_H_SECTION = 4
+
+ _TYPE_NAMES = {
+ _C_SYS_HEADER: 'C system header',
+ _CPP_SYS_HEADER: 'C++ system header',
+ _LIKELY_MY_HEADER: 'header this file implements',
+ _POSSIBLE_MY_HEADER: 'header this file may implement',
+ _OTHER_HEADER: 'other header',
+ }
+ _SECTION_NAMES = {
+ _INITIAL_SECTION: "... nothing. (This can't be an error.)",
+ _MY_H_SECTION: 'a header this file implements',
+ _C_SECTION: 'C system header',
+ _CPP_SECTION: 'C++ system header',
+ _OTHER_H_SECTION: 'other header',
+ }
+
+ def __init__(self):
+ dict.__init__(self)
+ # The name of the current section.
+ self._section = self._INITIAL_SECTION
+ # The path of last found header.
+ self._last_header = ''
+
+ def CanonicalizeAlphabeticalOrder(self, header_path):
+ """Returns a path canonicalized for alphabetical comparison.
+
+ - replaces "-" with "_" so they both cmp the same.
+ - removes '-inl' since we don't require them to be after the main header.
+ - lowercase everything, just in case.
+
+ Args:
+ header_path: Path to be canonicalized.
+
+ Returns:
+ Canonicalized path.
+ """
+ return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
+
+ def IsInAlphabeticalOrder(self, header_path):
+ """Check if a header is in alphabetical order with the previous header.
+
+ Args:
+ header_path: Header to be checked.
+
+ Returns:
+ Returns true if the header is in alphabetical order.
+ """
+ canonical_header = self.CanonicalizeAlphabeticalOrder(header_path)
+ if self._last_header > canonical_header:
+ return False
+ self._last_header = canonical_header
+ return True
+
+ def CheckNextIncludeOrder(self, header_type):
+ """Returns a non-empty error message if the next header is out of order.
+
+ This function also updates the internal state to be ready to check
+ the next include.
+
+ Args:
+ header_type: One of the _XXX_HEADER constants defined above.
+
+ Returns:
+ The empty string if the header is in the right order, or an
+ error message describing what's wrong.
+
+ """
+ error_message = ('Found %s after %s' %
+ (self._TYPE_NAMES[header_type],
+ self._SECTION_NAMES[self._section]))
+
+ last_section = self._section
+
+ if header_type == _C_SYS_HEADER:
+ if self._section <= self._C_SECTION:
+ self._section = self._C_SECTION
+ else:
+ self._last_header = ''
+ return error_message
+ elif header_type == _CPP_SYS_HEADER:
+ if self._section <= self._CPP_SECTION:
+ self._section = self._CPP_SECTION
+ else:
+ self._last_header = ''
+ return error_message
+ elif header_type == _LIKELY_MY_HEADER:
+ if self._section <= self._MY_H_SECTION:
+ self._section = self._MY_H_SECTION
+ else:
+ self._section = self._OTHER_H_SECTION
+ elif header_type == _POSSIBLE_MY_HEADER:
+ if self._section <= self._MY_H_SECTION:
+ self._section = self._MY_H_SECTION
+ else:
+ # This will always be the fallback because we're not sure
+ # enough that the header is associated with this file.
+ self._section = self._OTHER_H_SECTION
+ else:
+ assert header_type == _OTHER_HEADER
+ self._section = self._OTHER_H_SECTION
+
+ if last_section != self._section:
+ self._last_header = ''
+
+ return ''
+
+
+class _CppLintState(object):
+ """Maintains module-wide state.."""
+
+ def __init__(self):
+ self.verbose_level = 1 # global setting.
+ self.error_count = 0 # global count of reported errors
+ # filters to apply when emitting error messages
+ self.filters = _DEFAULT_FILTERS[:]
+ self.counting = 'total' # In what way are we counting errors?
+ self.errors_by_category = {} # string to int dict storing error counts
+
+ # output format:
+ # "emacs" - format that emacs can parse (default)
+ # "vs7" - format that Microsoft Visual Studio 7 can parse
+ self.output_format = 'emacs'
+
+ def SetOutputFormat(self, output_format):
+ """Sets the output format for errors."""
+ self.output_format = output_format
+
+ def SetVerboseLevel(self, level):
+ """Sets the module's verbosity, and returns the previous setting."""
+ last_verbose_level = self.verbose_level
+ self.verbose_level = level
+ return last_verbose_level
+
+ def SetCountingStyle(self, counting_style):
+ """Sets the module's counting options."""
+ self.counting = counting_style
+
+ def SetFilters(self, filters):
+ """Sets the error-message filters.
+
+ These filters are applied when deciding whether to emit a given
+ error message.
+
+ Args:
+ filters: A string of comma-separated filters (e.g. "+whitespace/indent").
+ Each filter should start with + or -; else we die.
+
+ Raises:
+ ValueError: The comma-separated filters did not all start with '+' or '-'.
+ E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
+ """
+ # Default filters always have less priority than the flag ones.
+ self.filters = _DEFAULT_FILTERS[:]
+ for filt in filters.split(','):
+ clean_filt = filt.strip()
+ if clean_filt:
+ self.filters.append(clean_filt)
+ for filt in self.filters:
+ if not (filt.startswith('+') or filt.startswith('-')):
+ raise ValueError('Every filter in --filters must start with + or -'
+ ' (%s does not)' % filt)
+
+ def ResetErrorCounts(self):
+ """Sets the module's error statistic back to zero."""
+ self.error_count = 0
+ self.errors_by_category = {}
+
+ def IncrementErrorCount(self, category):
+ """Bumps the module's error statistic."""
+ self.error_count += 1
+ if self.counting in ('toplevel', 'detailed'):
+ if self.counting != 'detailed':
+ category = category.split('/')[0]
+ if category not in self.errors_by_category:
+ self.errors_by_category[category] = 0
+ self.errors_by_category[category] += 1
+
+ def PrintErrorCounts(self):
+ """Print a summary of errors by category, and the total."""
+ for category, count in self.errors_by_category.iteritems():
+ sys.stderr.write('Category \'%s\' errors found: %d\n' %
+ (category, count))
+ sys.stderr.write('Total errors found: %d\n' % self.error_count)
+
+_cpplint_state = _CppLintState()
+
+
+def _OutputFormat():
+ """Gets the module's output format."""
+ return _cpplint_state.output_format
+
+
+def _SetOutputFormat(output_format):
+ """Sets the module's output format."""
+ _cpplint_state.SetOutputFormat(output_format)
+
+
+def _VerboseLevel():
+ """Returns the module's verbosity setting."""
+ return _cpplint_state.verbose_level
+
+
+def _SetVerboseLevel(level):
+ """Sets the module's verbosity, and returns the previous setting."""
+ return _cpplint_state.SetVerboseLevel(level)
+
+
+def _SetCountingStyle(level):
+ """Sets the module's counting options."""
+ _cpplint_state.SetCountingStyle(level)
+
+
+def _Filters():
+ """Returns the module's list of output filters, as a list."""
+ return _cpplint_state.filters
+
+
+def _SetFilters(filters):
+ """Sets the module's error-message filters.
+
+ These filters are applied when deciding whether to emit a given
+ error message.
+
+ Args:
+ filters: A string of comma-separated filters (e.g. "whitespace/indent").
+ Each filter should start with + or -; else we die.
+ """
+ _cpplint_state.SetFilters(filters)
+
+
+class _FunctionState(object):
+ """Tracks current function name and the number of lines in its body."""
+
+ _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
+ _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
+
+ def __init__(self):
+ self.in_a_function = False
+ self.lines_in_function = 0
+ self.current_function = ''
+
+ def Begin(self, function_name):
+ """Start analyzing function body.
+
+ Args:
+ function_name: The name of the function being tracked.
+ """
+ self.in_a_function = True
+ self.lines_in_function = 0
+ self.current_function = function_name
+
+ def Count(self):
+ """Count line in current function body."""
+ if self.in_a_function:
+ self.lines_in_function += 1
+
+ def Check(self, error, filename, linenum):
+ """Report if too many lines in function body.
+
+ Args:
+ error: The function to call with any errors found.
+ filename: The name of the current file.
+ linenum: The number of the line to check.
+ """
+ if Match(r'T(EST|est)', self.current_function):
+ base_trigger = self._TEST_TRIGGER
+ else:
+ base_trigger = self._NORMAL_TRIGGER
+ trigger = base_trigger * 2**_VerboseLevel()
+
+ if self.lines_in_function > trigger:
+ error_level = int(math.log(self.lines_in_function / base_trigger, 2))
+ # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
+ if error_level > 5:
+ error_level = 5
+ error(filename, linenum, 'readability/fn_size', error_level,
+ 'Small and focused functions are preferred:'
+ ' %s has %d non-comment lines'
+ ' (error triggered by exceeding %d lines).' % (
+ self.current_function, self.lines_in_function, trigger))
+
+ def End(self):
+ """Stop analyzing function body."""
+ self.in_a_function = False
+
+
+class _IncludeError(Exception):
+ """Indicates a problem with the include order in a file."""
+ pass
+
+
+class FileInfo:
+ """Provides utility functions for filenames.
+
+ FileInfo provides easy access to the components of a file's path
+ relative to the project root.
+ """
+
+ def __init__(self, filename):
+ self._filename = filename
+
+ def FullName(self):
+ """Make Windows paths like Unix."""
+ return os.path.abspath(self._filename).replace('\\', '/')
+
+ def RepositoryName(self):
+ """FullName after removing the local path to the repository.
+
+ If we have a real absolute path name here we can try to do something smart:
+ detecting the root of the checkout and truncating /path/to/checkout from
+ the name so that we get header guards that don't include things like
+ "C:\Documents and Settings\..." or "/home/username/..." in them and thus
+ people on different computers who have checked the source out to different
+ locations won't see bogus errors.
+ """
+ fullname = self.FullName()
+
+ if os.path.exists(fullname):
+ project_dir = os.path.dirname(fullname)
+
+ if os.path.exists(os.path.join(project_dir, ".svn")):
+ # If there's a .svn file in the current directory, we recursively look
+ # up the directory tree for the top of the SVN checkout
+ root_dir = project_dir
+ one_up_dir = os.path.dirname(root_dir)
+ while os.path.exists(os.path.join(one_up_dir, ".svn")):
+ root_dir = os.path.dirname(root_dir)
+ one_up_dir = os.path.dirname(one_up_dir)
+
+ prefix = os.path.commonprefix([root_dir, project_dir])
+ return fullname[len(prefix) + 1:]
+
+ # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
+ # searching up from the current path.
+ root_dir = os.path.dirname(fullname)
+ while (root_dir != os.path.dirname(root_dir) and
+ not os.path.exists(os.path.join(root_dir, ".git")) and
+ not os.path.exists(os.path.join(root_dir, ".hg")) and
+ not os.path.exists(os.path.join(root_dir, ".svn"))):
+ root_dir = os.path.dirname(root_dir)
+
+ if (os.path.exists(os.path.join(root_dir, ".git")) or
+ os.path.exists(os.path.join(root_dir, ".hg")) or
+ os.path.exists(os.path.join(root_dir, ".svn"))):
+ prefix = os.path.commonprefix([root_dir, project_dir])
+ return fullname[len(prefix) + 1:]
+
+ # Don't know what to do; header guard warnings may be wrong...
+ return fullname
+
+ def Split(self):
+ """Splits the file into the directory, basename, and extension.
+
+ For 'chrome/browser/browser.cc', Split() would
+ return ('chrome/browser', 'browser', '.cc')
+
+ Returns:
+ A tuple of (directory, basename, extension).
+ """
+
+ googlename = self.RepositoryName()
+ project, rest = os.path.split(googlename)
+ return (project,) + os.path.splitext(rest)
+
+ def BaseName(self):
+ """File base name - text after the final slash, before the final period."""
+ return self.Split()[1]
+
+ def Extension(self):
+ """File extension - text following the final period."""
+ return self.Split()[2]
+
+ def NoExtension(self):
+ """File has no source file extension."""
+ return '/'.join(self.Split()[0:2])
+
+ def IsSource(self):
+ """File has a source file extension."""
+ return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
+
+
+def _ShouldPrintError(category, confidence, linenum):
+ """If confidence >= verbose, category passes filter and is not suppressed."""
+
+ # There are three ways we might decide not to print an error message:
+ # a "NOLINT(category)" comment appears in the source,
+ # the verbosity level isn't high enough, or the filters filter it out.
+ if IsErrorSuppressedByNolint(category, linenum):
+ return False
+ if confidence < _cpplint_state.verbose_level:
+ return False
+
+ is_filtered = False
+ for one_filter in _Filters():
+ if one_filter.startswith('-'):
+ if category.startswith(one_filter[1:]):
+ is_filtered = True
+ elif one_filter.startswith('+'):
+ if category.startswith(one_filter[1:]):
+ is_filtered = False
+ else:
+ assert False # should have been checked for in SetFilter.
+ if is_filtered:
+ return False
+
+ return True
+
+
+def Error(filename, linenum, category, confidence, message):
+ """Logs the fact we've found a lint error.
+
+ We log where the error was found, and also our confidence in the error,
+ that is, how certain we are this is a legitimate style regression, and
+ not a misidentification or a use that's sometimes justified.
+
+ False positives can be suppressed by the use of
+ "cpplint(category)" comments on the offending line. These are
+ parsed into _error_suppressions.
+
+ Args:
+ filename: The name of the file containing the error.
+ linenum: The number of the line containing the error.
+ category: A string used to describe the "category" this bug
+ falls under: "whitespace", say, or "runtime". Categories
+ may have a hierarchy separated by slashes: "whitespace/indent".
+ confidence: A number from 1-5 representing a confidence score for
+ the error, with 5 meaning that we are certain of the problem,
+ and 1 meaning that it could be a legitimate construct.
+ message: The error message.
+ """
+ if _ShouldPrintError(category, confidence, linenum):
+ _cpplint_state.IncrementErrorCount(category)
+ if _cpplint_state.output_format == 'vs7':
+ sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
+ filename, linenum, message, category, confidence))
+ else:
+ sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
+ filename, linenum, message, category, confidence))
+
+
+# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
+_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
+ r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
+# Matches strings. Escape codes should already be removed by ESCAPES.
+_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
+# Matches characters. Escape codes should already be removed by ESCAPES.
+_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
+# Matches multi-line C++ comments.
+# This RE is a little bit more complicated than one might expect, because we
+# have to take care of space-removal tools so we can handle comments inside
+# statements better.
+# The current rule is: We only clear spaces from both sides when we're at the
+# end of the line. Otherwise, we try to remove spaces from the right side;
+# if that doesn't work, we try the left side, but only if there's a non-word
+# character on the right.
+_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
+ r"""(\s*/\*.*\*/\s*$|
+ /\*.*\*/\s+|
+ \s+/\*.*\*/(?=\W)|
+ /\*.*\*/)""", re.VERBOSE)
+
+
+def IsCppString(line):
+ """Does line terminate so, that the next symbol is in string constant.
+
+ This function does not consider single-line nor multi-line comments.
+
+ Args:
+ line: a partial line of code, considered from position 0 to n.
+
+ Returns:
+ True if the next character appended to 'line' would be inside a
+ string constant.
+ """
+
+ line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
+ return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
+
+
+def FindNextMultiLineCommentStart(lines, lineix):
+ """Find the beginning marker for a multiline comment."""
+ while lineix < len(lines):
+ if lines[lineix].strip().startswith('/*'):
+ # Only return this marker if the comment goes beyond this line
+ if lines[lineix].strip().find('*/', 2) < 0:
+ return lineix
+ lineix += 1
+ return len(lines)
+
+
+def FindNextMultiLineCommentEnd(lines, lineix):
+ """We are inside a comment, find the end marker."""
+ while lineix < len(lines):
+ if lines[lineix].strip().endswith('*/'):
+ return lineix
+ lineix += 1
+ return len(lines)
+
+
+def RemoveMultiLineCommentsFromRange(lines, begin, end):
+ """Clears a range of lines for multi-line comments."""
+ # Having // dummy comments makes the lines non-empty, so we will not get
+ # unnecessary blank line warnings later in the code.
+ for i in range(begin, end):
+ lines[i] = '// dummy'
+
+
+def RemoveMultiLineComments(filename, lines, error):
+ """Removes multiline (c-style) comments from lines."""
+ lineix = 0
+ while lineix < len(lines):
+ lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
+ if lineix_begin >= len(lines):
+ return
+ lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
+ if lineix_end >= len(lines):
+ error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
+ 'Could not find end of multi-line comment')
+ return
+ RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
+ lineix = lineix_end + 1
+
+
+def CleanseComments(line):
+ """Removes //-comments and single-line C-style /* */ comments.
+
+ Args:
+ line: A line of C++ source.
+
+ Returns:
+ The line with single-line comments removed.
+ """
+ commentpos = line.find('//')
+ if commentpos != -1 and not IsCppString(line[:commentpos]):
+ line = line[:commentpos].rstrip()
+ # get rid of /* ... */
+ return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
+
+
+class CleansedLines(object):
+ """Holds 3 copies of all lines with different preprocessing applied to them.
+
+ 1) elided member contains lines without strings and comments,
+ 2) lines member contains lines without comments, and
+ 3) raw_lines member contains all the lines without processing.
+ All these three members are of <type 'list'>, and of the same length.
+ """
+
+ def __init__(self, lines):
+ self.elided = []
+ self.lines = []
+ self.raw_lines = lines
+ self.num_lines = len(lines)
+ for linenum in range(len(lines)):
+ self.lines.append(CleanseComments(lines[linenum]))
+ elided = self._CollapseStrings(lines[linenum])
+ self.elided.append(CleanseComments(elided))
+
+ def NumLines(self):
+ """Returns the number of lines represented."""
+ return self.num_lines
+
+ @staticmethod
+ def _CollapseStrings(elided):
+ """Collapses strings and chars on a line to simple "" or '' blocks.
+
+ We nix strings first so we're not fooled by text like '"http://"'
+
+ Args:
+ elided: The line being processed.
+
+ Returns:
+ The line with collapsed strings.
+ """
+ if not _RE_PATTERN_INCLUDE.match(elided):
+ # Remove escaped characters first to make quote/single quote collapsing
+ # basic. Things that look like escaped characters shouldn't occur
+ # outside of strings and chars.
+ elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
+ elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
+ elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
+ return elided
+
+
+def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
+ """Find the position just after the matching endchar.
+
+ Args:
+ line: a CleansedLines line.
+ startpos: start searching at this position.
+ depth: nesting level at startpos.
+ startchar: expression opening character.
+ endchar: expression closing character.
+
+ Returns:
+ Index just after endchar.
+ """
+ for i in xrange(startpos, len(line)):
+ if line[i] == startchar:
+ depth += 1
+ elif line[i] == endchar:
+ depth -= 1
+ if depth == 0:
+ return i + 1
+ return -1
+
+
+def CloseExpression(clean_lines, linenum, pos):
+ """If input points to ( or { or [, finds the position that closes it.
+
+ If lines[linenum][pos] points to a '(' or '{' or '[', finds the
+ linenum/pos that correspond to the closing of the expression.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ pos: A position on the line.
+
+ Returns:
+ A tuple (line, linenum, pos) pointer *past* the closing brace, or
+ (line, len(lines), -1) if we never find a close. Note we ignore
+ strings and comments when matching; and the line we return is the
+ 'cleansed' line at linenum.
+ """
+
+ line = clean_lines.elided[linenum]
+ startchar = line[pos]
+ if startchar not in '({[':
+ return (line, clean_lines.NumLines(), -1)
+ if startchar == '(': endchar = ')'
+ if startchar == '[': endchar = ']'
+ if startchar == '{': endchar = '}'
+
+ # Check first line
+ end_pos = FindEndOfExpressionInLine(line, pos, 0, startchar, endchar)
+ if end_pos > -1:
+ return (line, linenum, end_pos)
+ tail = line[pos:]
+ num_open = tail.count(startchar) - tail.count(endchar)
+ while linenum < clean_lines.NumLines() - 1:
+ linenum += 1
+ line = clean_lines.elided[linenum]
+ delta = line.count(startchar) - line.count(endchar)
+ if num_open + delta <= 0:
+ return (line, linenum,
+ FindEndOfExpressionInLine(line, 0, num_open, startchar, endchar))
+ num_open += delta
+
+ # Did not find endchar before end of file, give up
+ return (line, clean_lines.NumLines(), -1)
+
+def CheckForCopyright(filename, lines, error):
+ """Logs an error if no Copyright message appears at the top of the file."""
+
+ # We'll say it should occur by line 10. Don't forget there's a
+ # dummy line at the front.
+ for line in xrange(1, min(len(lines), 11)):
+ if re.search(r'Copyright', lines[line], re.I): break
+ else: # means no copyright line was found
+ error(filename, 0, 'legal/copyright', 5,
+ 'No copyright message found. '
+ 'You should have a line: "Copyright [year] <Copyright Owner>"')
+
+
+def GetHeaderGuardCPPVariable(filename):
+ """Returns the CPP variable that should be used as a header guard.
+
+ Args:
+ filename: The name of a C++ header file.
+
+ Returns:
+ The CPP variable that should be used as a header guard in the
+ named file.
+
+ """
+
+ # Restores original filename in case that cpplint is invoked from Emacs's
+ # flymake.
+ filename = re.sub(r'_flymake\.h$', '.h', filename)
+ filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
+
+ fileinfo = FileInfo(filename)
+ file_path_from_root = fileinfo.RepositoryName()
+ if _root:
+ file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
+ return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
+
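+# For example, assuming RepositoryName() yields the path relative to the
+# repository root, a header at vp9/common/vp9_alloccommon.h maps to the
+# guard variable VP9_COMMON_VP9_ALLOCCOMMON_H_.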
+
+def CheckForHeaderGuard(filename, lines, error):
+ """Checks that the file contains a header guard.
+
+ Logs an error if no #ifndef header guard is present. For other
+ headers, checks that the full pathname is used.
+
+ Args:
+ filename: The name of the C++ header file.
+ lines: An array of strings, each representing a line of the file.
+ error: The function to call with any errors found.
+ """
+
+ cppvar = GetHeaderGuardCPPVariable(filename)
+
+ ifndef = None
+ ifndef_linenum = 0
+ define = None
+ endif = None
+ endif_linenum = 0
+ for linenum, line in enumerate(lines):
+ linesplit = line.split()
+ if len(linesplit) >= 2:
+ # find the first occurrence of #ifndef and #define, save arg
+ if not ifndef and linesplit[0] == '#ifndef':
+ # set ifndef to the header guard presented on the #ifndef line.
+ ifndef = linesplit[1]
+ ifndef_linenum = linenum
+ if not define and linesplit[0] == '#define':
+ define = linesplit[1]
+ # find the last occurrence of #endif, save entire line
+ if line.startswith('#endif'):
+ endif = line
+ endif_linenum = linenum
+
+ if not ifndef:
+ error(filename, 0, 'build/header_guard', 5,
+ 'No #ifndef header guard found, suggested CPP variable is: %s' %
+ cppvar)
+ return
+
+ if not define:
+ error(filename, 0, 'build/header_guard', 5,
+ 'No #define header guard found, suggested CPP variable is: %s' %
+ cppvar)
+ return
+
+ # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
+ # for backward compatibility.
+ if ifndef != cppvar:
+ error_level = 0
+ if ifndef != cppvar + '_':
+ error_level = 5
+
+ ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
+ error)
+ error(filename, ifndef_linenum, 'build/header_guard', error_level,
+ '#ifndef header guard has wrong style, please use: %s' % cppvar)
+
+ if define != ifndef:
+ error(filename, 0, 'build/header_guard', 5,
+ '#ifndef and #define don\'t match, suggested CPP variable is: %s' %
+ cppvar)
+ return
+
+ if endif != ('#endif // %s' % cppvar):
+ error_level = 0
+ if endif != ('#endif // %s' % (cppvar + '_')):
+ error_level = 5
+
+ ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
+ error)
+ error(filename, endif_linenum, 'build/header_guard', error_level,
+ '#endif line should be "#endif // %s"' % cppvar)
+
+
+def CheckForUnicodeReplacementCharacters(filename, lines, error):
+ """Logs an error for each line containing Unicode replacement characters.
+
+ These indicate that either the file contained invalid UTF-8 (likely)
+ or Unicode replacement characters (which it shouldn't). Note that
+ it's possible for this to throw off line numbering if the invalid
+ UTF-8 occurred adjacent to a newline.
+
+ Args:
+ filename: The name of the current file.
+ lines: An array of strings, each representing a line of the file.
+ error: The function to call with any errors found.
+ """
+ for linenum, line in enumerate(lines):
+ if u'\ufffd' in line:
+ error(filename, linenum, 'readability/utf8', 5,
+ 'Line contains invalid UTF-8 (or Unicode replacement character).')
+
+
+def CheckForNewlineAtEOF(filename, lines, error):
+ """Logs an error if there is no newline char at the end of the file.
+
+ Args:
+ filename: The name of the current file.
+ lines: An array of strings, each representing a line of the file.
+ error: The function to call with any errors found.
+ """
+
+ # The array lines() was created by adding two newlines to the
+ # original file (go figure), then splitting on \n.
+ # To verify that the file ends in \n, we just have to make sure the
+ # last-but-one element of lines() exists and is empty.
+ if len(lines) < 3 or lines[-2]:
+ error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
+ 'Could not find a newline character at the end of the file.')
+
+
+def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
+ """Logs an error if we see /* ... */ or "..." that extend past one line.
+
+ /* ... */ comments are legit inside macros, for one line.
+ Otherwise, we prefer // comments, so it's ok to warn about the
+ other. Likewise, it's ok for strings to extend across multiple
+ lines, as long as a line continuation character (backslash)
+ terminates each line. Although not currently prohibited by the C++
+ style guide, it's ugly and unnecessary. We don't do well with either
+ in this lint program, so we warn about both.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+
+ # Remove all \\ (escaped backslashes) from the line. They are OK, and the
+ # second (escaped) slash may trigger later \" detection erroneously.
+ line = line.replace('\\\\', '')
+
+ if line.count('/*') > line.count('*/'):
+ error(filename, linenum, 'readability/multiline_comment', 5,
+ 'Complex multi-line /*...*/-style comment found. '
+ 'Lint may give bogus warnings. '
+ 'Consider replacing these with //-style comments, '
+ 'with #if 0...#endif, '
+ 'or with more clearly structured multi-line comments.')
+
+ if (line.count('"') - line.count('\\"')) % 2:
+ error(filename, linenum, 'readability/multiline_string', 5,
+ 'Multi-line string ("...") found. This lint script doesn\'t '
+ 'do well with such strings, and may give bogus warnings. They\'re '
+ 'ugly and unnecessary, and you should use concatenation instead."')
+
+
+threading_list = (
+ ('asctime(', 'asctime_r('),
+ ('ctime(', 'ctime_r('),
+ ('getgrgid(', 'getgrgid_r('),
+ ('getgrnam(', 'getgrnam_r('),
+ ('getlogin(', 'getlogin_r('),
+ ('getpwnam(', 'getpwnam_r('),
+ ('getpwuid(', 'getpwuid_r('),
+ ('gmtime(', 'gmtime_r('),
+ ('localtime(', 'localtime_r('),
+ ('rand(', 'rand_r('),
+ ('readdir(', 'readdir_r('),
+ ('strtok(', 'strtok_r('),
+ ('ttyname(', 'ttyname_r('),
+ )
+
+
+def CheckPosixThreading(filename, clean_lines, linenum, error):
+ """Checks for calls to thread-unsafe functions.
+
+ Much code was originally written without consideration for
+ multi-threading, and many engineers learned POSIX before its threading
+ extensions were added. These checks guide engineers toward the
+ thread-safe variants (when using POSIX directly).
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+ for single_thread_function, multithread_safe_function in threading_list:
+ ix = line.find(single_thread_function)
+ # Comparisons made explicit for clarity -- pylint: disable-msg=C6403
+ if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
+ line[ix - 1] not in ('_', '.', '>'))):
+ error(filename, linenum, 'runtime/threadsafe_fn', 2,
+ 'Consider using ' + multithread_safe_function +
+ '...) instead of ' + single_thread_function +
+ '...) for improved thread safety.')
+
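+# For example, a line containing 'tm = localtime(&t);' is flagged with a
+# suggestion to use localtime_r(...), while 'my_localtime(&t);' is not,
+# because the character before the match belongs to an identifier.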
+
+# Matches invalid increment: *count++, which moves pointer instead of
+# incrementing a value.
+_RE_PATTERN_INVALID_INCREMENT = re.compile(
+ r'^\s*\*\w+(\+\+|--);')
+
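+# For example, '*count++;' matches this pattern, while '(*count)++;' and
+# '*count += 1;' do not.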
+
+def CheckInvalidIncrement(filename, clean_lines, linenum, error):
+ """Checks for invalid increment *count++.
+
+ For example following function:
+ void increment_counter(int* count) {
+ *count++;
+ }
+ is invalid, because it effectively does count++, moving the pointer, and should
+ be replaced with ++*count, (*count)++ or *count += 1.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+ if _RE_PATTERN_INVALID_INCREMENT.match(line):
+ error(filename, linenum, 'runtime/invalid_increment', 5,
+ 'Changing pointer instead of value (or unused value of operator*).')
+
+
+class _BlockInfo(object):
+ """Stores information about a generic block of code."""
+
+ def __init__(self, seen_open_brace):
+ self.seen_open_brace = seen_open_brace
+ self.open_parentheses = 0
+ self.inline_asm = _NO_ASM
+
+ def CheckBegin(self, filename, clean_lines, linenum, error):
+ """Run checks that applies to text up to the opening brace.
+
+ This is mostly for checking the text after the class identifier
+ and the "{", usually where the base class is specified. For other
+ blocks, there isn't much to check, so we always pass.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ pass
+
+ def CheckEnd(self, filename, clean_lines, linenum, error):
+ """Run checks that applies to text after the closing brace.
+
+ This is mostly used for checking end of namespace comments.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ pass
+
+
+class _ClassInfo(_BlockInfo):
+ """Stores information about a class."""
+
+ def __init__(self, name, class_or_struct, clean_lines, linenum):
+ _BlockInfo.__init__(self, False)
+ self.name = name
+ self.starting_linenum = linenum
+ self.is_derived = False
+ if class_or_struct == 'struct':
+ self.access = 'public'
+ else:
+ self.access = 'private'
+
+ # Try to find the end of the class. This will be confused by things like:
+ # class A {
+ # } *x = { ...
+ #
+ # But it's still good enough for CheckSectionSpacing.
+ self.last_line = 0
+ depth = 0
+ for i in range(linenum, clean_lines.NumLines()):
+ line = clean_lines.elided[i]
+ depth += line.count('{') - line.count('}')
+ if not depth:
+ self.last_line = i
+ break
+
+ def CheckBegin(self, filename, clean_lines, linenum, error):
+ # Look for a bare ':'
+ if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
+ self.is_derived = True
+
+
+class _NamespaceInfo(_BlockInfo):
+ """Stores information about a namespace."""
+
+ def __init__(self, name, linenum):
+ _BlockInfo.__init__(self, False)
+ self.name = name or ''
+ self.starting_linenum = linenum
+
+ def CheckEnd(self, filename, clean_lines, linenum, error):
+ """Check end of namespace comments."""
+ line = clean_lines.raw_lines[linenum]
+
+ # Check how many lines are enclosed in this namespace. Don't issue
+ # warning for missing namespace comments if there aren't enough
+ # lines. However, do apply checks if there is already an end of
+ # namespace comment and it's incorrect.
+ #
+ # TODO(unknown): We always want to check end of namespace comments
+ # if a namespace is large, but sometimes we also want to apply the
+ # check if a short namespace contained nontrivial things (something
+ # other than forward declarations). There is currently no logic on
+ # deciding what these nontrivial things are, so this check is
+ # triggered by namespace size only, which works most of the time.
+ if (linenum - self.starting_linenum < 10
+ and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
+ return
+
+ # Look for matching comment at end of namespace.
+ #
+ # Note that we accept C style "/* */" comments for terminating
+ # namespaces, so that code that terminates namespaces inside
+ # preprocessor macros can be cpplint clean. Example: http://go/nxpiz
+ #
+ # We also accept stuff like "// end of namespace <name>." with the
+ # period at the end.
+ #
+ # Besides these, we don't accept anything else, otherwise we might
+ # get false negatives when an existing comment is a substring of the
+ # expected namespace comment. Example: http://go/ldkdc, http://cl/23548205
+ if self.name:
+ # Named namespace
+ if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
+ r'[\*/\.\\\s]*$'),
+ line):
+ error(filename, linenum, 'readability/namespace', 5,
+ 'Namespace should be terminated with "// namespace %s"' %
+ self.name)
+ else:
+ # Anonymous namespace
+ if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
+ error(filename, linenum, 'readability/namespace', 5,
+ 'Namespace should be terminated with "// namespace"')
+
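+# For example, for 'namespace foo', both '}  // namespace foo' and
+# '}  /* namespace foo */' are accepted as terminators; a bare '}' closing
+# a namespace of ten or more lines is flagged.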
+
+class _PreprocessorInfo(object):
+ """Stores checkpoints of nesting stacks when #if/#else is seen."""
+
+ def __init__(self, stack_before_if):
+ # The entire nesting stack before #if
+ self.stack_before_if = stack_before_if
+
+ # The entire nesting stack up to #else
+ self.stack_before_else = []
+
+ # Whether we have already seen #else or #elif
+ self.seen_else = False
+
+
+class _NestingState(object):
+ """Holds states related to parsing braces."""
+
+ def __init__(self):
+ # Stack for tracking all braces. An object is pushed whenever we
+ # see a "{", and popped when we see a "}". Only 3 types of
+ # objects are possible:
+ # - _ClassInfo: a class or struct.
+ # - _NamespaceInfo: a namespace.
+ # - _BlockInfo: some other type of block.
+ self.stack = []
+
+ # Stack of _PreprocessorInfo objects.
+ self.pp_stack = []
+
+ def SeenOpenBrace(self):
+ """Check if we have seen the opening brace for the innermost block.
+
+ Returns:
+ True if we have seen the opening brace, False if the innermost
+ block is still expecting an opening brace.
+ """
+ return (not self.stack) or self.stack[-1].seen_open_brace
+
+ def InNamespaceBody(self):
+ """Check if we are currently one level inside a namespace body.
+
+ Returns:
+ True if top of the stack is a namespace block, False otherwise.
+ """
+ return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
+
+ def UpdatePreprocessor(self, line):
+ """Update preprocessor stack.
+
+ We need to handle preprocessors due to classes like this:
+ #ifdef SWIG
+ struct ResultDetailsPageElementExtensionPoint {
+ #else
+ struct ResultDetailsPageElementExtensionPoint : public Extension {
+ #endif
+ (see http://go/qwddn for original example)
+
+ We make the following assumptions (good enough for most files):
+ - Preprocessor condition evaluates to true from #if up to first
+ #else/#elif/#endif.
+
+ - Preprocessor condition evaluates to false from #else/#elif up
+ to #endif. We still perform lint checks on these lines, but
+ these do not affect nesting stack.
+
+ Args:
+ line: current line to check.
+ """
+ if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
+ # Beginning of #if block, save the nesting stack here. The saved
+ # stack will allow us to restore the parsing state in the #else case.
+ self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
+ elif Match(r'^\s*#\s*(else|elif)\b', line):
+ # Beginning of #else block
+ if self.pp_stack:
+ if not self.pp_stack[-1].seen_else:
+ # This is the first #else or #elif block. Remember the
+ # whole nesting stack up to this point. This is what we
+ # keep after the #endif.
+ self.pp_stack[-1].seen_else = True
+ self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
+
+ # Restore the stack to how it was before the #if
+ self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
+ else:
+ # TODO(unknown): unexpected #else, issue warning?
+ pass
+ elif Match(r'^\s*#\s*endif\b', line):
+ # End of #if or #else blocks.
+ if self.pp_stack:
+ # If we saw an #else, we will need to restore the nesting
+ # stack to its former state before the #else, otherwise we
+ # will just continue from where we left off.
+ if self.pp_stack[-1].seen_else:
+ # Here we can just use a shallow copy since we are the last
+ # reference to it.
+ self.stack = self.pp_stack[-1].stack_before_else
+ # Drop the corresponding #if
+ self.pp_stack.pop()
+ else:
+ # TODO(unknown): unexpected #endif, issue warning?
+ pass
+
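+ # For the SWIG example above, the stack checkpointed at '#ifdef SWIG' is
+ # restored when '#else' is seen, so both struct declarations are parsed
+ # against the same nesting state; at '#endif' the stack reverts to the
+ # state recorded at the first '#else'.
+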
+ def Update(self, filename, clean_lines, linenum, error):
+ """Update nesting state with current line.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+
+ # Update pp_stack first
+ self.UpdatePreprocessor(line)
+
+ # Count parentheses. This is to avoid adding struct arguments to
+ # the nesting stack.
+ if self.stack:
+ inner_block = self.stack[-1]
+ depth_change = line.count('(') - line.count(')')
+ inner_block.open_parentheses += depth_change
+
+ # Also check if we are starting or ending an inline assembly block.
+ if inner_block.inline_asm in (_NO_ASM, _END_ASM):
+ if (depth_change != 0 and
+ inner_block.open_parentheses == 1 and
+ _MATCH_ASM.match(line)):
+ # Enter assembly block
+ inner_block.inline_asm = _INSIDE_ASM
+ else:
+ # Not entering assembly block. If previous line was _END_ASM,
+ # we will now shift to _NO_ASM state.
+ inner_block.inline_asm = _NO_ASM
+ elif (inner_block.inline_asm == _INSIDE_ASM and
+ inner_block.open_parentheses == 0):
+ # Exit assembly block
+ inner_block.inline_asm = _END_ASM
+
+ # Consume namespace declaration at the beginning of the line. Do
+ # this in a loop so that we catch same line declarations like this:
+ # namespace proto2 { namespace bridge { class MessageSet; } }
+ while True:
+ # Match start of namespace. The "\b\s*" below catches namespace
+ # declarations even if they aren't followed by whitespace, so that
+ # we don't confuse our namespace checker. The
+ # missing spaces will be flagged by CheckSpacing.
+ namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
+ if not namespace_decl_match:
+ break
+
+ new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
+ self.stack.append(new_namespace)
+
+ line = namespace_decl_match.group(2)
+ if line.find('{') != -1:
+ new_namespace.seen_open_brace = True
+ line = line[line.find('{') + 1:]
+
+ # Look for a class declaration in whatever is left of the line
+ # after parsing namespaces. The regexp accounts for decorated classes
+ # such as in:
+ # class LOCKABLE API Object {
+ # };
+ #
+ # Templates with class arguments may confuse the parser, for example:
+ # template <class T
+ # class Comparator = less<T>,
+ # class Vector = vector<T> >
+ # class HeapQueue {
+ #
+ # Because this parser has no nesting state about templates, by the
+ # time it sees "class Comparator", it may think that it's a new class.
+ # Nested templates have a similar problem:
+ # template <
+ # typename ExportedType,
+ # typename TupleType,
+ # template <typename, typename> class ImplTemplate>
+ #
+ # To avoid these cases, we ignore classes that are followed by '=' or '>'
+ class_decl_match = Match(
+ r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
+ r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)'
+ r'(([^=>]|<[^<>]*>)*)$', line)
+ if (class_decl_match and
+ (not self.stack or self.stack[-1].open_parentheses == 0)):
+ self.stack.append(_ClassInfo(
+ class_decl_match.group(4), class_decl_match.group(2),
+ clean_lines, linenum))
+ line = class_decl_match.group(5)
+
+ # If we have not yet seen the opening brace for the innermost block,
+ # run checks here.
+ if not self.SeenOpenBrace():
+ self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
+
+ # Update access control if we are inside a class/struct
+ if self.stack and isinstance(self.stack[-1], _ClassInfo):
+ access_match = Match(r'\s*(public|private|protected)\s*:', line)
+ if access_match:
+ self.stack[-1].access = access_match.group(1)
+
+ # Consume braces or semicolons from what's left of the line
+ while True:
+ # Match first brace, semicolon, or closed parenthesis.
+ matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
+ if not matched:
+ break
+
+ token = matched.group(1)
+ if token == '{':
+ # If namespace or class hasn't seen an opening brace yet, mark
+ # namespace/class head as complete. Push a new block onto the
+ # stack otherwise.
+ if not self.SeenOpenBrace():
+ self.stack[-1].seen_open_brace = True
+ else:
+ self.stack.append(_BlockInfo(True))
+ if _MATCH_ASM.match(line):
+ self.stack[-1].inline_asm = _BLOCK_ASM
+ elif token == ';' or token == ')':
+ # If we haven't seen an opening brace yet, but we already saw
+ # a semicolon, this is probably a forward declaration. Pop
+ # the stack for these.
+ #
+ # Similarly, if we haven't seen an opening brace yet, but we
+ # already saw a closing parenthesis, then these are probably
+ # function arguments with extra "class" or "struct" keywords.
+ # Also pop the stack for these.
+ if not self.SeenOpenBrace():
+ self.stack.pop()
+ else: # token == '}'
+ # Perform end of block checks and pop the stack.
+ if self.stack:
+ self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
+ self.stack.pop()
+ line = matched.group(2)
+
+ def InnermostClass(self):
+ """Get class info on the top of the stack.
+
+ Returns:
+ A _ClassInfo object if we are inside a class, or None otherwise.
+ """
+ for i in range(len(self.stack), 0, -1):
+ classinfo = self.stack[i - 1]
+ if isinstance(classinfo, _ClassInfo):
+ return classinfo
+ return None
+
+ def CheckClassFinished(self, filename, error):
+ """Checks that all classes have been completely parsed.
+
+ Call this when all lines in a file have been processed.
+ Args:
+ filename: The name of the current file.
+ error: The function to call with any errors found.
+ """
+ # Note: This test can result in false positives if #ifdef constructs
+ # get in the way of brace matching. See the testBuildClass test in
+ # cpplint_unittest.py for an example of this.
+ for obj in self.stack:
+ if isinstance(obj, _ClassInfo):
+ error(filename, obj.starting_linenum, 'build/class', 5,
+ 'Failed to find complete declaration of class %s' %
+ obj.name)
+
+
+def CheckForNonStandardConstructs(filename, clean_lines, linenum,
+ nesting_state, error):
+ """Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
+
+ Complain about several constructs which gcc-2 accepts, but which are
+ not standard C++. Warning about these in lint is one way to ease the
+ transition to new compilers.
+ - put storage class first (e.g. "static const" instead of "const static").
+ - "%lld" instead of %qd" in printf-type functions.
+ - "%1$d" is non-standard in printf-type functions.
+ - "\%" is an undefined character escape sequence.
+ - text after #endif is not allowed.
+ - invalid inner-style forward declaration.
+ - >? and <? operators, and their >?= and <?= cousins.
+
+ Additionally, check for constructor/destructor style violations and reference
+ members, as it is very convenient to do so while checking for
+ gcc-2 compliance.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
+ error: A callable to which errors are reported, which takes 5 arguments:
+ filename, line number, error category, error level, and message
+ """
+
+ # Remove comments from the line, but leave in strings for now.
+ line = clean_lines.lines[linenum]
+
+ if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
+ error(filename, linenum, 'runtime/printf_format', 3,
+ '%q in format strings is deprecated. Use %ll instead.')
+
+ if Search(r'printf\s*\(.*".*%\d+\$', line):
+ error(filename, linenum, 'runtime/printf_format', 2,
+ '%N$ formats are unconventional. Try rewriting to avoid them.')
+
+ # Remove escaped backslashes before looking for undefined escapes.
+ line = line.replace('\\\\', '')
+
+ if Search(r'("|\').*\\(%|\[|\(|{)', line):
+ error(filename, linenum, 'build/printf_format', 3,
+ '%, [, (, and { are undefined character escapes. Unescape them.')
+
+ # For the rest, work with both comments and strings removed.
+ line = clean_lines.elided[linenum]
+
+ if Search(r'\b(const|volatile|void|char|short|int|long'
+ r'|float|double|signed|unsigned'
+ r'|schar|u?int8|u?int16|u?int32|u?int64)'
+ r'\s+(register|static|extern|typedef)\b',
+ line):
+ error(filename, linenum, 'build/storage_class', 5,
+ 'Storage class (static, extern, typedef, etc) should be first.')
+
+ if Match(r'\s*#\s*endif\s*[^/\s]+', line):
+ error(filename, linenum, 'build/endif_comment', 5,
+ 'Uncommented text after #endif is non-standard. Use a comment.')
+
+ if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
+ error(filename, linenum, 'build/forward_decl', 5,
+ 'Inner-style forward declarations are invalid. Remove this line.')
+
+ if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
+ line):
+ error(filename, linenum, 'build/deprecated', 3,
+ '>? and <? (max and min) operators are non-standard and deprecated.')
+
+ if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
+ # TODO(unknown): Could it be expanded safely to arbitrary references,
+ # without triggering too many false positives? The first
+ # attempt triggered 5 warnings for mostly benign code in the regtest, hence
+ # the restriction.
+ # Here's the original regexp, for reference:
+ # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
+ # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
+ error(filename, linenum, 'runtime/member_string_references', 2,
+ 'const string& members are dangerous. It is much better to use '
+ 'alternatives, such as pointers or simple constants.')
+
+ # Everything else in this function operates on class declarations.
+ # Return early if the top of the nesting stack is not a class, or if
+ # the class head is not completed yet.
+ classinfo = nesting_state.InnermostClass()
+ if not classinfo or not classinfo.seen_open_brace:
+ return
+
+ # The class may have been declared with namespace or classname qualifiers.
+ # The constructor and destructor will not have those qualifiers.
+ base_classname = classinfo.name.split('::')[-1]
+
+ # Look for single-argument constructors that aren't marked explicit.
+ # Technically a valid construct, but against style.
+ args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)'
+ % re.escape(base_classname),
+ line)
+ if (args and
+ args.group(1) != 'void' and
+ not Match(r'(const\s+)?%s\s*(?:<\w+>\s*)?&' % re.escape(base_classname),
+ args.group(1).strip())):
+ error(filename, linenum, 'runtime/explicit', 5,
+ 'Single-argument constructors should be marked explicit.')
+
+
+def CheckSpacingForFunctionCall(filename, line, linenum, error):
+ """Checks for the correctness of various spacing around function calls.
+
+ Args:
+ filename: The name of the current file.
+ line: The text of the line to check.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ # Since function calls often occur inside if/for/while/switch
+ # expressions - which have their own, more liberal conventions - we
+ # first see if we should be looking inside such an expression for a
+ # function call, to which we can apply more strict standards.
+ fncall = line # if there's no control flow construct, look at whole line
+ for pattern in (r'\bif\s*\((.*)\)\s*{',
+ r'\bfor\s*\((.*)\)\s*{',
+ r'\bwhile\s*\((.*)\)\s*[{;]',
+ r'\bswitch\s*\((.*)\)\s*{'):
+ match = Search(pattern, line)
+ if match:
+ fncall = match.group(1) # look inside the parens for function calls
+ break
+
+ # Except in if/for/while/switch, there should never be space
+ # immediately inside parens (eg "f( 3, 4 )"). We make an exception
+ # for nested parens ( (a+b) + c ). Likewise, there should never be
+ # a space before a ( when it's a function argument. I assume it's a
+ # function argument when the char before the whitespace is legal in
+ # a function name (alnum + _) and we're not starting a macro. Also ignore
+ # pointers and references to arrays and functions because they're too tricky:
+ # we use a very simple way to recognize these:
+ # " (something)(maybe-something)" or
+ # " (something)(maybe-something," or
+ # " (something)[something]"
+ # Note that we assume the contents of [] to be short enough that
+ # they'll never need to wrap.
+ if ( # Ignore control structures.
+ not Search(r'\b(if|for|while|switch|return|delete)\b', fncall) and
+ # Ignore pointers/references to functions.
+ not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
+ # Ignore pointers/references to arrays.
+ not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
+ if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
+ error(filename, linenum, 'whitespace/parens', 4,
+ 'Extra space after ( in function call')
+ elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
+ error(filename, linenum, 'whitespace/parens', 2,
+ 'Extra space after (')
+ if (Search(r'\w\s+\(', fncall) and
+ not Search(r'#\s*define|typedef', fncall) and
+ not Search(r'\w\s+\((\w+::)?\*\w+\)\(', fncall)):
+ error(filename, linenum, 'whitespace/parens', 4,
+ 'Extra space before ( in function call')
+ # If the ) is followed only by a newline or a { + newline, assume it's
+ # part of a control statement (if/while/etc), and don't complain
+ if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
+ # If the closing parenthesis is preceded by only whitespaces,
+ # try to give a more descriptive error message.
+ if Search(r'^\s+\)', fncall):
+ error(filename, linenum, 'whitespace/parens', 2,
+ 'Closing ) should be moved to the previous line')
+ else:
+ error(filename, linenum, 'whitespace/parens', 2,
+ 'Extra space before )')
+
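+# For example, 'f( 3, 4 );' draws both 'Extra space after ( in function
+# call' and 'Extra space before )', while the spacing inside 'if ( foo ) {'
+# is exempt here because control-flow constructs follow their own, more
+# liberal conventions.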
+
+def IsBlankLine(line):
+ """Returns true if the given line is blank.
+
+ We consider a line to be blank if the line is empty or consists of
+ only white spaces.
+
+ Args:
+ line: A line of a string.
+
+ Returns:
+ True, if the given line is blank.
+ """
+ return not line or line.isspace()
+
+
+def CheckForFunctionLengths(filename, clean_lines, linenum,
+ function_state, error):
+ """Reports for long function bodies.
+
+ For an overview why this is done, see:
+ http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
+
+ Uses a simplistic algorithm assuming other style guidelines
+ (especially spacing) are followed.
+ Only checks unindented functions, so class members are unchecked.
+ Trivial bodies are unchecked, so constructors with huge initializer lists
+ may be missed.
+ Blank/comment lines are not counted so as to avoid encouraging the removal
+ of vertical space and comments just to get through a lint check.
+ NOLINT *on the last line of a function* disables this check.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ function_state: Current function name and lines in body so far.
+ error: The function to call with any errors found.
+ """
+ lines = clean_lines.lines
+ line = lines[linenum]
+ raw = clean_lines.raw_lines
+ raw_line = raw[linenum]
+ joined_line = ''
+
+ starting_func = False
+ regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
+ match_result = Match(regexp, line)
+ if match_result:
+ # If the name is all caps and underscores, figure it's a macro and
+ # ignore it, unless it's TEST or TEST_F.
+ function_name = match_result.group(1).split()[-1]
+ if function_name == 'TEST' or function_name == 'TEST_F' or (
+ not Match(r'[A-Z_]+$', function_name)):
+ starting_func = True
+
+ if starting_func:
+ body_found = False
+ for start_linenum in xrange(linenum, clean_lines.NumLines()):
+ start_line = lines[start_linenum]
+ joined_line += ' ' + start_line.lstrip()
+ if Search(r'(;|})', start_line): # Declarations and trivial functions
+ body_found = True
+ break # ... ignore
+ elif Search(r'{', start_line):
+ body_found = True
+ function = Search(r'((\w|:)*)\(', line).group(1)
+ if Match(r'TEST', function): # Handle TEST... macros
+ parameter_regexp = Search(r'(\(.*\))', joined_line)
+ if parameter_regexp: # Ignore bad syntax
+ function += parameter_regexp.group(1)
+ else:
+ function += '()'
+ function_state.Begin(function)
+ break
+ if not body_found:
+ # No body for the function (or evidence of a non-function) was found.
+ error(filename, linenum, 'readability/fn_size', 5,
+ 'Lint failed to find start of function body.')
+ elif Match(r'^\}\s*$', line): # function end
+ function_state.Check(error, filename, linenum)
+ function_state.End()
+ elif not Match(r'^\s*$', line):
+ function_state.Count() # Count non-blank/non-comment lines.
+
+
+_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
+
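+# For example, '// TODO(my_username): Stuff.' is the accepted form: one
+# space before TODO, an owner in parentheses, and a single space after the
+# colon. '//TODO: Stuff.' and '// TODO: Stuff.' are missing the username.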
+
+def CheckComment(comment, filename, linenum, error):
+ """Checks for common mistakes in TODO comments.
+
+ Args:
+ comment: The text of the comment from the line in question.
+ filename: The name of the current file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ match = _RE_PATTERN_TODO.match(comment)
+ if match:
+ # One whitespace is correct; zero whitespace is handled elsewhere.
+ leading_whitespace = match.group(1)
+ if len(leading_whitespace) > 1:
+ error(filename, linenum, 'whitespace/todo', 2,
+ 'Too many spaces before TODO')
+
+ username = match.group(2)
+ if not username:
+ error(filename, linenum, 'readability/todo', 2,
+ 'Missing username in TODO; it should look like '
+ '"// TODO(my_username): Stuff."')
+
+ middle_whitespace = match.group(3)
+ # Comparisons made explicit for correctness -- pylint: disable-msg=C6403
+ if middle_whitespace != ' ' and middle_whitespace != '':
+ error(filename, linenum, 'whitespace/todo', 2,
+ 'TODO(my_username) should be followed by a space')
+
+def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
+ """Checks for improper use of DISALLOW* macros.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum] # get rid of comments and strings
+
+ matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
+ r'DISALLOW_EVIL_CONSTRUCTORS|'
+ r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
+ if not matched:
+ return
+ if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
+ if nesting_state.stack[-1].access != 'private':
+ error(filename, linenum, 'readability/constructors', 3,
+ '%s must be in the private: section' % matched.group(1))
+
+ else:
+ # Found DISALLOW* macro outside a class declaration, or perhaps it
+ # was used inside a function when it should have been part of the
+ # class declaration. We could issue a warning here, but it
+ # probably resulted in a compiler error already.
+ pass
+
+
+def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix):
+ """Find the corresponding > to close a template.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: Current line number.
+ init_suffix: Remainder of the current line after the initial <.
+
+ Returns:
+ True if a matching bracket exists.
+ """
+ line = init_suffix
+ nesting_stack = ['<']
+ while True:
+ # Find the next operator that can tell us whether < is used as an
+ # opening bracket or as a less-than operator. We only want to
+ # warn on the latter case.
+ #
+ # We could also check all other operators and terminate the search
+ # early, e.g. if we got something like this "a<b+c", the "<" is
+ # most likely a less-than operator, but then we will get false
+ # positives for default arguments (e.g. http://go/prccd) and
+ # other template expressions (e.g. http://go/oxcjq).
+ match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line)
+ if match:
+ # Found an operator, update nesting stack
+ operator = match.group(1)
+ line = match.group(2)
+
+ if nesting_stack[-1] == '<':
+ # Expecting closing angle bracket
+ if operator in ('<', '(', '['):
+ nesting_stack.append(operator)
+ elif operator == '>':
+ nesting_stack.pop()
+ if not nesting_stack:
+ # Found matching angle bracket
+ return True
+ elif operator == ',':
+ # Got a comma after a bracket, this is most likely a template
+ # argument. We have not seen a closing angle bracket yet, but
+ # it's probably a few lines later if we look for it, so just
+ # return early here.
+ return True
+ else:
+ # Got some other operator.
+ return False
+
+ else:
+ # Expecting closing parenthesis or closing bracket
+ if operator in ('<', '(', '['):
+ nesting_stack.append(operator)
+ elif operator in (')', ']'):
+ # We don't bother checking for matching () or []. If we got
+ # something like (] or [), it would have been a syntax error.
+ nesting_stack.pop()
+
+ else:
+ # Scan the next line
+ linenum += 1
+ if linenum >= len(clean_lines.elided):
+ break
+ line = clean_lines.elided[linenum]
+
+ # Exhausted all remaining lines and still no matching angle bracket.
+ # Most likely the input was incomplete, otherwise we should have
+ # seen a semicolon and returned early.
+ return True
+
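+# For example, when the text after a '<' is 'Bar> x;' this returns True and
+# the '<' is treated as a template bracket; for 'b && c;' it returns False,
+# so the caller flags the '<' as a less-than operator missing spaces.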
+
+def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
+ """Find the corresponding < that started a template.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: Current line number.
+ init_prefix: Part of the current line before the initial >.
+
+ Returns:
+ True if a matching bracket exists.
+ """
+ line = init_prefix
+ nesting_stack = ['>']
+ while True:
+ # Find the previous operator
+ match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line)
+ if match:
+ # Found an operator, update nesting stack
+ operator = match.group(2)
+ line = match.group(1)
+
+ if nesting_stack[-1] == '>':
+ # Expecting opening angle bracket
+ if operator in ('>', ')', ']'):
+ nesting_stack.append(operator)
+ elif operator == '<':
+ nesting_stack.pop()
+ if not nesting_stack:
+ # Found matching angle bracket
+ return True
+ elif operator == ',':
+ # Got a comma before a bracket, this is most likely a
+ # template argument. The opening angle bracket is probably
+ # there if we look for it, so just return early here.
+ return True
+ else:
+ # Got some other operator.
+ return False
+
+ else:
+ # Expecting opening parenthesis or opening bracket
+ if operator in ('>', ')', ']'):
+ nesting_stack.append(operator)
+ elif operator in ('(', '['):
+ nesting_stack.pop()
+
+ else:
+ # Scan the previous line
+ linenum -= 1
+ if linenum < 0:
+ break
+ line = clean_lines.elided[linenum]
+
+ # Exhausted all earlier lines and still no matching angle bracket.
+ return False
+
+
+def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
+ """Checks for the correctness of various spacing issues in the code.
+
+ Things we check for: spaces around operators, spaces after
+ if/for/while/switch, no spaces around parens in function calls, two
+ spaces between code and comment, don't start a block with a blank
+ line, don't end a function with a blank line, don't add a blank line
+ after public/protected/private, don't have too many blank lines in a row.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
+ error: The function to call with any errors found.
+ """
+
+ raw = clean_lines.raw_lines
+ line = raw[linenum]
+
+ # Before nixing comments, check if the line is blank for no good
+ # reason. This includes the first line after a block is opened, and
+ # blank lines at the end of a function (ie, right before a line like '}').
+ #
+ # Skip all the blank line checks if we are immediately inside a
+ # namespace body. In other words, don't issue blank line warnings
+ # for this block:
+ # namespace {
+ #
+ # }
+ #
+ # A warning about missing end of namespace comments will be issued instead.
+ if IsBlankLine(line) and not nesting_state.InNamespaceBody():
+ elided = clean_lines.elided
+ prev_line = elided[linenum - 1]
+ prevbrace = prev_line.rfind('{')
+ # TODO(unknown): Don't complain if line before blank line, and line after,
+ # both start with alnums and are indented the same amount.
+ # This ignores whitespace at the start of a namespace block
+ # because those are not usually indented.
+ if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
+ # OK, we have a blank line at the start of a code block. Before we
+ # complain, we check if it is an exception to the rule: The previous
+ # non-empty line has the parameters of a function header that are indented
+ # 4 spaces (because they did not fit in an 80 column line when placed on
+ # the same line as the function name). We also check for the case where
+ # the previous line is indented 6 spaces, which may happen when the
+ # initializers of a constructor do not fit into an 80 column line.
+ exception = False
+ if Match(r' {6}\w', prev_line): # Initializer list?
+ # We are looking for the opening column of initializer list, which
+ # should be indented 4 spaces to cause 6 space indentation afterwards.
+ search_position = linenum-2
+ while (search_position >= 0
+ and Match(r' {6}\w', elided[search_position])):
+ search_position -= 1
+ exception = (search_position >= 0
+ and elided[search_position][:5] == ' :')
+ else:
+ # Search for the function arguments or an initializer list. We use a
+ # simple heuristic here: if the line is indented 4 spaces and we have a
+ # closing paren, without the opening paren, followed by an opening brace
+ # or colon (for initializer lists), we assume that it is the last line of
+ # a function header. If we have a colon indented 4 spaces, it is an
+ # initializer list.
+ exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
+ prev_line)
+ or Match(r' {4}:', prev_line))
+
+ if not exception:
+ error(filename, linenum, 'whitespace/blank_line', 2,
+ 'Blank line at the start of a code block. Is this needed?')
+ # Ignore blank lines at the end of a block in a long if-else
+ # chain, like this:
+ # if (condition1) {
+ # // Something followed by a blank line
+ #
+ # } else if (condition2) {
+ # // Something else
+ # }
+ if linenum + 1 < clean_lines.NumLines():
+ next_line = raw[linenum + 1]
+ if (next_line
+ and Match(r'\s*}', next_line)
+ and next_line.find('} else ') == -1):
+ error(filename, linenum, 'whitespace/blank_line', 3,
+ 'Blank line at the end of a code block. Is this needed?')
+
+ matched = Match(r'\s*(public|protected|private):', prev_line)
+ if matched:
+ error(filename, linenum, 'whitespace/blank_line', 3,
+ 'Do not leave a blank line after "%s:"' % matched.group(1))
+
+ # Next, we complain if there's a comment too near the text
+ commentpos = line.find('//')
+ if commentpos != -1:
+ # Check if the // may be in quotes. If so, ignore it
+ # Comparisons made explicit for clarity -- pylint: disable-msg=C6403
+ if (line.count('"', 0, commentpos) -
+ line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
+ # Allow one space for new scopes, two spaces otherwise:
+ if (not Match(r'^\s*{ //', line) and
+ ((commentpos >= 1 and
+ line[commentpos-1] not in string.whitespace) or
+ (commentpos >= 2 and
+ line[commentpos-2] not in string.whitespace))):
+ error(filename, linenum, 'whitespace/comments', 2,
+ 'At least two spaces is best between code and comments')
+ # There should always be a space between the // and the comment
+ commentend = commentpos + 2
+ if commentend < len(line) and not line[commentend] == ' ':
+ # but some lines are exceptions -- e.g. if they're big
+ # comment delimiters like:
+ # //----------------------------------------------------------
+ # or are an empty C++ style Doxygen comment, like:
+ # ///
+ # or they begin with multiple slashes followed by a space:
+ # //////// Header comment
+ match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
+ Search(r'^/$', line[commentend:]) or
+ Search(r'^/+ ', line[commentend:]))
+ if not match:
+ error(filename, linenum, 'whitespace/comments', 4,
+ 'Should have a space between // and comment')
+ CheckComment(line[commentpos:], filename, linenum, error)
+
+ line = clean_lines.elided[linenum] # get rid of comments and strings
+
+ # Don't try to do spacing checks for operator methods
+ line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', r'operator\(', line)
+
+ # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
+ # Otherwise not. Note we only check for non-spaces on *both* sides;
+ # sometimes people put non-spaces on one side when aligning ='s among
+ # many lines (not that this is behavior that I approve of...)
+ if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
+ error(filename, linenum, 'whitespace/operators', 4,
+ 'Missing spaces around =')
+
+ # It's ok not to have spaces around binary operators like + - * /, but if
+ # there's too little whitespace, we get concerned. It's hard to tell,
+ # though, so we punt on this one for now. TODO.
+
+ # You should always have whitespace around binary operators.
+ #
+ # Check <= and >= first to avoid false positives with < and >, then
+ # check non-include lines for spacing around < and >.
+ match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
+ if match:
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around %s' % match.group(1))
+ # We allow no-spaces around << when used like this: 10<<20, but
+ # not otherwise (particularly, not when used as streams)
+ match = Search(r'(\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line)
+ if match and not (match.group(1).isdigit() and match.group(2).isdigit()):
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around <<')
+ elif not Match(r'#.*include', line):
+ # Avoid false positives on ->
+ reduced_line = line.replace('->', '')
+
+ # Look for < that is not surrounded by spaces. This is only
+ # triggered if both sides are missing spaces, even though
+ # technically we should flag it if at least one side is missing a
+ # space. This is done to avoid some false positives with shifts.
+ match = Search(r'[^\s<]<([^\s=<].*)', reduced_line)
+ if (match and
+ not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1))):
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around <')
+
+ # Look for > that is not surrounded by spaces. Similar to the
+ # above, we only trigger if both sides are missing spaces to avoid
+ # false positives with shifts.
+ match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line)
+ if (match and
+ not FindPreviousMatchingAngleBracket(clean_lines, linenum,
+ match.group(1))):
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around >')
+
+ # We allow no-spaces around >> for almost anything. This is because
+ # C++11 allows ">>" to close nested templates, which accounts for
+ # most cases when ">>" is not followed by a space.
+ #
+ # We still warn on ">>" followed by alpha character, because that is
+ # likely due to ">>" being used for right shifts, e.g.:
+ # value >> alpha
+ #
+ # When ">>" is used to close templates, the alphanumeric letter that
+ # follows would be part of an identifier, and there should still be
+ # a space separating the template type and the identifier.
+ # type<type<type>> alpha
+ match = Search(r'>>[a-zA-Z_]', line)
+ if match:
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around >>')
+
+ # There shouldn't be space around unary operators
+ match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
+ if match:
+ error(filename, linenum, 'whitespace/operators', 4,
+ 'Extra space for operator %s' % match.group(1))
+
+ # A pet peeve of mine: no spaces after an if, while, switch, or for
+ match = Search(r' (if\(|for\(|while\(|switch\()', line)
+ if match:
+ error(filename, linenum, 'whitespace/parens', 5,
+ 'Missing space before ( in %s' % match.group(1))
+
+ # For if/for/while/switch, the left and right parens should be
+ # consistent about how many spaces are inside the parens, and
+ # there should either be zero or one spaces inside the parens.
+ # We don't want: "if ( foo)" or "if ( foo )".
+ # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
+ match = Search(r'\b(if|for|while|switch)\s*'
+ r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
+ line)
+ if match:
+ if len(match.group(2)) != len(match.group(4)):
+ if not (match.group(3) == ';' and
+ len(match.group(2)) == 1 + len(match.group(4)) or
+ not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
+ error(filename, linenum, 'whitespace/parens', 5,
+ 'Mismatching spaces inside () in %s' % match.group(1))
+ if len(match.group(2)) not in (0, 1):
+ error(filename, linenum, 'whitespace/parens', 5,
+ 'Should have zero or one spaces inside ( and ) in %s' %
+ match.group(1))
+
+ # You should always have a space after a comma (either as fn arg or operator)
+ if Search(r',[^\s]', line):
+ error(filename, linenum, 'whitespace/comma', 3,
+ 'Missing space after ,')
+
+ # You should always have a space after a semicolon
+ # except for few corner cases
+ # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
+ # space after ;
+ if Search(r';[^\s};\\)/]', line):
+ error(filename, linenum, 'whitespace/semicolon', 3,
+ 'Missing space after ;')
+
+ # Next we will look for issues with function calls.
+ CheckSpacingForFunctionCall(filename, line, linenum, error)
+
+ # Except after an opening paren, or after another opening brace (in case of
+ # an initializer list, for instance), you should have spaces before your
+ # braces. And since you should never have braces at the beginning of a line,
+ # this is an easy test.
+ if Search(r'[^ ({]{', line):
+ error(filename, linenum, 'whitespace/braces', 5,
+ 'Missing space before {')
+
+ # Make sure '} else {' has spaces.
+ if Search(r'}else', line):
+ error(filename, linenum, 'whitespace/braces', 5,
+ 'Missing space before else')
+
+ # You shouldn't have spaces before your brackets, except maybe after
+ # 'delete []' or 'new char * []'.
+ if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
+ error(filename, linenum, 'whitespace/braces', 5,
+ 'Extra space before [')
+
+ # You shouldn't have a space before a semicolon at the end of the line.
+ # There's a special case for "for" since the style guide allows space before
+ # the semicolon there.
+ if Search(r':\s*;\s*$', line):
+ error(filename, linenum, 'whitespace/semicolon', 5,
+ 'Semicolon defining empty statement. Use {} instead.')
+ elif Search(r'^\s*;\s*$', line):
+ error(filename, linenum, 'whitespace/semicolon', 5,
+ 'Line contains only semicolon. If this should be an empty statement, '
+ 'use {} instead.')
+ elif (Search(r'\s+;\s*$', line) and
+ not Search(r'\bfor\b', line)):
+ error(filename, linenum, 'whitespace/semicolon', 5,
+ 'Extra space before last semicolon. If this should be an empty '
+ 'statement, use {} instead.')
+
+ # In range-based for, we want spaces before and after the colon, but
+ # not around "::" tokens that might appear.
+ if (Search(r'for *\(.*[^:]:[^: ]', line) or
+ Search(r'for *\(.*[^: ]:[^:]', line)):
+ error(filename, linenum, 'whitespace/forcolon', 2,
+ 'Missing space around colon in range-based for loop')
+
+
+def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
+ """Checks for additional blank line issues related to sections.
+
+ Currently the only thing checked here is blank line before protected/private.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ class_info: A _ClassInfo object.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ # Skip checks if the class is small, where small means 25 lines or less.
+ # 25 lines seems like a good cutoff since that's the usual height of
+ # terminals, and any class that can't fit in one screen can't really
+ # be considered "small".
+ #
+ # Also skip checks if we are on the first line. This accounts for
+ # classes that look like
+ # class Foo { public: ... };
+ #
+ # If we didn't find the end of the class, last_line would be zero,
+ # and the check is skipped by the first condition.
+ if (class_info.last_line - class_info.starting_linenum <= 24 or
+ linenum <= class_info.starting_linenum):
+ return
+
+ matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
+ if matched:
+ # Issue warning if the line before public/protected/private was
+ # not a blank line, but don't do this if the previous line contains
+ # "class" or "struct". This can happen two ways:
+ # - We are at the beginning of the class.
+ # - We are forward-declaring an inner class that is semantically
+ # private, but needed to be public for implementation reasons.
+ # Also ignores cases where the previous line ends with a backslash as can be
+ # common when defining classes in C macros.
+ prev_line = clean_lines.lines[linenum - 1]
+ if (not IsBlankLine(prev_line) and
+ not Search(r'\b(class|struct)\b', prev_line) and
+ not Search(r'\\$', prev_line)):
+ # Try a bit harder to find the beginning of the class. This is to
+ # account for multi-line base-specifier lists, e.g.:
+ # class Derived
+ # : public Base {
+ end_class_head = class_info.starting_linenum
+ for i in range(class_info.starting_linenum, linenum):
+ if Search(r'\{\s*$', clean_lines.lines[i]):
+ end_class_head = i
+ break
+ if end_class_head < linenum - 1:
+ error(filename, linenum, 'whitespace/blank_line', 3,
+ '"%s:" should be preceded by a blank line' % matched.group(1))
+
+
+def GetPreviousNonBlankLine(clean_lines, linenum):
+ """Return the most recent non-blank line and its line number.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file contents.
+ linenum: The number of the line to check.
+
+ Returns:
+ A tuple with two elements. The first element is the contents of the last
+ non-blank line before the current line, or the empty string if this is the
+ first non-blank line. The second is the line number of that line, or -1
+ if this is the first non-blank line.
+ """
+
+ prevlinenum = linenum - 1
+ while prevlinenum >= 0:
+ prevline = clean_lines.elided[prevlinenum]
+ if not IsBlankLine(prevline): # if not a blank line...
+ return (prevline, prevlinenum)
+ prevlinenum -= 1
+ return ('', -1)
+
+
+def CheckBraces(filename, clean_lines, linenum, error):
+ """Looks for misplaced braces (e.g. at the end of line).
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ line = clean_lines.elided[linenum] # get rid of comments and strings
+
+ if Match(r'\s*{\s*$', line):
+ # We allow an open brace to start a line in the case where someone
+ # is using braces in a block to explicitly create a new scope,
+ # which is commonly used to control the lifetime of
+ # stack-allocated variables. We don't detect this perfectly: we
+ # just don't complain if the last non-whitespace character on the
+ # previous non-blank line is ';', ':', '{', or '}', or if the previous
+ # line starts a preprocessor block.
+ prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
+ if (not Search(r'[;:}{]\s*$', prevline) and
+ not Match(r'\s*#', prevline)):
+ error(filename, linenum, 'whitespace/braces', 4,
+ '{ should almost always be at the end of the previous line')
+
+ # An else clause should be on the same line as the preceding closing brace.
+ if Match(r'\s*else\s*', line):
+ prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
+ if Match(r'\s*}\s*$', prevline):
+ error(filename, linenum, 'whitespace/newline', 4,
+ 'An else should appear on the same line as the preceding }')
+
+ # If braces come on one side of an else, they should be on both.
+ # However, we have to worry about "else if" that spans multiple lines!
+ if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
+ if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if
+ # find the ( after the if
+ pos = line.find('else if')
+ pos = line.find('(', pos)
+ if pos > 0:
+ (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
+ if endline[endpos:].find('{') == -1: # must be brace after if
+ error(filename, linenum, 'readability/braces', 5,
+ 'If an else has a brace on one side, it should have it on both')
+ else: # common case: else not followed by a multi-line if
+ error(filename, linenum, 'readability/braces', 5,
+ 'If an else has a brace on one side, it should have it on both')
+
+ # Likewise, an else should never have its clause on the same line
+ if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
+ error(filename, linenum, 'whitespace/newline', 4,
+ 'Else clause should never be on same line as else (use 2 lines)')
+
+ # In the same way, a do/while should never be on one line
+ if Match(r'\s*do [^\s{]', line):
+ error(filename, linenum, 'whitespace/newline', 4,
+ 'do/while clauses should not be on a single line')
+
+ # Braces shouldn't be followed by a ; unless they're defining a struct
+ # or initializing an array.
+ # We can't tell in general, but we can for some common cases.
+ prevlinenum = linenum
+ while True:
+ (prevline, prevlinenum) = GetPreviousNonBlankLine(clean_lines, prevlinenum)
+ if Match(r'\s+{.*}\s*;', line) and not prevline.count(';'):
+ line = prevline + line
+ else:
+ break
+ if (Search(r'{.*}\s*;', line) and
+ line.count('{') == line.count('}') and
+ not Search(r'struct|class|enum|\s*=\s*{', line)):
+ error(filename, linenum, 'readability/braces', 4,
+ "You don't need a ; after a }")
+
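+# For illustration (a sketch, not part of cpplint), elided lines and the
+# message each would draw from CheckBraces:
+#
+#   '{'                    -> whitespace/braces, unless the previous line ends
+#                             in ';', ':', '{', '}' or starts a #-block
+#   '} else return;'       -> readability/braces, brace on one side of else
+#   'else DoSomething();'  -> whitespace/newline, body on the else line
+#   'do x++;'              -> whitespace/newline, one-line do/while
+#   'struct T {int x;};'   -> no error; struct definitions may end in ';'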
+
+def CheckEmptyLoopBody(filename, clean_lines, linenum, error):
+ """Loop for empty loop body with only a single semicolon.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+  # Search for loop keywords at the beginning of the line. Because only
+  # whitespace is allowed before the keywords, this will also ignore most
+  # do-while loops, since those lines should start with a closing brace.
+ line = clean_lines.elided[linenum]
+ if Match(r'\s*(for|while)\s*\(', line):
+ # Find the end of the conditional expression
+ (end_line, end_linenum, end_pos) = CloseExpression(
+ clean_lines, linenum, line.find('('))
+
+ # Output warning if what follows the condition expression is a semicolon.
+ # No warning for all other cases, including whitespace or newline, since we
+ # have a separate check for semicolons preceded by whitespace.
+ if end_pos >= 0 and Match(r';', end_line[end_pos:]):
+ error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
+ 'Empty loop bodies should use {} or continue')
+
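+# For illustration (not part of cpplint): 'while (Poll());' ends its condition
+# with a bare ';', so the check above flags it; rewriting it as
+# 'while (Poll()) {}' or 'while (Poll()) continue;' silences the warning.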
+
+def ReplaceableCheck(operator, macro, line):
+ """Determine whether a basic CHECK can be replaced with a more specific one.
+
+  For example, suggest using CHECK_EQ instead of CHECK(a == b) and
+ similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE.
+
+ Args:
+ operator: The C++ operator used in the CHECK.
+ macro: The CHECK or EXPECT macro being called.
+ line: The current source line.
+
+ Returns:
+ True if the CHECK can be replaced with a more specific one.
+ """
+
+ # This matches decimal and hex integers, strings, and chars (in that order).
+ match_constant = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')'
+
+ # Expression to match two sides of the operator with something that
+ # looks like a literal, since CHECK(x == iterator) won't compile.
+ # This means we can't catch all the cases where a more specific
+ # CHECK is possible, but it's less annoying than dealing with
+ # extraneous warnings.
+ match_this = (r'\s*' + macro + r'\((\s*' +
+ match_constant + r'\s*' + operator + r'[^<>].*|'
+ r'.*[^<>]' + operator + r'\s*' + match_constant +
+ r'\s*\))')
+
+ # Don't complain about CHECK(x == NULL) or similar because
+ # CHECK_EQ(x, NULL) won't compile (requires a cast).
+ # Also, don't complain about more complex boolean expressions
+ # involving && or || such as CHECK(a == b || c == d).
+ return Match(match_this, line) and not Search(r'NULL|&&|\|\|', line)
+
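+# A quick illustration of ReplaceableCheck (a sketch, not part of cpplint):
+#
+#   ReplaceableCheck('==', 'CHECK', 'CHECK(x == 42)')    # True
+#   ReplaceableCheck('==', 'CHECK', 'CHECK(x == y)')     # False: no literal
+#   ReplaceableCheck('==', 'CHECK', 'CHECK(p == NULL)')  # False: NULL case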
+
+def CheckCheck(filename, clean_lines, linenum, error):
+ """Checks the use of CHECK and EXPECT macros.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ # Decide the set of replacement macros that should be suggested
+ raw_lines = clean_lines.raw_lines
+ current_macro = ''
+ for macro in _CHECK_MACROS:
+ if raw_lines[linenum].find(macro) >= 0:
+ current_macro = macro
+ break
+ if not current_macro:
+ # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
+ return
+
+ line = clean_lines.elided[linenum] # get rid of comments and strings
+
+ # Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc.
+ for operator in ['==', '!=', '>=', '>', '<=', '<']:
+ if ReplaceableCheck(operator, current_macro, line):
+ error(filename, linenum, 'readability/check', 2,
+ 'Consider using %s instead of %s(a %s b)' % (
+ _CHECK_REPLACEMENT[current_macro][operator],
+ current_macro, operator))
+ break
+
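+# For illustration: given the line 'CHECK(count == 0);', the loop above
+# suggests CHECK_EQ via the _CHECK_REPLACEMENT table defined earlier in this
+# file; the 'break' ensures only the first matching operator is reported.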
+
+def CheckAltTokens(filename, clean_lines, linenum, error):
+ """Check alternative keywords being used in boolean expressions.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+
+ # Avoid preprocessor lines
+ if Match(r'^\s*#', line):
+ return
+
+  # Last-ditch effort to avoid multi-line comments. This will not help
+  # if the comment started before the current line or ended after the
+  # current line, but it catches most of the false positives. At least,
+  # it provides a way to work around this warning for people who use
+  # multi-line comments in preprocessor macros.
+ #
+ # TODO(unknown): remove this once cpplint has better support for
+ # multi-line comments.
+ if line.find('/*') >= 0 or line.find('*/') >= 0:
+ return
+
+ for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
+ error(filename, linenum, 'readability/alt_tokens', 2,
+ 'Use operator %s instead of %s' % (
+ _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
+
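+# For illustration, assuming the replacement table referenced above maps
+# 'and' to '&&' and 'not' to '!': a line like 'if (x and not y)' draws two
+# readability/alt_tokens errors suggesting 'if (x && !y)'.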
+
+def GetLineWidth(line):
+ """Determines the width of the line in column positions.
+
+ Args:
+ line: A string, which may be a Unicode string.
+
+ Returns:
+ The width of the line in column positions, accounting for Unicode
+ combining characters and wide characters.
+ """
+ if isinstance(line, unicode):
+ width = 0
+ for uc in unicodedata.normalize('NFC', line):
+ if unicodedata.east_asian_width(uc) in ('W', 'F'):
+ width += 2
+ elif not unicodedata.combining(uc):
+ width += 1
+ return width
+ else:
+ return len(line)
+
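+# Small illustrations of GetLineWidth (not part of cpplint):
+#
+#   GetLineWidth('abc')            # -> 3
+#   GetLineWidth(u'\u4e2d\u6587')  # -> 4, two wide CJK characters
+#   GetLineWidth(u'e\u0301')       # -> 1, NFC folds the combining accent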
+
+def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
+ error):
+ """Checks rules from the 'C++ style rules' section of cppguide.html.
+
+ Most of these rules are hard to test (naming, comment style), but we
+ do what we can. In particular we check for 2-space indents, line lengths,
+ tab usage, spaces inside code, etc.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ file_extension: The extension (without the dot) of the filename.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
+ error: The function to call with any errors found.
+ """
+
+ raw_lines = clean_lines.raw_lines
+ line = raw_lines[linenum]
+
+ if line.find('\t') != -1:
+ error(filename, linenum, 'whitespace/tab', 1,
+ 'Tab found; better to use spaces')
+
+ # One or three blank spaces at the beginning of the line is weird; it's
+ # hard to reconcile that with 2-space indents.
+  # NOTE: here are the conditions Rob Pike used for his tests. Mine aren't
+ # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
+ # if(RLENGTH > 20) complain = 0;
+ # if(match($0, " +(error|private|public|protected):")) complain = 0;
+ # if(match(prev, "&& *$")) complain = 0;
+ # if(match(prev, "\\|\\| *$")) complain = 0;
+ # if(match(prev, "[\",=><] *$")) complain = 0;
+ # if(match($0, " <<")) complain = 0;
+ # if(match(prev, " +for \\(")) complain = 0;
+ # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
+ initial_spaces = 0
+ cleansed_line = clean_lines.elided[linenum]
+ while initial_spaces < len(line) and line[initial_spaces] == ' ':
+ initial_spaces += 1
+ if line and line[-1].isspace():
+ error(filename, linenum, 'whitespace/end_of_line', 4,
+ 'Line ends in whitespace. Consider deleting these extra spaces.')
+  # There are certain situations in which we allow one space, notably for labels
+ elif ((initial_spaces == 1 or initial_spaces == 3) and
+ not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
+ error(filename, linenum, 'whitespace/indent', 3,
+ 'Weird number of spaces at line-start. '
+ 'Are you using a 2-space indent?')
+ # Labels should always be indented at least one space.
+ elif not initial_spaces and line[:2] != '//' and Search(r'[^:]:\s*$',
+ line):
+ error(filename, linenum, 'whitespace/labels', 4,
+ 'Labels should always be indented at least one space. '
+ 'If this is a member-initializer list in a constructor or '
+ 'the base class list in a class definition, the colon should '
+ 'be on the following line.')
+
+ # Check if the line is a header guard.
+ is_header_guard = False
+ if file_extension == 'h':
+ cppvar = GetHeaderGuardCPPVariable(filename)
+ if (line.startswith('#ifndef %s' % cppvar) or
+ line.startswith('#define %s' % cppvar) or
+ line.startswith('#endif // %s' % cppvar)):
+ is_header_guard = True
+ # #include lines and header guards can be long, since there's no clean way to
+ # split them.
+ #
+ # URLs can be long too. It's possible to split these, but it makes them
+ # harder to cut&paste.
+ #
+ # The "$Id:...$" comment may also get very long without it being the
+  # developer's fault.
+ if (not line.startswith('#include') and not is_header_guard and
+ not Match(r'^\s*//.*http(s?)://\S*$', line) and
+ not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
+ line_width = GetLineWidth(line)
+ if line_width > 100:
+ error(filename, linenum, 'whitespace/line_length', 4,
+ 'Lines should very rarely be longer than 100 characters')
+ elif line_width > 80:
+ error(filename, linenum, 'whitespace/line_length', 2,
+ 'Lines should be <= 80 characters long')
+
+ if (cleansed_line.count(';') > 1 and
+ # for loops are allowed two ;'s (and may run over two lines).
+ cleansed_line.find('for') == -1 and
+ (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
+ GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
+ # It's ok to have many commands in a switch case that fits in 1 line
+ not ((cleansed_line.find('case ') != -1 or
+ cleansed_line.find('default:') != -1) and
+ cleansed_line.find('break;') != -1)):
+ error(filename, linenum, 'whitespace/newline', 0,
+ 'More than one command on the same line')
+
+ # Some more style checks
+ CheckBraces(filename, clean_lines, linenum, error)
+ CheckEmptyLoopBody(filename, clean_lines, linenum, error)
+ CheckAccess(filename, clean_lines, linenum, nesting_state, error)
+ CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
+ CheckCheck(filename, clean_lines, linenum, error)
+ CheckAltTokens(filename, clean_lines, linenum, error)
+ classinfo = nesting_state.InnermostClass()
+ if classinfo:
+ CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
+
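+# For illustration: under the indentation check above, a line such as
+# ' int x;' (one leading space) draws whitespace/indent unless it is a label
+# like ' public:', and a line over 80 columns draws whitespace/line_length at
+# level 2, escalating to level 4 past 100 columns.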
+
+_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
+_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
+# Matches the first component of a filename delimited by -s and _s. That is:
+# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
+# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
+# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
+# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
+_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
+
+
+def _DropCommonSuffixes(filename):
+ """Drops common suffixes like _test.cc or -inl.h from filename.
+
+ For example:
+ >>> _DropCommonSuffixes('foo/foo-inl.h')
+ 'foo/foo'
+ >>> _DropCommonSuffixes('foo/bar/foo.cc')
+ 'foo/bar/foo'
+ >>> _DropCommonSuffixes('foo/foo_internal.h')
+ 'foo/foo'
+ >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
+ 'foo/foo_unusualinternal'
+
+ Args:
+ filename: The input filename.
+
+ Returns:
+ The filename with the common suffix removed.
+ """
+ for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
+ 'inl.h', 'impl.h', 'internal.h'):
+ if (filename.endswith(suffix) and len(filename) > len(suffix) and
+ filename[-len(suffix) - 1] in ('-', '_')):
+ return filename[:-len(suffix) - 1]
+ return os.path.splitext(filename)[0]
+
+
+def _IsTestFilename(filename):
+ """Determines if the given filename has a suffix that identifies it as a test.
+
+ Args:
+ filename: The input filename.
+
+ Returns:
+ True if 'filename' looks like a test, False otherwise.
+ """
+ if (filename.endswith('_test.cc') or
+ filename.endswith('_unittest.cc') or
+ filename.endswith('_regtest.cc')):
+ return True
+ else:
+ return False
+
+
+def _ClassifyInclude(fileinfo, include, is_system):
+ """Figures out what kind of header 'include' is.
+
+ Args:
+ fileinfo: The current file cpplint is running over. A FileInfo instance.
+ include: The path to a #included file.
+ is_system: True if the #include used <> rather than "".
+
+ Returns:
+ One of the _XXX_HEADER constants.
+
+ For example:
+ >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
+ _C_SYS_HEADER
+ >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
+ _CPP_SYS_HEADER
+ >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
+ _LIKELY_MY_HEADER
+ >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
+ ... 'bar/foo_other_ext.h', False)
+ _POSSIBLE_MY_HEADER
+ >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
+ _OTHER_HEADER
+ """
+ # This is a list of all standard c++ header files, except
+ # those already checked for above.
+ is_stl_h = include in _STL_HEADERS
+ is_cpp_h = is_stl_h or include in _CPP_HEADERS
+
+ if is_system:
+ if is_cpp_h:
+ return _CPP_SYS_HEADER
+ else:
+ return _C_SYS_HEADER
+
+ # If the target file and the include we're checking share a
+ # basename when we drop common extensions, and the include
+ # lives in . , then it's likely to be owned by the target file.
+ target_dir, target_base = (
+ os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
+ include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
+ if target_base == include_base and (
+ include_dir == target_dir or
+ include_dir == os.path.normpath(target_dir + '/../public')):
+ return _LIKELY_MY_HEADER
+
+ # If the target and include share some initial basename
+ # component, it's possible the target is implementing the
+ # include, so it's allowed to be first, but we'll never
+ # complain if it's not there.
+ target_first_component = _RE_FIRST_COMPONENT.match(target_base)
+ include_first_component = _RE_FIRST_COMPONENT.match(include_base)
+ if (target_first_component and include_first_component and
+ target_first_component.group(0) ==
+ include_first_component.group(0)):
+ return _POSSIBLE_MY_HEADER
+
+ return _OTHER_HEADER
+
+
+def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
+ """Check rules that are applicable to #include lines.
+
+  Strings on #include lines are NOT removed from the elided line, to make
+ certain tasks easier. However, to prevent false positives, checks
+ applicable to #include lines in CheckLanguage must be put here.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ include_state: An _IncludeState instance in which the headers are inserted.
+ error: The function to call with any errors found.
+ """
+ fileinfo = FileInfo(filename)
+
+ line = clean_lines.lines[linenum]
+
+ # "include" should use the new style "foo/bar.h" instead of just "bar.h"
+ if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
+ error(filename, linenum, 'build/include', 4,
+ 'Include the directory when naming .h files')
+
+  # We shouldn't include a file more than once. Actually, there are a
+  # handful of instances where doing so is okay, but in general it's
+  # not.
+ match = _RE_PATTERN_INCLUDE.search(line)
+ if match:
+ include = match.group(2)
+ is_system = (match.group(1) == '<')
+ if include in include_state:
+ error(filename, linenum, 'build/include', 4,
+ '"%s" already included at %s:%s' %
+ (include, filename, include_state[include]))
+ else:
+ include_state[include] = linenum
+
+ # We want to ensure that headers appear in the right order:
+ # 1) for foo.cc, foo.h (preferred location)
+ # 2) c system files
+ # 3) cpp system files
+ # 4) for foo.cc, foo.h (deprecated location)
+ # 5) other google headers
+ #
+ # We classify each include statement as one of those 5 types
+ # using a number of techniques. The include_state object keeps
+ # track of the highest type seen, and complains if we see a
+ # lower type after that.
+ error_message = include_state.CheckNextIncludeOrder(
+ _ClassifyInclude(fileinfo, include, is_system))
+ if error_message:
+ error(filename, linenum, 'build/include_order', 4,
+ '%s. Should be: %s.h, c system, c++ system, other.' %
+ (error_message, fileinfo.BaseName()))
+ if not include_state.IsInAlphabeticalOrder(include):
+ error(filename, linenum, 'build/include_alpha', 4,
+ 'Include "%s" not in alphabetical order' % include)
+
+ # Look for any of the stream classes that are part of standard C++.
+ match = _RE_PATTERN_INCLUDE.match(line)
+ if match:
+ include = match.group(2)
+ if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
+ # Many unit tests use cout, so we exempt them.
+ if not _IsTestFilename(filename):
+ error(filename, linenum, 'readability/streams', 3,
+ 'Streams are highly discouraged.')
+
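+# An ordering that satisfies the policy above for a file foo/foo.cc (an
+# illustrative sketch; 'bar/util.h' is a made-up name):
+#
+#   #include "foo/foo.h"   // 1) own header, preferred location
+#   #include <stdio.h>     // 2) C system headers
+#   #include <string>      // 3) C++ system headers
+#   #include "bar/util.h"  // 5) other project headers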
+
+def _GetTextInside(text, start_pattern):
+ """Retrieves all the text between matching open and close parentheses.
+
+  Given a string of lines and a regular expression string, retrieve all the
+  text following the expression and between opening punctuation symbols like
+  (, [, or {, and the matching close-punctuation symbol. This properly handles
+  nested occurrences of the punctuation, so for text like
+    printf(a(), b(c()));
+  a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
+  start_pattern must match a string that ends with an opening punctuation
+  symbol.
+
+ Args:
+    text: The text to extract from. Its comments and strings must be elided.
+          It may be a single line or span multiple lines.
+ start_pattern: The regexp string indicating where to start extracting
+ the text.
+ Returns:
+ The extracted text.
+ None if either the opening string or ending punctuation could not be found.
+ """
+ # TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
+ # rewritten to use _GetTextInside (and use inferior regexp matching today).
+
+  # Map each opening punctuation symbol to its matching close-punctuation.
+ matching_punctuation = {'(': ')', '{': '}', '[': ']'}
+ closing_punctuation = set(matching_punctuation.itervalues())
+
+ # Find the position to start extracting text.
+ match = re.search(start_pattern, text, re.M)
+ if not match: # start_pattern not found in text.
+ return None
+ start_position = match.end(0)
+
+  assert start_position > 0, (
+      'start_pattern must end with an opening punctuation.')
+  assert text[start_position - 1] in matching_punctuation, (
+      'start_pattern must end with an opening punctuation.')
+ # Stack of closing punctuations we expect to have in text after position.
+ punctuation_stack = [matching_punctuation[text[start_position - 1]]]
+ position = start_position
+ while punctuation_stack and position < len(text):
+ if text[position] == punctuation_stack[-1]:
+ punctuation_stack.pop()
+ elif text[position] in closing_punctuation:
+ # A closing punctuation without matching opening punctuations.
+ return None
+ elif text[position] in matching_punctuation:
+ punctuation_stack.append(matching_punctuation[text[position]])
+ position += 1
+ if punctuation_stack:
+ # Opening punctuations left without matching close-punctuations.
+ return None
+  # All punctuation matched.
+ return text[start_position:position - 1]
+
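+# Illustrative calls to _GetTextInside (not part of cpplint):
+#
+#   _GetTextInside('printf(a(), b(c()));', r'printf\(')  # -> 'a(), b(c())'
+#   _GetTextInside('foo(bar(', r'foo\(')                 # -> None, unbalanced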
+
+def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state,
+ error):
+ """Checks rules from the 'C++ language rules' section of cppguide.html.
+
+ Some of these rules are hard to test (function overloading, using
+ uint32 inappropriately), but we do the best we can.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ file_extension: The extension (without the dot) of the filename.
+ include_state: An _IncludeState instance in which the headers are inserted.
+ error: The function to call with any errors found.
+ """
+ # If the line is empty or consists of entirely a comment, no need to
+ # check it.
+ line = clean_lines.elided[linenum]
+ if not line:
+ return
+
+ match = _RE_PATTERN_INCLUDE.search(line)
+ if match:
+ CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
+ return
+
+ # Create an extended_line, which is the concatenation of the current and
+ # next lines, for more effective checking of code that may span more than one
+ # line.
+ if linenum + 1 < clean_lines.NumLines():
+ extended_line = line + clean_lines.elided[linenum + 1]
+ else:
+ extended_line = line
+
+ # Make Windows paths like Unix.
+ fullname = os.path.abspath(filename).replace('\\', '/')
+
+ # TODO(unknown): figure out if they're using default arguments in fn proto.
+
+ # Check for non-const references in functions. This is tricky because &
+ # is also used to take the address of something. We allow <> for templates,
+ # (ignoring whatever is between the braces) and : for classes.
+ # These are complicated re's. They try to capture the following:
+ # paren (for fn-prototype start), typename, &, varname. For the const
+ # version, we're willing for const to be before typename or after
+ # Don't check the implementation on same line.
+ fnline = line.split('{', 1)[0]
+ if (len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) >
+ len(re.findall(r'\([^()]*\bconst\s+(?:typename\s+)?(?:struct\s+)?'
+ r'(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) +
+ len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+\s+const(\s?&|&\s?)[\w]+',
+ fnline))):
+
+ # We allow non-const references in a few standard places, like functions
+ # called "swap()" or iostream operators like "<<" or ">>". We also filter
+ # out for loops, which lint otherwise mistakenly thinks are functions.
+ if not Search(
+ r'(for|swap|Swap|operator[<>][<>])\s*\(\s*'
+ r'(?:(?:typename\s*)?[\w:]|<.*>)+\s*&',
+ fnline):
+ error(filename, linenum, 'runtime/references', 2,
+ 'Is this a non-const reference? '
+ 'If so, make const or use a pointer.')
+
+  # Check to see if they're using a conversion function cast.
+ # I just try to capture the most common basic types, though there are more.
+ # Parameterless conversion functions, such as bool(), are allowed as they are
+ # probably a member operator declaration or default constructor.
+ match = Search(
+ r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there
+ r'(int|float|double|bool|char|int32|uint32|int64|uint64)\([^)]', line)
+ if match:
+ # gMock methods are defined using some variant of MOCK_METHODx(name, type)
+ # where type may be float(), int(string), etc. Without context they are
+ # virtually indistinguishable from int(x) casts. Likewise, gMock's
+ # MockCallback takes a template parameter of the form return_type(arg_type),
+ # which looks much like the cast we're trying to detect.
+ if (match.group(1) is None and # If new operator, then this isn't a cast
+ not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
+ Match(r'^\s*MockCallback<.*>', line))):
+ # Try a bit harder to catch gmock lines: the only place where
+ # something looks like an old-style cast is where we declare the
+ # return type of the mocked method, and the only time when we
+ # are missing context is if MOCK_METHOD was split across
+ # multiple lines (for example http://go/hrfhr ), so we only need
+ # to check the previous line for MOCK_METHOD.
+ if (linenum == 0 or
+ not Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(\S+,\s*$',
+ clean_lines.elided[linenum - 1])):
+ error(filename, linenum, 'readability/casting', 4,
+ 'Using deprecated casting style. '
+ 'Use static_cast<%s>(...) instead' %
+ match.group(2))
+
+ CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
+ 'static_cast',
+ r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
+
+ # This doesn't catch all cases. Consider (const char * const)"hello".
+ #
+ # (char *) "foo" should always be a const_cast (reinterpret_cast won't
+ # compile).
+ if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
+ 'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
+ pass
+ else:
+ # Check pointer casts for other than string constants
+ CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
+ 'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
+
+ # In addition, we look for people taking the address of a cast. This
+ # is dangerous -- casts can assign to temporaries, so the pointer doesn't
+ # point where you think.
+ if Search(
+ r'(&\([^)]+\)[\w(])|(&(static|dynamic|reinterpret)_cast\b)', line):
+ error(filename, linenum, 'runtime/casting', 4,
+ ('Are you taking an address of a cast? '
+ 'This is dangerous: could be a temp var. '
+ 'Take the address before doing the cast, rather than after'))
+
+ # Check for people declaring static/global STL strings at the top level.
+ # This is dangerous because the C++ language does not guarantee that
+ # globals with constructors are initialized before the first access.
+ match = Match(
+ r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
+ line)
+ # Make sure it's not a function.
+ # Function template specialization looks like: "string foo<Type>(...".
+ # Class template definitions look like: "string Foo<Type>::Method(...".
+ if match and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)',
+ match.group(3)):
+ error(filename, linenum, 'runtime/string', 4,
+ 'For a static/global string constant, use a C style string instead: '
+ '"%schar %s[]".' %
+ (match.group(1), match.group(2)))
+
+ # Check that we're not using RTTI outside of testing code.
+ if Search(r'\bdynamic_cast<', line) and not _IsTestFilename(filename):
+ error(filename, linenum, 'runtime/rtti', 5,
+ 'Do not use dynamic_cast<>. If you need to cast within a class '
+ "hierarchy, use static_cast<> to upcast. Google doesn't support "
+ 'RTTI.')
+
+ if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
+ error(filename, linenum, 'runtime/init', 4,
+ 'You seem to be initializing a member variable with itself.')
+
+ if file_extension == 'h':
+ # TODO(unknown): check that 1-arg constructors are explicit.
+ # How to tell it's a constructor?
+ # (handled in CheckForNonStandardConstructs for now)
+ # TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
+ # (level 1 error)
+ pass
+
+ # Check if people are using the verboten C basic types. The only exception
+ # we regularly allow is "unsigned short port" for port.
+ if Search(r'\bshort port\b', line):
+ if not Search(r'\bunsigned short port\b', line):
+ error(filename, linenum, 'runtime/int', 4,
+ 'Use "unsigned short" for ports, not "short"')
+ else:
+ match = Search(r'\b(short|long(?! +double)|long long)\b', line)
+ if match:
+ error(filename, linenum, 'runtime/int', 4,
+ 'Use int16/int64/etc, rather than the C type %s' % match.group(1))
+
+ # When snprintf is used, the second argument shouldn't be a literal.
+ match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
+ if match and match.group(2) != '0':
+ # If 2nd arg is zero, snprintf is used to calculate size.
+ error(filename, linenum, 'runtime/printf', 3,
+ 'If you can, use sizeof(%s) instead of %s as the 2nd arg '
+ 'to snprintf.' % (match.group(1), match.group(2)))
+
+ # Check if some verboten C functions are being used.
+ if Search(r'\bsprintf\b', line):
+ error(filename, linenum, 'runtime/printf', 5,
+ 'Never use sprintf. Use snprintf instead.')
+ match = Search(r'\b(strcpy|strcat)\b', line)
+ if match:
+ error(filename, linenum, 'runtime/printf', 4,
+ 'Almost always, snprintf is better than %s' % match.group(1))
+
+ if Search(r'\bsscanf\b', line):
+ error(filename, linenum, 'runtime/printf', 1,
+ 'sscanf can be ok, but is slow and can overflow buffers.')
+
+ # Check if some verboten operator overloading is going on
+ # TODO(unknown): catch out-of-line unary operator&:
+ # class X {};
+ # int operator&(const X& x) { return 42; } // unary operator&
+ # The trick is it's hard to tell apart from binary operator&:
+ # class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
+ if Search(r'\boperator\s*&\s*\(\s*\)', line):
+ error(filename, linenum, 'runtime/operator', 4,
+ 'Unary operator& is dangerous. Do not use it.')
+
+ # Check for suspicious usage of "if" like
+ # } if (a == b) {
+ if Search(r'\}\s*if\s*\(', line):
+ error(filename, linenum, 'readability/braces', 4,
+ 'Did you mean "else if"? If not, start a new line for "if".')
+
+ # Check for potential format string bugs like printf(foo).
+ # We constrain the pattern not to pick things like DocidForPrintf(foo).
+ # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
+ # TODO(sugawarayu): Catch the following case. Need to change the calling
+ # convention of the whole function to process multiple line to handle it.
+ # printf(
+ # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
+ printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
+ if printf_args:
+ match = Match(r'([\w.\->()]+)$', printf_args)
+ if match and match.group(1) != '__VA_ARGS__':
+ function_name = re.search(r'\b((?:string)?printf)\s*\(',
+ line, re.I).group(1)
+ error(filename, linenum, 'runtime/printf', 4,
+ 'Potential format string bug. Do %s("%%s", %s) instead.'
+ % (function_name, match.group(1)))
+
+ # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
+ match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
+ if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
+ error(filename, linenum, 'runtime/memset', 4,
+ 'Did you mean "memset(%s, 0, %s)"?'
+ % (match.group(1), match.group(2)))
+
+ if Search(r'\busing namespace\b', line):
+ error(filename, linenum, 'build/namespaces', 5,
+ 'Do not use namespace using-directives. '
+ 'Use using-declarations instead.')
+
+ # Detect variable-length arrays.
+ match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
+ if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
+ match.group(3).find(']') == -1):
+ # Split the size using space and arithmetic operators as delimiters.
+ # If any of the resulting tokens are not compile time constants then
+ # report the error.
+    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
+ is_const = True
+ skip_next = False
+ for tok in tokens:
+ if skip_next:
+ skip_next = False
+ continue
+
+ if Search(r'sizeof\(.+\)', tok): continue
+ if Search(r'arraysize\(\w+\)', tok): continue
+
+ tok = tok.lstrip('(')
+ tok = tok.rstrip(')')
+ if not tok: continue
+ if Match(r'\d+', tok): continue
+ if Match(r'0[xX][0-9a-fA-F]+', tok): continue
+ if Match(r'k[A-Z0-9]\w*', tok): continue
+ if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
+ if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
+ # A catch all for tricky sizeof cases, including 'sizeof expression',
+ # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
+ # requires skipping the next token because we split on ' ' and '*'.
+ if tok.startswith('sizeof'):
+ skip_next = True
+ continue
+ is_const = False
+ break
+ if not is_const:
+ error(filename, linenum, 'runtime/arrays', 1,
+ 'Do not use variable-length arrays. Use an appropriately named '
+ "('k' followed by CamelCase) compile-time constant for the size.")
+
+ # If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
+ # DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
+ # in the class declaration.
+ match = Match(
+ (r'\s*'
+ r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
+ r'\(.*\);$'),
+ line)
+ if match and linenum + 1 < clean_lines.NumLines():
+ next_line = clean_lines.elided[linenum + 1]
+ # We allow some, but not all, declarations of variables to be present
+ # in the statement that defines the class. The [\w\*,\s]* fragment of
+ # the regular expression below allows users to declare instances of
+ # the class or pointers to instances, but not less common types such
+ # as function pointers or arrays. It's a tradeoff between allowing
+ # reasonable code and avoiding trying to parse more C++ using regexps.
+ if not Search(r'^\s*}[\w\*,\s]*;', next_line):
+ error(filename, linenum, 'readability/constructors', 3,
+ match.group(1) + ' should be the last thing in the class')
+
+ # Check for use of unnamed namespaces in header files. Registration
+ # macros are typically OK, so we allow use of "namespace {" on lines
+ # that end with backslashes.
+ if (file_extension == 'h'
+ and Search(r'\bnamespace\s*{', line)
+ and line[-1] != '\\'):
+ error(filename, linenum, 'build/namespaces', 4,
+ 'Do not use unnamed namespaces in header files. See '
+ 'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
+ ' for more information.')
+
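+# One-line illustrations of some of the checks above (a sketch, not part of
+# cpplint):
+#
+#   'short port;'                  -> runtime/int, want 'unsigned short port'
+#   'sprintf(buf, "%d", x);'       -> runtime/printf, level 5
+#   'memset(buf, sizeof(buf), 0);' -> runtime/memset, arguments swapped
+#   'int arr[n];'                  -> runtime/arrays, variable-length array
+#   'using namespace std;'         -> build/namespaces, level 5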
+
+def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
+ error):
+ """Checks for a C-style cast by looking for the pattern.
+
+ This also handles sizeof(type) warnings, due to similarity of content.
+
+ Args:
+ filename: The name of the current file.
+ linenum: The number of the line to check.
+ line: The line of code to check.
+ raw_line: The raw line of code to check, with comments.
+ cast_type: The string for the C++ cast to recommend. This is either
+ reinterpret_cast, static_cast, or const_cast, depending.
+ pattern: The regular expression used to find C-style casts.
+ error: The function to call with any errors found.
+
+ Returns:
+ True if an error was emitted.
+ False otherwise.
+ """
+ match = Search(pattern, line)
+ if not match:
+ return False
+
+ # e.g., sizeof(int)
+ sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
+ if sizeof_match:
+ error(filename, linenum, 'runtime/sizeof', 1,
+ 'Using sizeof(type). Use sizeof(varname) instead if possible')
+ return True
+
+ # operator++(int) and operator--(int)
+ if (line[0:match.start(1) - 1].endswith(' operator++') or
+ line[0:match.start(1) - 1].endswith(' operator--')):
+ return False
+
+ remainder = line[match.end(0):]
+
+ # The close paren is for function pointers as arguments to a function.
+ # eg, void foo(void (*bar)(int));
+ # The semicolon check is a more basic function check; also possibly a
+ # function pointer typedef.
+ # eg, void foo(int); or void foo(int) const;
+ # The equals check is for function pointer assignment.
+ # eg, void *(*foo)(int) = ...
+ # The > is for MockCallback<...> ...
+ #
+ # Right now, this will only catch cases where there's a single argument, and
+ # it's unnamed. It should probably be expanded to check for multiple
+ # arguments with some unnamed.
+ function_match = Match(r'\s*(\)|=|(const)?\s*(;|\{|throw\(\)|>))', remainder)
+ if function_match:
+ if (not function_match.group(3) or
+ function_match.group(3) == ';' or
+ ('MockCallback<' not in raw_line and
+ '/*' not in raw_line)):
+ error(filename, linenum, 'readability/function', 3,
+ 'All parameters should be named in a function')
+ return True
+
+ # At this point, all that should be left is actual casts.
+ error(filename, linenum, 'readability/casting', 4,
+ 'Using C-style cast. Use %s<%s>(...) instead' %
+ (cast_type, match.group(1)))
+
+ return True
+
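+# For illustration: with the static_cast pattern passed in from CheckLanguage
+# above, 'int x = (int)f();' draws readability/casting with the suggestion
+# static_cast<int>(...), while 'size_t n = sizeof(int);' draws the
+# runtime/sizeof note instead.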
+
+_HEADERS_CONTAINING_TEMPLATES = (
+ ('<deque>', ('deque',)),
+ ('<functional>', ('unary_function', 'binary_function',
+ 'plus', 'minus', 'multiplies', 'divides', 'modulus',
+ 'negate',
+ 'equal_to', 'not_equal_to', 'greater', 'less',
+ 'greater_equal', 'less_equal',
+ 'logical_and', 'logical_or', 'logical_not',
+ 'unary_negate', 'not1', 'binary_negate', 'not2',
+ 'bind1st', 'bind2nd',
+ 'pointer_to_unary_function',
+ 'pointer_to_binary_function',
+ 'ptr_fun',
+ 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
+ 'mem_fun_ref_t',
+ 'const_mem_fun_t', 'const_mem_fun1_t',
+ 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
+ 'mem_fun_ref',
+ )),
+ ('<limits>', ('numeric_limits',)),
+ ('<list>', ('list',)),
+ ('<map>', ('map', 'multimap',)),
+ ('<memory>', ('allocator',)),
+ ('<queue>', ('queue', 'priority_queue',)),
+ ('<set>', ('set', 'multiset',)),
+ ('<stack>', ('stack',)),
+ ('<string>', ('char_traits', 'basic_string',)),
+ ('<utility>', ('pair',)),
+ ('<vector>', ('vector',)),
+
+ # gcc extensions.
+ # Note: std::hash is their hash, ::hash is our hash
+ ('<hash_map>', ('hash_map', 'hash_multimap',)),
+ ('<hash_set>', ('hash_set', 'hash_multiset',)),
+ ('<slist>', ('slist',)),
+ )
+
+_RE_PATTERN_STRING = re.compile(r'\bstring\b')
+
+_re_pattern_algorithm_header = []
+for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
+ 'transform'):
+ # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
+ # type::max().
+ _re_pattern_algorithm_header.append(
+ (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
+ _template,
+ '<algorithm>'))
+
+_re_pattern_templates = []
+for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
+ for _template in _templates:
+ _re_pattern_templates.append(
+ (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
+ _template + '<>',
+ _header))
+
+
+def FilesBelongToSameModule(filename_cc, filename_h):
+ """Check if these two filenames belong to the same module.
+
+  The concept of a 'module' here is as follows:
+ foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
+ same 'module' if they are in the same directory.
+ some/path/public/xyzzy and some/path/internal/xyzzy are also considered
+ to belong to the same module here.
+
+ If the filename_cc contains a longer path than the filename_h, for example,
+ '/absolute/path/to/base/sysinfo.cc', and this file would include
+ 'base/sysinfo.h', this function also produces the prefix needed to open the
+ header. This is used by the caller of this function to more robustly open the
+ header file. We don't have access to the real include paths in this context,
+ so we need this guesswork here.
+
+ Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
+ according to this implementation. Because of this, this function gives
+ some false positives. This should be sufficiently rare in practice.
+
+ Args:
+ filename_cc: is the path for the .cc file
+    filename_h: is the path for the header file
+
+ Returns:
+ Tuple with a bool and a string:
+ bool: True if filename_cc and filename_h belong to the same module.
+ string: the additional prefix needed to open the header file.
+ """
+
+ if not filename_cc.endswith('.cc'):
+ return (False, '')
+ filename_cc = filename_cc[:-len('.cc')]
+ if filename_cc.endswith('_unittest'):
+ filename_cc = filename_cc[:-len('_unittest')]
+ elif filename_cc.endswith('_test'):
+ filename_cc = filename_cc[:-len('_test')]
+ filename_cc = filename_cc.replace('/public/', '/')
+ filename_cc = filename_cc.replace('/internal/', '/')
+
+ if not filename_h.endswith('.h'):
+ return (False, '')
+ filename_h = filename_h[:-len('.h')]
+ if filename_h.endswith('-inl'):
+ filename_h = filename_h[:-len('-inl')]
+ filename_h = filename_h.replace('/public/', '/')
+ filename_h = filename_h.replace('/internal/', '/')
+
+ files_belong_to_same_module = filename_cc.endswith(filename_h)
+ common_path = ''
+ if files_belong_to_same_module:
+ common_path = filename_cc[:-len(filename_h)]
+ return files_belong_to_same_module, common_path
+
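+# Illustrative results (a sketch, not part of cpplint):
+#
+#   FilesBelongToSameModule('a/b/foo_test.cc', 'a/b/foo.h')
+#     # -> (True, '')
+#   FilesBelongToSameModule('/abs/path/to/base/sysinfo.cc', 'base/sysinfo.h')
+#     # -> (True, '/abs/path/to/')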
+
+def UpdateIncludeState(filename, include_state, io=codecs):
+ """Fill up the include_state with new includes found from the file.
+
+ Args:
+ filename: the name of the header to read.
+ include_state: an _IncludeState instance in which the headers are inserted.
+ io: The io factory to use to read the file. Provided for testability.
+
+ Returns:
+    True if a header was successfully added. False otherwise.
+ """
+ headerfile = None
+ try:
+ headerfile = io.open(filename, 'r', 'utf8', 'replace')
+ except IOError:
+ return False
+ linenum = 0
+ for line in headerfile:
+ linenum += 1
+ clean_line = CleanseComments(line)
+ match = _RE_PATTERN_INCLUDE.search(clean_line)
+ if match:
+ include = match.group(2)
+ # The value formatting is cute, but not really used right now.
+ # What matters here is that the key is in include_state.
+ include_state.setdefault(include, '%s:%d' % (filename, linenum))
+ return True
+
+
+def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
+ io=codecs):
+ """Reports for missing stl includes.
+
+ This function will output warnings to make sure you are including the headers
+ necessary for the stl containers and functions that you use. We only give one
+ reason to include a header. For example, if you use both equal_to<> and
+ less<> in a .h file, only one (the latter in the file) of these will be
+ reported as a reason to include the <functional>.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ include_state: An _IncludeState instance.
+ error: The function to call with any errors found.
+ io: The IO factory to use to read the header file. Provided for unittest
+ injection.
+ """
+  required = {}  # A map of header name to line number and the template entity.
+ # Example of required: { '<functional>': (1219, 'less<>') }
+
+ for linenum in xrange(clean_lines.NumLines()):
+ line = clean_lines.elided[linenum]
+ if not line or line[0] == '#':
+ continue
+
+ # String is special -- it is a non-templatized type in STL.
+ matched = _RE_PATTERN_STRING.search(line)
+ if matched:
+ # Don't warn about strings in non-STL namespaces:
+ # (We check only the first match per line; good enough.)
+ prefix = line[:matched.start()]
+ if prefix.endswith('std::') or not prefix.endswith('::'):
+ required['<string>'] = (linenum, 'string')
+
+ for pattern, template, header in _re_pattern_algorithm_header:
+ if pattern.search(line):
+ required[header] = (linenum, template)
+
+    # The following check is just a speed-up; no semantics are changed.
+    if '<' not in line:  # Reduces CPU time by skipping template-free lines.
+ continue
+
+ for pattern, template, header in _re_pattern_templates:
+ if pattern.search(line):
+ required[header] = (linenum, template)
+
+ # The policy is that if you #include something in foo.h you don't need to
+ # include it again in foo.cc. Here, we will look at possible includes.
+ # Let's copy the include_state so it is only messed up within this function.
+ include_state = include_state.copy()
+
+  # Did we find the header for this file (if any) and successfully load it?
+ header_found = False
+
+ # Use the absolute path so that matching works properly.
+ abs_filename = FileInfo(filename).FullName()
+
+ # For Emacs's flymake.
+ # If cpplint is invoked from Emacs's flymake, a temporary file is generated
+ # by flymake and that file name might end with '_flymake.cc'. In that case,
+  # restore the original file name here so that the corresponding header file
+  # can be found.
+ # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
+ # instead of 'foo_flymake.h'
+ abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
+
+ # include_state is modified during iteration, so we iterate over a copy of
+ # the keys.
+ header_keys = include_state.keys()
+ for header in header_keys:
+ (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
+ fullpath = common_path + header
+ if same_module and UpdateIncludeState(fullpath, include_state, io):
+ header_found = True
+
+ # If we can't find the header file for a .cc, assume it's because we don't
+ # know where to look. In that case we'll give up as we're not sure they
+ # didn't include it in the .h file.
+ # TODO(unknown): Do a better job of finding .h files so we are confident that
+ # not having the .h file means there isn't one.
+ if filename.endswith('.cc') and not header_found:
+ return
+
+ # All the lines have been processed, report the errors found.
+ for required_header_unstripped in required:
+ template = required[required_header_unstripped][1]
+ if required_header_unstripped.strip('<>"') not in include_state:
+ error(filename, required[required_header_unstripped][0],
+ 'build/include_what_you_use', 4,
+ 'Add #include ' + required_header_unstripped + ' for ' + template)
+
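+# For illustration: a .h file that uses 'std::string name;' and
+# 'std::less<int>()' without the matching #includes would be told to add
+# <string> and <functional> by the reporting loop above.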
+
+_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
+
+
+def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
+ """Check that make_pair's template arguments are deduced.
+
+ G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are
+ specified explicitly, and such use isn't intended in any case.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ raw = clean_lines.raw_lines
+ line = raw[linenum]
+ match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
+ if match:
+ error(filename, linenum, 'build/explicit_make_pair',
+ 4, # 4 = high confidence
+ 'For C++11-compatibility, omit template arguments from make_pair'
+ ' OR use pair directly OR if appropriate, construct a pair directly')
+
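+# For illustration: 'make_pair<int, int>(1, 2)' triggers the warning above;
+# 'make_pair(1, 2)' (deduced) and a direct 'std::pair<int, int>(1, 2)'
+# construction do not.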
+
+def ProcessLine(filename, file_extension, clean_lines, line,
+ include_state, function_state, nesting_state, error,
+ extra_check_functions=[]):
+ """Processes a single line in the file.
+
+ Args:
+ filename: Filename of the file that is being processed.
+ file_extension: The extension (dot not included) of the file.
+ clean_lines: An array of strings, each representing a line of the file,
+ with comments stripped.
+ line: Number of line being processed.
+ include_state: An _IncludeState instance in which the headers are inserted.
+ function_state: A _FunctionState instance which counts function lines, etc.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
+ error: A callable to which errors are reported, which takes 4 arguments:
+ filename, line number, error level, and message
+ extra_check_functions: An array of additional check functions that will be
+ run on each source line. Each function takes 4
+ arguments: filename, clean_lines, line, error
+ """
+ raw_lines = clean_lines.raw_lines
+ ParseNolintSuppressions(filename, raw_lines[line], line, error)
+ nesting_state.Update(filename, clean_lines, line, error)
+ if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM:
+ return
+ CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
+ CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
+ CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
+ CheckLanguage(filename, clean_lines, line, file_extension, include_state,
+ error)
+ CheckForNonStandardConstructs(filename, clean_lines, line,
+ nesting_state, error)
+ CheckPosixThreading(filename, clean_lines, line, error)
+ CheckInvalidIncrement(filename, clean_lines, line, error)
+ CheckMakePairUsesDeduction(filename, clean_lines, line, error)
+ for check_fn in extra_check_functions:
+ check_fn(filename, clean_lines, line, error)
+
+def ProcessFileData(filename, file_extension, lines, error,
+ extra_check_functions=[]):
+ """Performs lint checks and reports any errors to the given error function.
+
+ Args:
+ filename: Filename of the file that is being processed.
+ file_extension: The extension (dot not included) of the file.
+ lines: An array of strings, each representing a line of the file, with the
+ last element being empty if the file is terminated with a newline.
+ error: A callable to which errors are reported, which takes 4 arguments:
+ filename, line number, error level, and message
+ extra_check_functions: An array of additional check functions that will be
+ run on each source line. Each function takes 4
+ arguments: filename, clean_lines, line, error
+ """
+ lines = (['// marker so line numbers and indices both start at 1'] + lines +
+ ['// marker so line numbers end in a known way'])
+
+ include_state = _IncludeState()
+ function_state = _FunctionState()
+ nesting_state = _NestingState()
+
+ ResetNolintSuppressions()
+
+ CheckForCopyright(filename, lines, error)
+
+ if file_extension == 'h':
+ CheckForHeaderGuard(filename, lines, error)
+
+ RemoveMultiLineComments(filename, lines, error)
+ clean_lines = CleansedLines(lines)
+ for line in xrange(clean_lines.NumLines()):
+ ProcessLine(filename, file_extension, clean_lines, line,
+ include_state, function_state, nesting_state, error,
+ extra_check_functions)
+ nesting_state.CheckClassFinished(filename, error)
+
+ CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
+
+ # We check here rather than inside ProcessLine so that we see raw
+ # lines rather than "cleaned" lines.
+ CheckForUnicodeReplacementCharacters(filename, lines, error)
+
+ CheckForNewlineAtEOF(filename, lines, error)
+
+def ProcessFile(filename, vlevel, extra_check_functions=[]):
+ """Does google-lint on a single file.
+
+ Args:
+ filename: The name of the file to parse.
+
+ vlevel: The level of errors to report. Every error of confidence
+ >= verbose_level will be reported. 0 is a good default.
+
+ extra_check_functions: An array of additional check functions that will be
+ run on each source line. Each function takes 4
+ arguments: filename, clean_lines, line, error
+ """
+
+ _SetVerboseLevel(vlevel)
+
+ try:
+ # Support the UNIX convention of using "-" for stdin. Note that
+ # we are not opening the file with universal newline support
+ # (which codecs doesn't support anyway), so the resulting lines do
+ # contain trailing '\r' characters if we are reading a file that
+ # has CRLF endings.
+ # If after the split a trailing '\r' is present, it is removed
+ # below. If it is not expected to be present (i.e. os.linesep !=
+ # '\r\n' as in Windows), a warning is issued below if this file
+ # is processed.
+
+ if filename == '-':
+ lines = codecs.StreamReaderWriter(sys.stdin,
+ codecs.getreader('utf8'),
+ codecs.getwriter('utf8'),
+ 'replace').read().split('\n')
+ else:
+ lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
+
+ carriage_return_found = False
+ # Remove trailing '\r'.
+ for linenum in range(len(lines)):
+ if lines[linenum].endswith('\r'):
+ lines[linenum] = lines[linenum].rstrip('\r')
+ carriage_return_found = True
+
+ except IOError:
+ sys.stderr.write(
+ "Skipping input '%s': Can't open for reading\n" % filename)
+ return
+
+ # Note, if no dot is found, this will give the entire filename as the ext.
+ file_extension = filename[filename.rfind('.') + 1:]
+
+ # When reading from stdin, the extension is unknown, so no cpplint tests
+ # should rely on the extension.
+ if (filename != '-' and file_extension != 'cc' and file_extension != 'h'
+ and file_extension != 'cpp'):
+ sys.stderr.write('Ignoring %s; not a .cc or .h file\n' % filename)
+ else:
+ ProcessFileData(filename, file_extension, lines, Error,
+ extra_check_functions)
+ if carriage_return_found and os.linesep != '\r\n':
+      # Use 0 for linenum since we are outputting only one error for
+      # potentially several lines.
+ Error(filename, 0, 'whitespace/newline', 1,
+            'One or more unexpected \\r (^M) found; '
+            'better to use only a \\n')
+
+ sys.stderr.write('Done processing %s\n' % filename)
+
+
+def PrintUsage(message):
+ """Prints a brief usage string and exits, optionally with an error message.
+
+ Args:
+ message: The optional error message.
+ """
+ sys.stderr.write(_USAGE)
+ if message:
+ sys.exit('\nFATAL ERROR: ' + message)
+ else:
+ sys.exit(1)
+
+
+def PrintCategories():
+ """Prints a list of all the error-categories used by error messages.
+
+ These are the categories used to filter messages via --filter.
+ """
+ sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
+ sys.exit(0)
+
+
+def ParseArguments(args):
+ """Parses the command line arguments.
+
+ This may set the output format and verbosity level as side-effects.
+
+ Args:
+    args: The command line arguments.
+
+ Returns:
+ The list of filenames to lint.
+ """
+ try:
+ (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
+ 'counting=',
+ 'filter=',
+ 'root='])
+ except getopt.GetoptError:
+ PrintUsage('Invalid arguments.')
+
+ verbosity = _VerboseLevel()
+ output_format = _OutputFormat()
+ filters = ''
+ counting_style = ''
+
+ for (opt, val) in opts:
+ if opt == '--help':
+ PrintUsage(None)
+ elif opt == '--output':
+      if val not in ('emacs', 'vs7'):
+ PrintUsage('The only allowed output formats are emacs and vs7.')
+ output_format = val
+ elif opt == '--verbose':
+ verbosity = int(val)
+ elif opt == '--filter':
+ filters = val
+ if not filters:
+ PrintCategories()
+ elif opt == '--counting':
+ if val not in ('total', 'toplevel', 'detailed'):
+ PrintUsage('Valid counting options are total, toplevel, and detailed')
+ counting_style = val
+ elif opt == '--root':
+ global _root
+ _root = val
+
+ if not filenames:
+ PrintUsage('No files were specified.')
+
+ _SetOutputFormat(output_format)
+ _SetVerboseLevel(verbosity)
+ _SetFilters(filters)
+ _SetCountingStyle(counting_style)
+
+ return filenames
+
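+# Example invocations using the flags parsed above (illustrative; the exact
+# filter syntax is defined by _SetFilters earlier in this file):
+#
+#   python cpplint.py --output=vs7 --verbose=3 foo.cc
+#   python cpplint.py --counting=detailed --filter=-build/include src/a.h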
+
+def main():
+ filenames = ParseArguments(sys.argv[1:])
+
+ # Change stderr to write with replacement characters so we don't die
+ # if we try to print something containing non-ASCII characters.
+ sys.stderr = codecs.StreamReaderWriter(sys.stderr,
+ codecs.getreader('utf8'),
+ codecs.getwriter('utf8'),
+ 'replace')
+
+ _cpplint_state.ResetErrorCounts()
+ for filename in filenames:
+ ProcessFile(filename, _cpplint_state.verbose_level)
+ _cpplint_state.PrintErrorCounts()
+
+ sys.exit(_cpplint_state.error_count > 0)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/libvpx/tools/diff.py b/libvpx/tools/diff.py
new file mode 100644
index 0000000..a42a4dc
--- /dev/null
+++ b/libvpx/tools/diff.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+## Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+"""Classes for representing diff pieces."""
+
+__author__ = "jkoleszar@google.com"
+
+import re
+
+
+class DiffLines(object):
+ """A container for one half of a diff."""
+
+ def __init__(self, filename, offset, length):
+ self.filename = filename
+ self.offset = offset
+ self.length = length
+ self.lines = []
+ self.delta_line_nums = []
+
+ def Append(self, line):
+    count = len(self.lines)
+    if line[0] != " ":
+      self.delta_line_nums.append(self.offset + count)
+    self.lines.append(line[1:])
+    assert count + 1 <= self.length
+
+ def Complete(self):
+ return len(self.lines) == self.length
+
+ def __contains__(self, item):
+    return self.offset <= item <= self.offset + self.length - 1
+
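+# For illustration: DiffLines(f, offset, length) spans source line numbers
+# offset through offset + length - 1, so a membership test like '12 in lines'
+# asks whether line 12 of that file falls inside this half of the hunk.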
+
+class DiffHunk(object):
+ """A container for one diff hunk, consisting of two DiffLines."""
+
+ def __init__(self, header, file_a, file_b, start_a, len_a, start_b, len_b):
+ self.header = header
+ self.left = DiffLines(file_a, start_a, len_a)
+ self.right = DiffLines(file_b, start_b, len_b)
+ self.lines = []
+
+ def Append(self, line):
+ """Adds a line to the DiffHunk and its DiffLines children."""
+ if line[0] == "-":
+ self.left.Append(line)
+ elif line[0] == "+":
+ self.right.Append(line)
+ elif line[0] == " ":
+ self.left.Append(line)
+ self.right.Append(line)
+ else:
+ assert False, ("Unrecognized character at start of diff line "
+ "%r" % line[0])
+ self.lines.append(line)
+
+ def Complete(self):
+ return self.left.Complete() and self.right.Complete()
+
+ def __repr__(self):
+ return "DiffHunk(%s, %s, len %d)" % (
+ self.left.filename, self.right.filename,
+ max(self.left.length, self.right.length))
+
+
+def ParseDiffHunks(stream):
+ """Walk a file-like object, yielding DiffHunks as they're parsed."""
+
+ file_regex = re.compile(r"(\+\+\+|---) (\S+)")
+ range_regex = re.compile(r"@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))?")
+ hunk = None
+ while True:
+ line = stream.readline()
+ if not line:
+ break
+
+ if hunk is None:
+ # Parse file names
+ diff_file = file_regex.match(line)
+ if diff_file:
+ if line.startswith("---"):
+ a_line = line
+ a = diff_file.group(2)
+ continue
+ if line.startswith("+++"):
+ b_line = line
+ b = diff_file.group(2)
+ continue
+
+ # Parse offset/lengths
+ diffrange = range_regex.match(line)
+ if diffrange:
+ if diffrange.group(2):
+ start_a = int(diffrange.group(1))
+ len_a = int(diffrange.group(3))
+ else:
+ start_a = 1
+ len_a = int(diffrange.group(1))
+
+ if diffrange.group(5):
+ start_b = int(diffrange.group(4))
+ len_b = int(diffrange.group(6))
+ else:
+ start_b = 1
+ len_b = int(diffrange.group(4))
+
+ header = [a_line, b_line, line]
+ hunk = DiffHunk(header, a, b, start_a, len_a, start_b, len_b)
+ else:
+ # Add the current line to the hunk
+ hunk.Append(line)
+
+ # See if the whole hunk has been parsed. If so, yield it and prepare
+ # for the next hunk.
+ if hunk.Complete():
+ yield hunk
+ hunk = None
+
+ # Partial hunks are a parse error
+ assert hunk is None
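
A minimal usage sketch of the new module (the PATCH literal below is invented
for illustration; everything else is exercised exactly as defined above):

    import StringIO

    import diff

    PATCH = ("--- a/foo.c\n"
             "+++ b/foo.c\n"
             "@@ -1,3 +1,3 @@\n"
             " int a;\n"
             "-int b;\n"
             "+int x;\n"
             " int c;\n")

    for hunk in diff.ParseDiffHunks(StringIO.StringIO(PATCH)):
        # Prints "DiffHunk(a/foo.c, b/foo.c, len 3) [2]": new-file line 2
        # is the only line this hunk adds or changes.
        print hunk, hunk.right.delta_line_nums

lint-hunks.py and intersect-diffs.py (both below) now share this parser
instead of each carrying a private copy.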
diff --git a/libvpx/tools/ftfy.sh b/libvpx/tools/ftfy.sh
index c5cfdea..92059f5 100755
--- a/libvpx/tools/ftfy.sh
+++ b/libvpx/tools/ftfy.sh
@@ -29,12 +29,13 @@ log() {
vpx_style() {
- astyle --style=bsd --min-conditional-indent=0 --break-blocks \
- --pad-oper --pad-header --unpad-paren \
- --align-pointer=name \
- --indent-preprocessor --convert-tabs --indent-labels \
- --suffix=none --quiet "$@"
- sed -i "" 's/[[:space:]]\{1,\},/,/g' "$@"
+ for f; do
+ case "$f" in
+ *.h|*.c|*.cc)
+ "${dirname_self}"/vpx-astyle.sh "$f"
+ ;;
+ esac
+ done
}
@@ -119,8 +120,7 @@ cd "$(git rev-parse --show-toplevel)"
git show > "${ORIG_DIFF}"
# Apply the style guide on new and modified files and collect its diff
-for f in $(git diff HEAD^ --name-only -M90 --diff-filter=AM \
- | grep '\.[ch]$'); do
+for f in $(git diff HEAD^ --name-only -M90 --diff-filter=AM); do
case "$f" in
third_party/*) continue;;
nestegg/*) continue;;
diff --git a/libvpx/tools/intersect-diffs.py b/libvpx/tools/intersect-diffs.py
index be9dea5..4dbafa9 100755
--- a/libvpx/tools/intersect-diffs.py
+++ b/libvpx/tools/intersect-diffs.py
@@ -16,121 +16,9 @@ are relevant to A. The resulting file can be applied with patch(1) on top of A.
__author__ = "jkoleszar@google.com"
-import re
import sys
-
-class DiffLines(object):
- """A container for one half of a diff."""
-
- def __init__(self, filename, offset, length):
- self.filename = filename
- self.offset = offset
- self.length = length
- self.lines = []
- self.delta_line_nums = []
-
- def Append(self, line):
- l = len(self.lines)
- if line[0] != " ":
- self.delta_line_nums.append(self.offset + l)
- self.lines.append(line[1:])
- assert l+1 <= self.length
-
- def Complete(self):
- return len(self.lines) == self.length
-
- def __contains__(self, item):
- return item >= self.offset and item <= self.offset + self.length - 1
-
-
-class DiffHunk(object):
- """A container for one diff hunk, consisting of two DiffLines."""
-
- def __init__(self, header, file_a, file_b, start_a, len_a, start_b, len_b):
- self.header = header
- self.left = DiffLines(file_a, start_a, len_a)
- self.right = DiffLines(file_b, start_b, len_b)
- self.lines = []
-
- def Append(self, line):
- """Adds a line to the DiffHunk and its DiffLines children."""
- if line[0] == "-":
- self.left.Append(line)
- elif line[0] == "+":
- self.right.Append(line)
- elif line[0] == " ":
- self.left.Append(line)
- self.right.Append(line)
- else:
- assert False, ("Unrecognized character at start of diff line "
- "%r" % line[0])
- self.lines.append(line)
-
- def Complete(self):
- return self.left.Complete() and self.right.Complete()
-
- def __repr__(self):
- return "DiffHunk(%s, %s, len %d)" % (
- self.left.filename, self.right.filename,
- max(self.left.length, self.right.length))
-
-
-def ParseDiffHunks(stream):
- """Walk a file-like object, yielding DiffHunks as they're parsed."""
-
- file_regex = re.compile(r"(\+\+\+|---) (\S+)")
- range_regex = re.compile(r"@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))?")
- hunk = None
- while True:
- line = stream.readline()
- if not line:
- break
-
- if hunk is None:
- # Parse file names
- diff_file = file_regex.match(line)
- if diff_file:
- if line.startswith("---"):
- a_line = line
- a = diff_file.group(2)
- continue
- if line.startswith("+++"):
- b_line = line
- b = diff_file.group(2)
- continue
-
- # Parse offset/lengths
- diffrange = range_regex.match(line)
- if diffrange:
- if diffrange.group(2):
- start_a = int(diffrange.group(1))
- len_a = int(diffrange.group(3))
- else:
- start_a = 1
- len_a = int(diffrange.group(1))
-
- if diffrange.group(5):
- start_b = int(diffrange.group(4))
- len_b = int(diffrange.group(6))
- else:
- start_b = 1
- len_b = int(diffrange.group(4))
-
- header = [a_line, b_line, line]
- hunk = DiffHunk(header, a, b, start_a, len_a, start_b, len_b)
- else:
- # Add the current line to the hunk
- hunk.Append(line)
-
- # See if the whole hunk has been parsed. If so, yield it and prepare
- # for the next hunk.
- if hunk.Complete():
- yield hunk
- hunk = None
-
- # Partial hunks are a parse error
- assert hunk is None
+import diff
def FormatDiffHunks(hunks):
@@ -162,8 +50,8 @@ def ZipHunks(rhs_hunks, lhs_hunks):
def main():
- old_hunks = [x for x in ParseDiffHunks(open(sys.argv[1], "r"))]
- new_hunks = [x for x in ParseDiffHunks(open(sys.argv[2], "r"))]
+ old_hunks = [x for x in diff.ParseDiffHunks(open(sys.argv[1], "r"))]
+ new_hunks = [x for x in diff.ParseDiffHunks(open(sys.argv[2], "r"))]
out_hunks = []
# Join the right hand side of the older diff with the left hand side of the
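
The joining logic itself (ZipHunks, FormatDiffHunks) is outside this hunk's
context, so the sketch below is only a plausible reading of the docstring,
not the actual implementation: a hunk from the newer diff is kept when a
line covered by its left side was changed by the older diff.

    kept = [new for new in new_hunks
            if any(line_num in new.left
                   for old in old_hunks
                   for line_num in old.right.delta_line_nums)]

The range test is DiffLines.__contains__ from the shared diff module.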
diff --git a/libvpx/tools/lint-hunks.py b/libvpx/tools/lint-hunks.py
new file mode 100755
index 0000000..b15a691
--- /dev/null
+++ b/libvpx/tools/lint-hunks.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+## Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+"""Performs style checking on each diff hunk."""
+import getopt
+import os
+import StringIO
+import subprocess
+import sys
+
+import diff
+
+
+SHORT_OPTIONS = "h"
+LONG_OPTIONS = ["help"]
+
+TOPLEVEL_CMD = ["git", "rev-parse", "--show-toplevel"]
+DIFF_CMD = ["git", "diff"]
+DIFF_INDEX_CMD = ["git", "diff-index", "-u", "HEAD", "--"]
+SHOW_CMD = ["git", "show"]
+CPPLINT_FILTERS = ["-readability/casting", "-runtime/int"]
+
+
+class Usage(Exception):
+ pass
+
+
+class SubprocessException(Exception):
+ def __init__(self, args):
+ msg = "Failed to execute '%s'"%(" ".join(args))
+ super(SubprocessException, self).__init__(msg)
+
+
+class Subprocess(subprocess.Popen):
+ """Adds the notion of an expected returncode to Popen."""
+
+ def __init__(self, args, expected_returncode=0, **kwargs):
+ self._args = args
+ self._expected_returncode = expected_returncode
+ super(Subprocess, self).__init__(args, **kwargs)
+
+ def communicate(self, *args, **kwargs):
+ result = super(Subprocess, self).communicate(*args, **kwargs)
+ if self._expected_returncode is not None:
+ try:
+ ok = self.returncode in self._expected_returncode
+ except TypeError:
+ ok = self.returncode == self._expected_returncode
+ if not ok:
+ raise SubprocessException(self._args)
+ return result
+
+
+def main(argv=None):
+ if argv is None:
+ argv = sys.argv
+ try:
+ try:
+ opts, args = getopt.getopt(argv[1:], SHORT_OPTIONS, LONG_OPTIONS)
+ except getopt.error, msg:
+ raise Usage(msg)
+
+ # process options
+ for o, _ in opts:
+ if o in ("-h", "--help"):
+ print __doc__
+ sys.exit(0)
+
+ if args and len(args) > 1:
+ print __doc__
+ sys.exit(0)
+
+ # Find the fully qualified path to the root of the tree
+ tl = Subprocess(TOPLEVEL_CMD, stdout=subprocess.PIPE)
+ tl = tl.communicate()[0].strip()
+
+ # See if we're working on the index or not.
+ if args:
+ diff_cmd = DIFF_CMD + [args[0] + "^!"]
+ else:
+ diff_cmd = DIFF_INDEX_CMD
+
+ # Build the command line to execute cpplint
+ cpplint_cmd = [os.path.join(tl, "tools", "cpplint.py"),
+ "--filter=" + ",".join(CPPLINT_FILTERS),
+ "-"]
+
+ # Get a list of all affected lines
+ file_affected_line_map = {}
+ p = Subprocess(diff_cmd, stdout=subprocess.PIPE)
+ stdout = p.communicate()[0]
+ for hunk in diff.ParseDiffHunks(StringIO.StringIO(stdout)):
+ filename = hunk.right.filename[2:]
+ if filename not in file_affected_line_map:
+ file_affected_line_map[filename] = set()
+ file_affected_line_map[filename].update(hunk.right.delta_line_nums)
+
+ # Run each affected file through cpplint
+ lint_failed = False
+ for filename, affected_lines in file_affected_line_map.iteritems():
+ if filename.split(".")[-1] not in ("c", "h", "cc"):
+ continue
+
+ if args:
+ # File contents come from git
+ show_cmd = SHOW_CMD + [args[0] + ":" + filename]
+ show = Subprocess(show_cmd, stdout=subprocess.PIPE)
+ lint = Subprocess(cpplint_cmd, expected_returncode=(0, 1),
+ stdin=show.stdout, stderr=subprocess.PIPE)
+ lint_out = lint.communicate()[1]
+ else:
+ # File contents come from the working tree
+ lint = Subprocess(cpplint_cmd, expected_returncode=(0, 1),
+ stdin=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdin = open(os.path.join(tl, filename)).read()
+ lint_out = lint.communicate(stdin)[1]
+
+ for line in lint_out.split("\n"):
+ fields = line.split(":")
+ if fields[0] != "-":
+ continue
+ warning_line_num = int(fields[1])
+ if warning_line_num in affected_lines:
+ print "%s:%d:%s"%(filename, warning_line_num,
+ ":".join(fields[2:]))
+ lint_failed = True
+
+ # Set exit code if any relevant lint errors seen
+ if lint_failed:
+ return 1
+
+ except Usage, err:
+ print >>sys.stderr, err
+ print >>sys.stderr, "for help use --help"
+ return 2
+
+if __name__ == "__main__":
+ sys.exit(main())
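
The Subprocess wrapper exists because cpplint exits 1 whenever it emits
warnings, which would otherwise read as a failure. Its contract, shown with
stock POSIX commands:

    p = Subprocess(["true"], expected_returncode=(0, 1))
    p.communicate()   # returncode 0 is in (0, 1): no exception

    p = Subprocess(["false"], expected_returncode=0)
    p.communicate()   # returncode 1 != 0: raises SubprocessException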
diff --git a/libvpx/tools/vpx-astyle.sh b/libvpx/tools/vpx-astyle.sh
new file mode 100755
index 0000000..6340426
--- /dev/null
+++ b/libvpx/tools/vpx-astyle.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+set -e
+astyle --style=java --indent=spaces=2 --indent-switches\
+ --min-conditional-indent=0 \
+ --pad-oper --pad-header --unpad-paren \
+ --align-pointer=name \
+ --indent-preprocessor --convert-tabs --indent-labels \
+ --suffix=none --quiet --max-instatement-indent=80 "$@"
+# Disabled, too greedy?
+#sed -i 's;[[:space:]]\{1,\}\[;[;g' "$@"
+
+sed_i() {
+ # Incompatible sed parameter parsing.
+ if sed -i 2>&1 | grep -q 'requires an argument'; then
+ sed -i '' "$@"
+ else
+ sed -i "$@"
+ fi
+}
+
+sed_i -e 's/[[:space:]]\{1,\}\([,;]\)/\1/g' \
+ -e 's/[[:space:]]\{1,\}\([+-]\{2\};\)/\1/g' \
+ -e 's/,[[:space:]]*}/}/g' \
+ -e 's;//\([^/[:space:]].*$\);// \1;g' \
+ -e 's/^\(public\|private\|protected\):$/ \1:/g' \
+ -e 's/[[:space:]]\{1,\}$//g' \
+ "$@"
diff --git a/libvpx/tools_common.c b/libvpx/tools_common.c
index 6f95028..92de794 100644
--- a/libvpx/tools_common.c
+++ b/libvpx/tools_common.c
@@ -20,11 +20,10 @@
#endif
#endif
-FILE* set_binary_mode(FILE *stream)
-{
- (void)stream;
+FILE *set_binary_mode(FILE *stream) {
+ (void)stream;
#if defined(_WIN32) || defined(__OS2__)
- _setmode(_fileno(stream), _O_BINARY);
+ _setmode(_fileno(stream), _O_BINARY);
#endif
- return stream;
+ return stream;
}
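
The Windows/OS2 branch matters because stdio streams default to text mode
there, translating line endings and treating 0x1A as EOF, which corrupts raw
bitstream I/O. For comparison, the equivalent dance in Python (msvcrt.setmode
and os.O_BINARY are the standard-library counterparts):

    import os
    import sys

    if sys.platform == "win32":
        import msvcrt
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)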
diff --git a/libvpx/tools_common.h b/libvpx/tools_common.h
index 80c9747..9e56149 100644
--- a/libvpx/tools_common.h
+++ b/libvpx/tools_common.h
@@ -11,6 +11,6 @@
#define TOOLS_COMMON_H
/* Sets a stdio stream into binary mode */
-FILE* set_binary_mode(FILE *stream);
+FILE *set_binary_mode(FILE *stream);
#endif
diff --git a/libvpx/vp8/common/arm/armv6/filter_v6.asm b/libvpx/vp8/common/arm/armv6/filter_v6.asm
index 1ba91dd..eb4b75b 100644
--- a/libvpx/vp8/common/arm/armv6/filter_v6.asm
+++ b/libvpx/vp8/common/arm/armv6/filter_v6.asm
@@ -394,7 +394,7 @@
mov r4, #0x40 ; rounding factor (for smlad{x})
|height_loop_2nd_4|
- ldrd r8, [r0, #-4] ; load the data
+ ldrd r8, r9, [r0, #-4] ; load the data
orr r7, r7, r3, lsr #1 ; loop counter
|width_loop_2nd_4|
diff --git a/libvpx/vp8/common/arm/armv6/idct_blk_v6.c b/libvpx/vp8/common/arm/armv6/idct_blk_v6.c
index 6002c0f..c94f84a 100644
--- a/libvpx/vp8/common/arm/armv6/idct_blk_v6.c
+++ b/libvpx/vp8/common/arm/armv6/idct_blk_v6.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
void vp8_dequant_idct_add_y_block_v6(short *q, short *dq,
diff --git a/libvpx/vp8/common/arm/bilinearfilter_arm.c b/libvpx/vp8/common/arm/bilinearfilter_arm.c
index c63073c..799c8bd 100644
--- a/libvpx/vp8/common/arm/bilinearfilter_arm.c
+++ b/libvpx/vp8/common/arm/bilinearfilter_arm.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include <math.h>
#include "vp8/common/filter.h"
#include "bilinearfilter_arm.h"
diff --git a/libvpx/vp8/common/arm/filter_arm.c b/libvpx/vp8/common/arm/filter_arm.c
index 148951a..7fe3967 100644
--- a/libvpx/vp8/common/arm/filter_arm.c
+++ b/libvpx/vp8/common/arm/filter_arm.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include <math.h>
#include "vp8/common/filter.h"
#include "vpx_ports/mem.h"
diff --git a/libvpx/vp8/common/arm/loopfilter_arm.c b/libvpx/vp8/common/arm/loopfilter_arm.c
index b8f9bd9..3bdc967 100644
--- a/libvpx/vp8/common/arm/loopfilter_arm.c
+++ b/libvpx/vp8/common/arm/loopfilter_arm.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vp8/common/loopfilter.h"
#include "vp8/common/onyxc_int.h"
diff --git a/libvpx/vp8/common/arm/neon/idct_blk_neon.c b/libvpx/vp8/common/arm/neon/idct_blk_neon.c
index ee7f223..fb327a7 100644
--- a/libvpx/vp8/common/arm/neon/idct_blk_neon.c
+++ b/libvpx/vp8/common/arm/neon/idct_blk_neon.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
/* place these declarations here because we don't want to maintain them
* outside of this scope
diff --git a/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm b/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm
index e7a3ed1..9d22c52 100644
--- a/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm
+++ b/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm
@@ -9,9 +9,6 @@
;
-bilinear_taps_coeff
- DCD 128, 0, 112, 16, 96, 32, 80, 48, 64, 64, 48, 80, 32, 96, 16, 112
-
;-----------------
EXPORT |vp8_sub_pixel_variance16x16_neon_func|
@@ -29,6 +26,9 @@ bilinear_taps_coeff
; stack(r6) unsigned int *sse
;note: most of the code is copied from bilinear_predict16x16_neon and vp8_variance16x16_neon.
+bilinear_taps_coeff
+ DCD 128, 0, 112, 16, 96, 32, 80, 48, 64, 64, 48, 80, 32, 96, 16, 112
+
|vp8_sub_pixel_variance16x16_neon_func| PROC
push {r4-r6, lr}
diff --git a/libvpx/vp8/common/arm/reconintra_arm.c b/libvpx/vp8/common/arm/reconintra_arm.c
index 121e090..2874896 100644
--- a/libvpx/vp8/common/arm/reconintra_arm.c
+++ b/libvpx/vp8/common/arm/reconintra_arm.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vp8/common/blockd.h"
#include "vpx_mem/vpx_mem.h"
diff --git a/libvpx/vp8/common/arm/variance_arm.c b/libvpx/vp8/common/arm/variance_arm.c
index 891d767..467a509 100644
--- a/libvpx/vp8/common/arm/variance_arm.c
+++ b/libvpx/vp8/common/arm/variance_arm.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vp8/common/variance.h"
#include "vp8/common/filter.h"
diff --git a/libvpx/vp8/common/dequantize.c b/libvpx/vp8/common/dequantize.c
index 8eda486..6e2f69a 100644
--- a/libvpx/vp8/common/dequantize.c
+++ b/libvpx/vp8/common/dequantize.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vp8/common/blockd.h"
#include "vpx_mem/vpx_mem.h"
diff --git a/libvpx/vp8/common/generic/systemdependent.c b/libvpx/vp8/common/generic/systemdependent.c
index 5a6ac7b..d84df33 100644
--- a/libvpx/vp8/common/generic/systemdependent.c
+++ b/libvpx/vp8/common/generic/systemdependent.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#if ARCH_ARM
#include "vpx_ports/arm.h"
#elif ARCH_X86 || ARCH_X86_64
@@ -82,6 +82,7 @@ static int get_cpu_count()
}
#endif
+void vp8_clear_system_state_c() {};
void vp8_machine_specific_config(VP8_COMMON *ctx)
{
diff --git a/libvpx/vp8/common/idct_blk.c b/libvpx/vp8/common/idct_blk.c
index 0b058c7..8edfffb 100644
--- a/libvpx/vp8/common/idct_blk.c
+++ b/libvpx/vp8/common/idct_blk.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
void vp8_dequant_idct_add_c(short *input, short *dq,
unsigned char *dest, int stride);
diff --git a/libvpx/vp8/common/invtrans.h b/libvpx/vp8/common/invtrans.h
index d048665..9262640 100644
--- a/libvpx/vp8/common/invtrans.h
+++ b/libvpx/vp8/common/invtrans.h
@@ -13,7 +13,7 @@
#define __INC_INVTRANS_H
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "blockd.h"
#include "onyxc_int.h"
diff --git a/libvpx/vp8/common/loopfilter.c b/libvpx/vp8/common/loopfilter.c
index 41b4f12..19857a7 100644
--- a/libvpx/vp8/common/loopfilter.c
+++ b/libvpx/vp8/common/loopfilter.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "loopfilter.h"
#include "onyxc_int.h"
#include "vpx_mem/vpx_mem.h"
@@ -156,39 +156,38 @@ void vp8_loop_filter_frame_init(VP8_COMMON *cm,
continue;
}
- lvl_ref = lvl_seg;
-
/* INTRA_FRAME */
ref = INTRA_FRAME;
/* Apply delta for reference frame */
- lvl_ref += mbd->ref_lf_deltas[ref];
+ lvl_ref = lvl_seg + mbd->ref_lf_deltas[ref];
/* Apply delta for Intra modes */
mode = 0; /* B_PRED */
/* Only the split mode BPRED has a further special case */
- lvl_mode = lvl_ref + mbd->mode_lf_deltas[mode];
- lvl_mode = (lvl_mode > 0) ? (lvl_mode > 63 ? 63 : lvl_mode) : 0; /* clamp */
+ lvl_mode = lvl_ref + mbd->mode_lf_deltas[mode];
+ /* clamp */
+ lvl_mode = (lvl_mode > 0) ? (lvl_mode > 63 ? 63 : lvl_mode) : 0;
lfi->lvl[seg][ref][mode] = lvl_mode;
mode = 1; /* all the rest of Intra modes */
- lvl_mode = (lvl_ref > 0) ? (lvl_ref > 63 ? 63 : lvl_ref) : 0; /* clamp */
+ /* clamp */
+ lvl_mode = (lvl_ref > 0) ? (lvl_ref > 63 ? 63 : lvl_ref) : 0;
lfi->lvl[seg][ref][mode] = lvl_mode;
/* LAST, GOLDEN, ALT */
for(ref = 1; ref < MAX_REF_FRAMES; ref++)
{
- int lvl_ref = lvl_seg;
-
/* Apply delta for reference frame */
- lvl_ref += mbd->ref_lf_deltas[ref];
+ lvl_ref = lvl_seg + mbd->ref_lf_deltas[ref];
/* Apply delta for Inter modes */
for (mode = 1; mode < 4; mode++)
{
lvl_mode = lvl_ref + mbd->mode_lf_deltas[mode];
- lvl_mode = (lvl_mode > 0) ? (lvl_mode > 63 ? 63 : lvl_mode) : 0; /* clamp */
+ /* clamp */
+ lvl_mode = (lvl_mode > 0) ? (lvl_mode > 63 ? 63 : lvl_mode) : 0;
lfi->lvl[seg][ref][mode] = lvl_mode;
}
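
The ternaries above all implement the same clamp of a filter level into
[0, MAX_LOOP_FILTER]; an equivalent helper, for reference:

    def clamp_filter_level(lvl, max_loop_filter=63):
        # Matches (lvl > 0) ? (lvl > 63 ? 63 : lvl) : 0 in the C code.
        if lvl <= 0:
            return 0
        return min(lvl, max_loop_filter)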
@@ -567,46 +566,28 @@ void vp8_loop_filter_partial_frame
int mb_cols = post->y_width >> 4;
int mb_rows = post->y_height >> 4;
- int linestocopy, i;
+ int linestocopy;
loop_filter_info_n *lfi_n = &cm->lf_info;
loop_filter_info lfi;
int filter_level;
- int alt_flt_enabled = mbd->segmentation_enabled;
FRAME_TYPE frame_type = cm->frame_type;
const MODE_INFO *mode_info_context;
- int lvl_seg[MAX_MB_SEGMENTS];
+#if 0
+ if(default_filt_lvl == 0) /* no filter applied */
+ return;
+#endif
+
+ /* Initialize the loop filter for this frame. */
+ vp8_loop_filter_frame_init( cm, mbd, default_filt_lvl);
/* number of MB rows to use in partial filtering */
linestocopy = mb_rows / PARTIAL_FRAME_FRACTION;
linestocopy = linestocopy ? linestocopy << 4 : 16; /* 16 lines per MB */
- /* Note the baseline filter values for each segment */
- /* See vp8_loop_filter_frame_init. Rather than call that for each change
- * to default_filt_lvl, copy the relevant calculation here.
- */
- if (alt_flt_enabled)
- {
- for (i = 0; i < MAX_MB_SEGMENTS; i++)
- { /* Abs value */
- if (mbd->mb_segement_abs_delta == SEGMENT_ABSDATA)
- {
- lvl_seg[i] = mbd->segment_feature_data[MB_LVL_ALT_LF][i];
- }
- /* Delta Value */
- else
- {
- lvl_seg[i] = default_filt_lvl
- + mbd->segment_feature_data[MB_LVL_ALT_LF][i];
- lvl_seg[i] = (lvl_seg[i] > 0) ?
- ((lvl_seg[i] > 63) ? 63: lvl_seg[i]) : 0;
- }
- }
- }
-
/* Set up the buffer pointers; partial image starts at ~middle of frame */
y_ptr = post->y_buffer + ((post->y_height >> 5) * 16) * post->y_stride;
mode_info_context = cm->mi + (post->y_height >> 5) * (mb_cols + 1);
@@ -620,10 +601,12 @@ void vp8_loop_filter_partial_frame
mode_info_context->mbmi.mode != SPLITMV &&
mode_info_context->mbmi.mb_skip_coeff);
- if (alt_flt_enabled)
- filter_level = lvl_seg[mode_info_context->mbmi.segment_id];
- else
- filter_level = default_filt_lvl;
+ const int mode_index =
+ lfi_n->mode_lf_lut[mode_info_context->mbmi.mode];
+ const int seg = mode_info_context->mbmi.segment_id;
+ const int ref_frame = mode_info_context->mbmi.ref_frame;
+
+ filter_level = lfi_n->lvl[seg][ref_frame][mode_index];
if (filter_level)
{
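
Partial-frame filtering now reuses the level table that
vp8_loop_filter_frame_init (called above) fills in, instead of recomputing
per-segment levels locally. The lookup, sketched with the same field names:

    def pick_filter_level(lfi_n, mbmi):
        mode_index = lfi_n.mode_lf_lut[mbmi.mode]
        return lfi_n.lvl[mbmi.segment_id][mbmi.ref_frame][mode_index]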
diff --git a/libvpx/vp8/common/loopfilter.h b/libvpx/vp8/common/loopfilter.h
index b3af2d6..1e47f34 100644
--- a/libvpx/vp8/common/loopfilter.h
+++ b/libvpx/vp8/common/loopfilter.h
@@ -14,7 +14,7 @@
#include "vpx_ports/mem.h"
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#define MAX_LOOP_FILTER 63
/* fraction of total macroblock rows to be used in fast filter level picking */
diff --git a/libvpx/vp8/common/loopfilter_filters.c b/libvpx/vp8/common/loopfilter_filters.c
index 8235f6e..1d51696 100644
--- a/libvpx/vp8/common/loopfilter_filters.c
+++ b/libvpx/vp8/common/loopfilter_filters.c
@@ -54,7 +54,7 @@ static void vp8_filter(signed char mask, uc hev, uc *op1,
{
signed char ps0, qs0;
signed char ps1, qs1;
- signed char vp8_filter, Filter1, Filter2;
+ signed char filter_value, Filter1, Filter2;
signed char u;
ps1 = (signed char) * op1 ^ 0x80;
@@ -63,35 +63,35 @@ static void vp8_filter(signed char mask, uc hev, uc *op1,
qs1 = (signed char) * oq1 ^ 0x80;
/* add outer taps if we have high edge variance */
- vp8_filter = vp8_signed_char_clamp(ps1 - qs1);
- vp8_filter &= hev;
+ filter_value = vp8_signed_char_clamp(ps1 - qs1);
+ filter_value &= hev;
/* inner taps */
- vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * (qs0 - ps0));
- vp8_filter &= mask;
+ filter_value = vp8_signed_char_clamp(filter_value + 3 * (qs0 - ps0));
+ filter_value &= mask;
/* save bottom 3 bits so that we round one side +4 and the other +3
* if it equals 4 we'll set to adjust by -1 to account for the fact
* we'd round 3 the other way
*/
- Filter1 = vp8_signed_char_clamp(vp8_filter + 4);
- Filter2 = vp8_signed_char_clamp(vp8_filter + 3);
+ Filter1 = vp8_signed_char_clamp(filter_value + 4);
+ Filter2 = vp8_signed_char_clamp(filter_value + 3);
Filter1 >>= 3;
Filter2 >>= 3;
u = vp8_signed_char_clamp(qs0 - Filter1);
*oq0 = u ^ 0x80;
u = vp8_signed_char_clamp(ps0 + Filter2);
*op0 = u ^ 0x80;
- vp8_filter = Filter1;
+ filter_value = Filter1;
/* outer tap adjustments */
- vp8_filter += 1;
- vp8_filter >>= 1;
- vp8_filter &= ~hev;
+ filter_value += 1;
+ filter_value >>= 1;
+ filter_value &= ~hev;
- u = vp8_signed_char_clamp(qs1 - vp8_filter);
+ u = vp8_signed_char_clamp(qs1 - filter_value);
*oq1 = u ^ 0x80;
- u = vp8_signed_char_clamp(ps1 + vp8_filter);
+ u = vp8_signed_char_clamp(ps1 + filter_value);
*op1 = u ^ 0x80;
}
@@ -162,7 +162,7 @@ static void vp8_mbfilter(signed char mask, uc hev,
uc *op2, uc *op1, uc *op0, uc *oq0, uc *oq1, uc *oq2)
{
signed char s, u;
- signed char vp8_filter, Filter1, Filter2;
+ signed char filter_value, Filter1, Filter2;
signed char ps2 = (signed char) * op2 ^ 0x80;
signed char ps1 = (signed char) * op1 ^ 0x80;
signed char ps0 = (signed char) * op0 ^ 0x80;
@@ -171,11 +171,11 @@ static void vp8_mbfilter(signed char mask, uc hev,
signed char qs2 = (signed char) * oq2 ^ 0x80;
/* add outer taps if we have high edge variance */
- vp8_filter = vp8_signed_char_clamp(ps1 - qs1);
- vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * (qs0 - ps0));
- vp8_filter &= mask;
+ filter_value = vp8_signed_char_clamp(ps1 - qs1);
+ filter_value = vp8_signed_char_clamp(filter_value + 3 * (qs0 - ps0));
+ filter_value &= mask;
- Filter2 = vp8_filter;
+ Filter2 = filter_value;
Filter2 &= hev;
/* save bottom 3 bits so that we round one side +4 and the other +3 */
@@ -188,8 +188,8 @@ static void vp8_mbfilter(signed char mask, uc hev,
/* only apply wider filter if not high edge variance */
- vp8_filter &= ~hev;
- Filter2 = vp8_filter;
+ filter_value &= ~hev;
+ Filter2 = filter_value;
/* roughly 3/7th difference across boundary */
u = vp8_signed_char_clamp((63 + Filter2 * 27) >> 7);
@@ -291,24 +291,24 @@ static signed char vp8_simple_filter_mask(uc blimit, uc p1, uc p0, uc q0, uc q1)
static void vp8_simple_filter(signed char mask, uc *op1, uc *op0, uc *oq0, uc *oq1)
{
- signed char vp8_filter, Filter1, Filter2;
+ signed char filter_value, Filter1, Filter2;
signed char p1 = (signed char) * op1 ^ 0x80;
signed char p0 = (signed char) * op0 ^ 0x80;
signed char q0 = (signed char) * oq0 ^ 0x80;
signed char q1 = (signed char) * oq1 ^ 0x80;
signed char u;
- vp8_filter = vp8_signed_char_clamp(p1 - q1);
- vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * (q0 - p0));
- vp8_filter &= mask;
+ filter_value = vp8_signed_char_clamp(p1 - q1);
+ filter_value = vp8_signed_char_clamp(filter_value + 3 * (q0 - p0));
+ filter_value &= mask;
/* save bottom 3 bits so that we round one side +4 and the other +3 */
- Filter1 = vp8_signed_char_clamp(vp8_filter + 4);
+ Filter1 = vp8_signed_char_clamp(filter_value + 4);
Filter1 >>= 3;
u = vp8_signed_char_clamp(q0 - Filter1);
*oq0 = u ^ 0x80;
- Filter2 = vp8_signed_char_clamp(vp8_filter + 3);
+ Filter2 = vp8_signed_char_clamp(filter_value + 3);
Filter2 >>= 3;
u = vp8_signed_char_clamp(p0 + Filter2);
*op0 = u ^ 0x80;
diff --git a/libvpx/vp8/common/mfqe.c b/libvpx/vp8/common/mfqe.c
index 3dff150..0693326 100644
--- a/libvpx/vp8/common/mfqe.c
+++ b/libvpx/vp8/common/mfqe.c
@@ -20,7 +20,7 @@
#include "postproc.h"
#include "variance.h"
#include "vpx_mem/vpx_mem.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx_scale/yv12config.h"
#include <limits.h>
@@ -280,7 +280,7 @@ void vp8_multiframe_quality_enhance
FRAME_TYPE frame_type = cm->frame_type;
/* Point at base of Mb MODE_INFO list has motion vectors etc */
- const MODE_INFO *mode_info_context = cm->mi;
+ const MODE_INFO *mode_info_context = cm->show_frame_mi;
int mb_row;
int mb_col;
int totmap, map[4];
diff --git a/libvpx/vp8/common/mips/dspr2/dequantize_dspr2.c b/libvpx/vp8/common/mips/dspr2/dequantize_dspr2.c
index 6823325..619ee80 100644
--- a/libvpx/vp8/common/mips/dspr2/dequantize_dspr2.c
+++ b/libvpx/vp8/common/mips/dspr2/dequantize_dspr2.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx_mem/vpx_mem.h"
#if HAVE_DSPR2
diff --git a/libvpx/vp8/common/mips/dspr2/filter_dspr2.c b/libvpx/vp8/common/mips/dspr2/filter_dspr2.c
index 71fdcd7..ace5d40 100644
--- a/libvpx/vp8/common/mips/dspr2/filter_dspr2.c
+++ b/libvpx/vp8/common/mips/dspr2/filter_dspr2.c
@@ -10,7 +10,7 @@
#include <stdlib.h>
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx_ports/mem.h"
#if HAVE_DSPR2
diff --git a/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c b/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c
index 1e0ebd1..ab938cd 100644
--- a/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c
+++ b/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#if HAVE_DSPR2
diff --git a/libvpx/vp8/common/mips/dspr2/idctllm_dspr2.c b/libvpx/vp8/common/mips/dspr2/idctllm_dspr2.c
index 25b7936..2eff710 100644
--- a/libvpx/vp8/common/mips/dspr2/idctllm_dspr2.c
+++ b/libvpx/vp8/common/mips/dspr2/idctllm_dspr2.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#if HAVE_DSPR2
#define CROP_WIDTH 256
diff --git a/libvpx/vp8/common/mips/dspr2/loopfilter_filters_dspr2.c b/libvpx/vp8/common/mips/dspr2/loopfilter_filters_dspr2.c
index b8e5e4d..9ae6bc8 100644
--- a/libvpx/vp8/common/mips/dspr2/loopfilter_filters_dspr2.c
+++ b/libvpx/vp8/common/mips/dspr2/loopfilter_filters_dspr2.c
@@ -10,7 +10,7 @@
#include <stdlib.h>
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vp8/common/onyxc_int.h"
#if HAVE_DSPR2
diff --git a/libvpx/vp8/common/mips/dspr2/reconinter_dspr2.c b/libvpx/vp8/common/mips/dspr2/reconinter_dspr2.c
index a5239a3..a14b397 100644
--- a/libvpx/vp8/common/mips/dspr2/reconinter_dspr2.c
+++ b/libvpx/vp8/common/mips/dspr2/reconinter_dspr2.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx/vpx_integer.h"
#if HAVE_DSPR2
diff --git a/libvpx/vp8/common/onyxc_int.h b/libvpx/vp8/common/onyxc_int.h
index 5325bac..276dd72 100644
--- a/libvpx/vp8/common/onyxc_int.h
+++ b/libvpx/vp8/common/onyxc_int.h
@@ -13,7 +13,7 @@
#define __INC_VP8C_INT_H
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx/internal/vpx_codec_internal.h"
#include "loopfilter.h"
#include "entropymv.h"
@@ -127,7 +127,8 @@ typedef struct VP8Common
MODE_INFO *prev_mip; /* MODE_INFO array 'mip' from last decoded frame */
MODE_INFO *prev_mi; /* 'mi' from last frame (points into prev_mip) */
#endif
-
+ MODE_INFO *show_frame_mi; /* MODE_INFO for the last decoded frame
+ to show */
LOOPFILTERTYPE filter_type;
loop_filter_info_n lf_info;
diff --git a/libvpx/vp8/common/onyxd.h b/libvpx/vp8/common/onyxd.h
index fd7e051..97c81c1 100644
--- a/libvpx/vp8/common/onyxd.h
+++ b/libvpx/vp8/common/onyxd.h
@@ -34,7 +34,6 @@ extern "C"
int postprocess;
int max_threads;
int error_concealment;
- int input_fragments;
} VP8D_CONFIG;
typedef enum
@@ -56,10 +55,6 @@ extern "C"
vpx_codec_err_t vp8dx_get_reference(struct VP8D_COMP* comp, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd);
vpx_codec_err_t vp8dx_set_reference(struct VP8D_COMP* comp, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd);
- struct VP8D_COMP* vp8dx_create_decompressor(VP8D_CONFIG *oxcf);
-
- void vp8dx_remove_decompressor(struct VP8D_COMP* comp);
-
#ifdef __cplusplus
}
#endif
diff --git a/libvpx/vp8/common/postproc.c b/libvpx/vp8/common/postproc.c
index 80fa530..0266f4c 100644
--- a/libvpx/vp8/common/postproc.c
+++ b/libvpx/vp8/common/postproc.c
@@ -10,11 +10,12 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
+#include "vpx_scale_rtcd.h"
#include "vpx_scale/yv12config.h"
#include "postproc.h"
#include "common.h"
-#include "vpx_scale/vpxscale.h"
+#include "vpx_scale/vpx_scale.h"
#include "systemdependent.h"
#include <limits.h>
@@ -333,7 +334,7 @@ void vp8_deblock(VP8_COMMON *cm,
double level = 6.0e-05 * q * q * q - .0067 * q * q + .306 * q + .0065;
int ppl = (int)(level + .5);
- const MODE_INFO *mode_info_context = cm->mi;
+ const MODE_INFO *mode_info_context = cm->show_frame_mi;
int mbr, mbc;
/* The pixel thresholds are adjusted according to if or not the macroblock
@@ -438,29 +439,28 @@ static void fillrd(struct postproc_state *state, int q, int a)
char char_dist[300];
double sigma;
- int ai = a, qi = q, i;
+ int i;
vp8_clear_system_state();
- sigma = ai + .5 + .6 * (63 - qi) / 63.0;
+ sigma = a + .5 + .6 * (63 - q) / 63.0;
/* set up a lookup table of 256 entries that matches
* a gaussian distribution with sigma determined by q.
*/
{
- double i;
int next, j;
next = 0;
for (i = -32; i < 32; i++)
{
- int a = (int)(.5 + 256 * vp8_gaussian(sigma, 0, i));
+ const int v = (int)(.5 + 256 * vp8_gaussian(sigma, 0, i));
- if (a)
+ if (v)
{
- for (j = 0; j < a; j++)
+ for (j = 0; j < v; j++)
{
char_dist[next+j] = (char) i;
}
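
fillrd builds a lookup table in which each noise value i occupies a number of
slots proportional to its Gaussian density, so sampling char_dist uniformly
yields approximately Gaussian noise. The construction, sketched in Python
(gaussian() is assumed to match vp8_gaussian, a standard normal density
defined elsewhere in postproc.c; sigma is hard-coded here where the C code
derives it from q and a):

    import math

    def gaussian(sigma, mu, x):
        return (1.0 / (sigma * math.sqrt(2 * math.pi)) *
                math.exp(-(x - mu) ** 2 / (2.0 * sigma ** 2)))

    sigma = 1.8
    char_dist = []
    for i in range(-32, 32):
        v = int(0.5 + 256 * gaussian(sigma, 0, i))
        char_dist.extend([i] * v)   # value i repeated v times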
@@ -543,12 +543,12 @@ void vp8_plane_add_noise_c(unsigned char *Start, char *noise,
* filled with the same color block.
*/
void vp8_blend_mb_inner_c (unsigned char *y, unsigned char *u, unsigned char *v,
- int y1, int u1, int v1, int alpha, int stride)
+ int y_1, int u_1, int v_1, int alpha, int stride)
{
int i, j;
- int y1_const = y1*((1<<16)-alpha);
- int u1_const = u1*((1<<16)-alpha);
- int v1_const = v1*((1<<16)-alpha);
+ int y1_const = y_1*((1<<16)-alpha);
+ int u1_const = u_1*((1<<16)-alpha);
+ int v1_const = v_1*((1<<16)-alpha);
y += 2*stride + 2;
for (i = 0; i < 12; i++)
@@ -581,12 +581,12 @@ void vp8_blend_mb_inner_c (unsigned char *y, unsigned char *u, unsigned char *v,
* unblended to allow for other visualizations to be layered.
*/
void vp8_blend_mb_outer_c (unsigned char *y, unsigned char *u, unsigned char *v,
- int y1, int u1, int v1, int alpha, int stride)
+ int y_1, int u_1, int v_1, int alpha, int stride)
{
int i, j;
- int y1_const = y1*((1<<16)-alpha);
- int u1_const = u1*((1<<16)-alpha);
- int v1_const = v1*((1<<16)-alpha);
+ int y1_const = y_1*((1<<16)-alpha);
+ int u1_const = u_1*((1<<16)-alpha);
+ int v1_const = v_1*((1<<16)-alpha);
for (i = 0; i < 2; i++)
{
@@ -645,12 +645,12 @@ void vp8_blend_mb_outer_c (unsigned char *y, unsigned char *u, unsigned char *v,
}
void vp8_blend_b_c (unsigned char *y, unsigned char *u, unsigned char *v,
- int y1, int u1, int v1, int alpha, int stride)
+ int y_1, int u_1, int v_1, int alpha, int stride)
{
int i, j;
- int y1_const = y1*((1<<16)-alpha);
- int u1_const = u1*((1<<16)-alpha);
- int v1_const = v1*((1<<16)-alpha);
+ int y1_const = y_1*((1<<16)-alpha);
+ int u1_const = u_1*((1<<16)-alpha);
+ int v1_const = v_1*((1<<16)-alpha);
for (i = 0; i < 4; i++)
{
@@ -675,46 +675,46 @@ void vp8_blend_b_c (unsigned char *y, unsigned char *u, unsigned char *v,
}
}
-static void constrain_line (int x0, int *x1, int y0, int *y1, int width, int height)
+static void constrain_line (int x_0, int *x_1, int y_0, int *y_1, int width, int height)
{
int dx;
int dy;
- if (*x1 > width)
+ if (*x_1 > width)
{
- dx = *x1 - x0;
- dy = *y1 - y0;
+ dx = *x_1 - x_0;
+ dy = *y_1 - y_0;
- *x1 = width;
+ *x_1 = width;
if (dx)
- *y1 = ((width-x0)*dy)/dx + y0;
+ *y_1 = ((width-x_0)*dy)/dx + y_0;
}
- if (*x1 < 0)
+ if (*x_1 < 0)
{
- dx = *x1 - x0;
- dy = *y1 - y0;
+ dx = *x_1 - x_0;
+ dy = *y_1 - y_0;
- *x1 = 0;
+ *x_1 = 0;
if (dx)
- *y1 = ((0-x0)*dy)/dx + y0;
+ *y_1 = ((0-x_0)*dy)/dx + y_0;
}
- if (*y1 > height)
+ if (*y_1 > height)
{
- dx = *x1 - x0;
- dy = *y1 - y0;
+ dx = *x_1 - x_0;
+ dy = *y_1 - y_0;
- *y1 = height;
+ *y_1 = height;
if (dy)
- *x1 = ((height-y0)*dx)/dy + x0;
+ *x_1 = ((height-y_0)*dx)/dy + x_0;
}
- if (*y1 < 0)
+ if (*y_1 < 0)
{
- dx = *x1 - x0;
- dy = *y1 - y0;
+ dx = *x_1 - x_0;
+ dy = *y_1 - y_0;
- *y1 = 0;
+ *y_1 = 0;
if (dy)
- *x1 = ((0-y0)*dx)/dy + x0;
+ *x_1 = ((0-y_0)*dx)/dy + x_0;
}
}
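
constrain_line clips the endpoint (x_1, y_1) of a debug-overlay segment
against each frame border, sliding it along the line toward (x_0, y_0); the
renames only avoid clashing with the blend functions' parameters. One of the
four symmetric border cases, in Python:

    def constrain_right(x_0, x_1, y_0, y_1, width):
        if x_1 > width:
            dx = x_1 - x_0
            dy = y_1 - y_0
            x_1 = width
            if dx:
                # C truncates this division toward zero; Python floors,
                # which agrees whenever the slope terms are non-negative.
                y_1 = (width - x_0) * dy / dx + y_0
        return x_1, y_1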
diff --git a/libvpx/vp8/common/ppc/systemdependent.c b/libvpx/vp8/common/ppc/systemdependent.c
index 87f4cac..6899c0e 100644
--- a/libvpx/vp8/common/ppc/systemdependent.c
+++ b/libvpx/vp8/common/ppc/systemdependent.c
@@ -12,13 +12,8 @@
#include "subpixel.h"
#include "loopfilter.h"
#include "recon.h"
-#include "idct.h"
#include "onyxc_int.h"
-void (*vp8_short_idct4x4)(short *input, short *output, int pitch);
-void (*vp8_short_idct4x4_1)(short *input, short *output, int pitch);
-void (*vp8_dc_only_idct)(short input_dc, short *output, int pitch);
-
extern void (*vp8_post_proc_down_and_across_mb_row)(
unsigned char *src_ptr,
unsigned char *dst_ptr,
diff --git a/libvpx/vp8/common/reconinter.c b/libvpx/vp8/common/reconinter.c
index 3da3bc7..43f84d0 100644
--- a/libvpx/vp8/common/reconinter.c
+++ b/libvpx/vp8/common/reconinter.c
@@ -11,7 +11,7 @@
#include <limits.h>
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx/vpx_integer.h"
#include "blockd.h"
#include "reconinter.h"
diff --git a/libvpx/vp8/common/reconintra.c b/libvpx/vp8/common/reconintra.c
index 4067a68..ec51ffe 100644
--- a/libvpx/vp8/common/reconintra.c
+++ b/libvpx/vp8/common/reconintra.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx_mem/vpx_mem.h"
#include "blockd.h"
@@ -36,7 +36,6 @@ void vp8_build_intra_predictors_mby_s_c(MACROBLOCKD *x,
case DC_PRED:
{
int expected_dc;
- int i;
int shift;
int average = 0;
@@ -168,7 +167,6 @@ void vp8_build_intra_predictors_mbuv_s_c(MACROBLOCKD *x,
{
int expected_udc;
int expected_vdc;
- int i;
int shift;
int Uaverage = 0;
int Vaverage = 0;
@@ -217,8 +215,6 @@ void vp8_build_intra_predictors_mbuv_s_c(MACROBLOCKD *x,
break;
case V_PRED:
{
- int i;
-
for (i = 0; i < 8; i++)
{
vpx_memcpy(upred_ptr, uabove_row, 8);
@@ -231,8 +227,6 @@ void vp8_build_intra_predictors_mbuv_s_c(MACROBLOCKD *x,
break;
case H_PRED:
{
- int i;
-
for (i = 0; i < 8; i++)
{
vpx_memset(upred_ptr, uleft_col[i], 8);
@@ -245,8 +239,6 @@ void vp8_build_intra_predictors_mbuv_s_c(MACROBLOCKD *x,
break;
case TM_PRED:
{
- int i;
-
for (i = 0; i < 8; i++)
{
for (j = 0; j < 8; j++)
diff --git a/libvpx/vp8/common/reconintra4x4.c b/libvpx/vp8/common/reconintra4x4.c
index 7bb8d0a..3d4f2c4 100644
--- a/libvpx/vp8/common/reconintra4x4.c
+++ b/libvpx/vp8/common/reconintra4x4.c
@@ -10,17 +10,17 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "blockd.h"
void vp8_intra4x4_predict_c(unsigned char *Above,
unsigned char *yleft, int left_stride,
- B_PREDICTION_MODE b_mode,
+ int _b_mode,
unsigned char *dst, int dst_stride,
unsigned char top_left)
{
int i, r, c;
-
+ B_PREDICTION_MODE b_mode = (B_PREDICTION_MODE)_b_mode;
unsigned char Left[4];
Left[0] = yleft[0];
Left[1] = yleft[left_stride];
diff --git a/libvpx/vp8/common/rtcd.c b/libvpx/vp8/common/rtcd.c
index 01dad46..0b371b0 100644
--- a/libvpx/vp8/common/rtcd.c
+++ b/libvpx/vp8/common/rtcd.c
@@ -9,97 +9,13 @@
*/
#include "vpx_config.h"
#define RTCD_C
-#include "vpx_rtcd.h"
-
-#if CONFIG_MULTITHREAD && defined(_WIN32)
-#include <windows.h>
-#include <stdlib.h>
-static void once(void (*func)(void))
-{
- static CRITICAL_SECTION *lock;
- static LONG waiters;
- static int done;
- void *lock_ptr = &lock;
-
- /* If the initialization is complete, return early. This isn't just an
- * optimization, it prevents races on the destruction of the global
- * lock.
- */
- if(done)
- return;
-
- InterlockedIncrement(&waiters);
-
- /* Get a lock. We create one and try to make it the one-true-lock,
- * throwing it away if we lost the race.
- */
-
- {
- /* Scope to protect access to new_lock */
- CRITICAL_SECTION *new_lock = malloc(sizeof(CRITICAL_SECTION));
- InitializeCriticalSection(new_lock);
- if (InterlockedCompareExchangePointer(lock_ptr, new_lock, NULL) != NULL)
- {
- DeleteCriticalSection(new_lock);
- free(new_lock);
- }
- }
-
- /* At this point, we have a lock that can be synchronized on. We don't
- * care which thread actually performed the allocation.
- */
-
- EnterCriticalSection(lock);
-
- if (!done)
- {
- func();
- done = 1;
- }
-
- LeaveCriticalSection(lock);
-
- /* Last one out should free resources. The destructed objects are
- * protected by checking if(done) above.
- */
- if(!InterlockedDecrement(&waiters))
- {
- DeleteCriticalSection(lock);
- free(lock);
- lock = NULL;
- }
-}
-
-
-#elif CONFIG_MULTITHREAD && HAVE_PTHREAD_H
-#include <pthread.h>
-static void once(void (*func)(void))
-{
- static pthread_once_t lock = PTHREAD_ONCE_INIT;
- pthread_once(&lock, func);
-}
-
-
-#else
-/* No-op version that performs no synchronization. vpx_rtcd() is idempotent,
- * so as long as your platform provides atomic loads/stores of pointers
- * no synchronization is strictly necessary.
- */
-
-static void once(void (*func)(void))
-{
- static int done;
-
- if(!done)
- {
- func();
- done = 1;
- }
-}
-#endif
+#include "vp8_rtcd.h"
+#include "vpx_ports/vpx_once.h"
+extern void vpx_scale_rtcd(void);
-void vpx_rtcd()
+void vp8_rtcd()
{
+ vpx_scale_rtcd();
once(setup_rtcd_internal);
}
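
All three hand-rolled once() implementations move behind the shared
vpx_ports/vpx_once.h, and vp8_rtcd() now also chains to vpx_scale_rtcd(). The
contract every variant enforces, sketched with the same single-flag shape as
the removed no-op fallback (no locking, so not thread-safe):

    def once(func, _done=[]):
        # Run func() at most once per process; later calls are no-ops.
        if not _done:
            _done.append(True)
            func()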
diff --git a/libvpx/vp8/common/rtcd_defs.sh b/libvpx/vp8/common/rtcd_defs.sh
index 0f950f8..9ebf389 100644
--- a/libvpx/vp8/common/rtcd_defs.sh
+++ b/libvpx/vp8/common/rtcd_defs.sh
@@ -1,6 +1,8 @@
-common_forward_decls() {
+vp8_common_forward_decls() {
cat <<EOF
-#include "vp8/common/blockd.h"
+/*
+ * VP8
+ */
struct blockd;
struct macroblockd;
@@ -14,7 +16,14 @@ union int_mv;
struct yv12_buffer_config;
EOF
}
-forward_decls common_forward_decls
+forward_decls vp8_common_forward_decls
+
+#
+# system state
+#
+prototype void vp8_clear_system_state ""
+specialize vp8_clear_system_state mmx
+vp8_clear_system_state_mmx=vpx_reset_mmx_state
#
# Dequant
@@ -146,7 +155,7 @@ specialize vp8_build_intra_predictors_mby_s sse2 ssse3
prototype void vp8_build_intra_predictors_mbuv_s "struct macroblockd *x, unsigned char * uabove_row, unsigned char * vabove_row, unsigned char *uleft, unsigned char *vleft, int left_stride, unsigned char * upred_ptr, unsigned char * vpred_ptr, int pred_stride"
specialize vp8_build_intra_predictors_mbuv_s sse2 ssse3
-prototype void vp8_intra4x4_predict "unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left"
+prototype void vp8_intra4x4_predict "unsigned char *Above, unsigned char *yleft, int left_stride, int b_mode, unsigned char *dst, int dst_stride, unsigned char top_left"
specialize vp8_intra4x4_predict media
vp8_intra4x4_predict_media=vp8_intra4x4_predict_armv6
@@ -442,8 +451,9 @@ vp8_short_walsh4x4_media=vp8_short_walsh4x4_armv6
# Quantizer
#
prototype void vp8_regular_quantize_b "struct block *, struct blockd *"
-specialize vp8_regular_quantize_b sse2 sse4_1
-vp8_regular_quantize_b_sse4_1=vp8_regular_quantize_b_sse4
+specialize vp8_regular_quantize_b sse2 #sse4_1
+# TODO(johann) Update sse4 implementation and re-enable
+#vp8_regular_quantize_b_sse4_1=vp8_regular_quantize_b_sse4
prototype void vp8_fast_quantize_b "struct block *, struct blockd *"
specialize vp8_fast_quantize_b sse2 ssse3 media neon
@@ -530,39 +540,3 @@ fi
# End of encoder only functions
fi
-
-# Scaler functions
-if [ "CONFIG_SPATIAL_RESAMPLING" != "yes" ]; then
- prototype void vp8_horizontal_line_4_5_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
- prototype void vp8_vertical_band_4_5_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_last_vertical_band_4_5_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_horizontal_line_2_3_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
- prototype void vp8_vertical_band_2_3_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_last_vertical_band_2_3_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_horizontal_line_3_5_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
- prototype void vp8_vertical_band_3_5_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_last_vertical_band_3_5_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_horizontal_line_3_4_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
- prototype void vp8_vertical_band_3_4_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_last_vertical_band_3_4_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_horizontal_line_1_2_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
- prototype void vp8_vertical_band_1_2_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_last_vertical_band_1_2_scale "unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_horizontal_line_5_4_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
- prototype void vp8_vertical_band_5_4_scale "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_horizontal_line_5_3_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
- prototype void vp8_vertical_band_5_3_scale "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_horizontal_line_2_1_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
- prototype void vp8_vertical_band_2_1_scale "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
- prototype void vp8_vertical_band_2_1_scale_i "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
-fi
-
-prototype void vp8_yv12_extend_frame_borders "struct yv12_buffer_config *ybf"
-specialize vp8_yv12_extend_frame_borders neon
-
-prototype void vp8_yv12_copy_frame "struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc"
-specialize vp8_yv12_copy_frame neon
-
-prototype void vp8_yv12_copy_y "struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc"
-specialize vp8_yv12_copy_y neon
-
diff --git a/libvpx/vp8/common/systemdependent.h b/libvpx/vp8/common/systemdependent.h
index f99c4bb..e6b0456 100644
--- a/libvpx/vp8/common/systemdependent.h
+++ b/libvpx/vp8/common/systemdependent.h
@@ -10,12 +10,6 @@
#include "vpx_config.h"
-#if ARCH_X86 || ARCH_X86_64
-void vpx_reset_mmx_state(void);
-#define vp8_clear_system_state() vpx_reset_mmx_state()
-#else
-#define vp8_clear_system_state()
-#endif
struct VP8Common;
void vp8_machine_specific_config(struct VP8Common *);
diff --git a/libvpx/vp8/common/variance_c.c b/libvpx/vp8/common/variance_c.c
index da08aff..773b655 100644
--- a/libvpx/vp8/common/variance_c.c
+++ b/libvpx/vp8/common/variance_c.c
@@ -75,7 +75,7 @@ unsigned int vp8_variance16x16_c(
variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, &var, &avg);
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 8));
+ return (var - (((unsigned int)avg * avg) >> 8));
}
unsigned int vp8_variance8x16_c(
@@ -91,7 +91,7 @@ unsigned int vp8_variance8x16_c(
variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 16, &var, &avg);
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 7));
+ return (var - (((unsigned int)avg * avg) >> 7));
}
unsigned int vp8_variance16x8_c(
@@ -107,7 +107,7 @@ unsigned int vp8_variance16x8_c(
variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 8, &var, &avg);
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 7));
+ return (var - (((unsigned int)avg * avg) >> 7));
}
@@ -124,7 +124,7 @@ unsigned int vp8_variance8x8_c(
variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 8, &var, &avg);
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 6));
+ return (var - (((unsigned int)avg * avg) >> 6));
}
unsigned int vp8_variance4x4_c(
@@ -140,7 +140,7 @@ unsigned int vp8_variance4x4_c(
variance(src_ptr, source_stride, ref_ptr, recon_stride, 4, 4, &var, &avg);
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 4));
+ return (var - (((unsigned int)avg * avg) >> 4));
}
diff --git a/libvpx/vp8/common/vp8_asm_com_offsets.c b/libvpx/vp8/common/vp8_asm_com_offsets.c
new file mode 100644
index 0000000..7bab90f
--- /dev/null
+++ b/libvpx/vp8/common/vp8_asm_com_offsets.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vpx_config.h"
+#include "vpx/vpx_codec.h"
+#include "vpx_ports/asm_offsets.h"
+#include "vp8/common/blockd.h"
+
+#if CONFIG_POSTPROC
+#include "postproc.h"
+#endif /* CONFIG_POSTPROC */
+
+BEGIN
+
+#if CONFIG_POSTPROC
+/* mfqe.c / filter_by_weight */
+DEFINE(MFQE_PRECISION_VAL, MFQE_PRECISION);
+#endif /* CONFIG_POSTPROC */
+
+END
+
+/* add asserts for any offset that is not supported by assembly code */
+/* add asserts for any size that is not supported by assembly code */
+
+#if HAVE_MEDIA
+/* switch case in vp8_intra4x4_predict_armv6 is based on these enumerated values */
+ct_assert(B_DC_PRED, B_DC_PRED == 0);
+ct_assert(B_TM_PRED, B_TM_PRED == 1);
+ct_assert(B_VE_PRED, B_VE_PRED == 2);
+ct_assert(B_HE_PRED, B_HE_PRED == 3);
+ct_assert(B_LD_PRED, B_LD_PRED == 4);
+ct_assert(B_RD_PRED, B_RD_PRED == 5);
+ct_assert(B_VR_PRED, B_VR_PRED == 6);
+ct_assert(B_VL_PRED, B_VL_PRED == 7);
+ct_assert(B_HD_PRED, B_HD_PRED == 8);
+ct_assert(B_HU_PRED, B_HU_PRED == 9);
+#endif
+
+#if HAVE_SSE2
+#if CONFIG_POSTPROC
+/* vp8_filter_by_weight16x16 and 8x8 */
+ct_assert(MFQE_PRECISION_VAL, MFQE_PRECISION == 4)
+#endif /* CONFIG_POSTPROC */
+#endif /* HAVE_SSE2 */
diff --git a/libvpx/vp8/common/x86/idct_blk_mmx.c b/libvpx/vp8/common/x86/idct_blk_mmx.c
index 4adf3f5..49b2013 100644
--- a/libvpx/vp8/common/x86/idct_blk_mmx.c
+++ b/libvpx/vp8/common/x86/idct_blk_mmx.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vp8/common/blockd.h"
extern void vp8_dequantize_b_impl_mmx(short *sq, short *dq, short *q);
diff --git a/libvpx/vp8/common/x86/idct_blk_sse2.c b/libvpx/vp8/common/x86/idct_blk_sse2.c
index 056e052..ae96ec8 100644
--- a/libvpx/vp8/common/x86/idct_blk_sse2.c
+++ b/libvpx/vp8/common/x86/idct_blk_sse2.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
void vp8_idct_dequant_0_2x_sse2
(short *q, short *dq ,
diff --git a/libvpx/vp8/common/x86/iwalsh_mmx.asm b/libvpx/vp8/common/x86/iwalsh_mmx.asm
index 4aac094..158c3b7 100644
--- a/libvpx/vp8/common/x86/iwalsh_mmx.asm
+++ b/libvpx/vp8/common/x86/iwalsh_mmx.asm
@@ -24,7 +24,7 @@ sym(vp8_short_inv_walsh4x4_mmx):
movq mm0, [rdx + 0] ;ip[0]
movq mm1, [rdx + 8] ;ip[4]
- movd mm7, rax
+ movq mm7, rax
movq mm2, [rdx + 16] ;ip[8]
movq mm3, [rdx + 24] ;ip[12]
diff --git a/libvpx/vp8/common/x86/loopfilter_block_sse2.asm b/libvpx/vp8/common/x86/loopfilter_block_sse2.asm
index 1c445ef..6d5aaa1 100644
--- a/libvpx/vp8/common/x86/loopfilter_block_sse2.asm
+++ b/libvpx/vp8/common/x86/loopfilter_block_sse2.asm
@@ -136,7 +136,7 @@
global sym(vp8_loop_filter_bh_y_sse2) PRIVATE
sym(vp8_loop_filter_bh_y_sse2):
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
%define src rcx ; src_ptr
%define stride rdx ; src_pixel_step
%define blimit r8
@@ -150,6 +150,7 @@ sym(vp8_loop_filter_bh_y_sse2):
push rbp
mov rbp, rsp
+ SAVE_XMM 11
push r12
push r13
mov thresh, arg(4)
@@ -255,9 +256,10 @@ LF_FILTER xmm0, xmm1, xmm3, xmm8, xmm4, xmm2
movdqa i12, xmm3
movdqa i13, xmm8
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
pop r13
pop r12
+ RESTORE_XMM
pop rbp
%endif
@@ -276,7 +278,7 @@ LF_FILTER xmm0, xmm1, xmm3, xmm8, xmm4, xmm2
global sym(vp8_loop_filter_bv_y_sse2) PRIVATE
sym(vp8_loop_filter_bv_y_sse2):
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
%define src rcx ; src_ptr
%define stride rdx ; src_pixel_step
%define blimit r8
@@ -777,7 +779,7 @@ LF_FILTER xmm0, xmm1, xmm4, xmm8, xmm3, xmm2
; un-ALIGN_STACK
pop rsp
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
pop r13
pop r12
RESTORE_XMM
diff --git a/libvpx/vp8/common/x86/mfqe_sse2.asm b/libvpx/vp8/common/x86/mfqe_sse2.asm
index c1d2174..a8a7f56 100644
--- a/libvpx/vp8/common/x86/mfqe_sse2.asm
+++ b/libvpx/vp8/common/x86/mfqe_sse2.asm
@@ -271,7 +271,13 @@ sym(vp8_variance_and_sad_16x16_sse2):
SECTION_RODATA
align 16
t128:
+%ifndef __NASM_VER__
ddq 128
+%elif CONFIG_BIG_ENDIAN
+ dq 0, 128
+%else
+ dq 128, 0
+%endif
align 16
tMFQE: ; 1 << MFQE_PRECISION
times 8 dw 0x10
diff --git a/libvpx/vp8/common/x86/postproc_mmx.asm b/libvpx/vp8/common/x86/postproc_mmx.asm
index 966c586..5cf110b 100644
--- a/libvpx/vp8/common/x86/postproc_mmx.asm
+++ b/libvpx/vp8/common/x86/postproc_mmx.asm
@@ -61,7 +61,7 @@ sym(vp8_mbpost_proc_down_mmx):
mov rcx, 8
.init_borderd ; initialize borders
lea rdi, [rdi + rax]
- movq [rdi], xmm1
+ movq [rdi], mm1
dec rcx
jne .init_borderd
@@ -193,7 +193,6 @@ sym(vp8_mbpost_proc_down_mmx):
movq mm4, [sym(vp8_rv) + rcx*2]
%endif
paddw mm1, mm4
- ;paddw xmm1, eight8s
psraw mm1, 4
packuswb mm1, mm0
diff --git a/libvpx/vp8/common/x86/recon_sse2.asm b/libvpx/vp8/common/x86/recon_sse2.asm
index fe77450..1434bcd 100644
--- a/libvpx/vp8/common/x86/recon_sse2.asm
+++ b/libvpx/vp8/common/x86/recon_sse2.asm
@@ -890,6 +890,7 @@ sym(vp8_intra_pred_y_tm_%1):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5
+ SAVE_XMM 7
push rsi
push rdi
GET_GOT rbx
@@ -957,6 +958,7 @@ vp8_intra_pred_y_tm_%1_loop:
RESTORE_GOT
pop rdi
pop rsi
+ RESTORE_XMM
UNSHADOW_ARGS
pop rbp
ret
diff --git a/libvpx/vp8/common/x86/recon_wrapper_sse2.c b/libvpx/vp8/common/x86/recon_wrapper_sse2.c
index b482faa..65f4251 100644
--- a/libvpx/vp8/common/x86/recon_wrapper_sse2.c
+++ b/libvpx/vp8/common/x86/recon_wrapper_sse2.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx_mem/vpx_mem.h"
#include "vp8/common/blockd.h"
diff --git a/libvpx/vp8/common/x86/sad_sse3.asm b/libvpx/vp8/common/x86/sad_sse3.asm
index f90a589..69c8d37 100644
--- a/libvpx/vp8/common/x86/sad_sse3.asm
+++ b/libvpx/vp8/common/x86/sad_sse3.asm
@@ -33,7 +33,7 @@
movsxd rax, dword ptr arg(1) ; src_stride
movsxd rdx, dword ptr arg(3) ; ref_stride
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
SAVE_XMM 7, u
%define src_ptr rcx
%define src_stride rdx
@@ -76,7 +76,7 @@
pop rsi
pop rbp
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
RESTORE_XMM
%endif
%endif
@@ -111,7 +111,7 @@
xchg rbx, rax
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
SAVE_XMM 7, u
%define src_ptr rcx
%define src_stride rdx
@@ -156,7 +156,7 @@
pop rsi
pop rbp
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
pop rsi
RESTORE_XMM
%endif
diff --git a/libvpx/vp8/common/x86/subpixel_ssse3.asm b/libvpx/vp8/common/x86/subpixel_ssse3.asm
index 13bcaf6..c06f245 100644
--- a/libvpx/vp8/common/x86/subpixel_ssse3.asm
+++ b/libvpx/vp8/common/x86/subpixel_ssse3.asm
@@ -352,6 +352,7 @@ sym(vp8_filter_block1d4_h6_ssse3):
pop rdi
pop rsi
RESTORE_GOT
+ RESTORE_XMM
UNSHADOW_ARGS
pop rbp
ret
diff --git a/libvpx/vp8/common/x86/variance_mmx.c b/libvpx/vp8/common/x86/variance_mmx.c
index 0c4dd4a..36995db 100644
--- a/libvpx/vp8/common/x86/variance_mmx.c
+++ b/libvpx/vp8/common/x86/variance_mmx.c
@@ -91,7 +91,7 @@ unsigned int vp8_variance4x4_mmx(
vp8_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg) ;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 4));
+ return (var - (((unsigned int)avg * avg) >> 4));
}
@@ -108,7 +108,7 @@ unsigned int vp8_variance8x8_mmx(
vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg) ;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 6));
+ return (var - (((unsigned int)avg * avg) >> 6));
}
@@ -153,7 +153,7 @@ unsigned int vp8_variance16x16_mmx(
var = sse0 + sse1 + sse2 + sse3;
avg = sum0 + sum1 + sum2 + sum3;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 8));
+ return (var - (((unsigned int)avg * avg) >> 8));
}
unsigned int vp8_variance16x8_mmx(
@@ -172,7 +172,7 @@ unsigned int vp8_variance16x8_mmx(
var = sse0 + sse1;
avg = sum0 + sum1;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 7));
+ return (var - (((unsigned int)avg * avg) >> 7));
}
@@ -194,7 +194,7 @@ unsigned int vp8_variance8x16_mmx(
avg = sum0 + sum1;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 7));
+ return (var - (((unsigned int)avg * avg) >> 7));
}
@@ -219,7 +219,7 @@ unsigned int vp8_sub_pixel_variance4x4_mmx
&xsum, &xxsum
);
*sse = xxsum;
- return (xxsum - ((unsigned int)(xsum * xsum) >> 4));
+ return (xxsum - (((unsigned int)xsum * xsum) >> 4));
}
@@ -244,7 +244,7 @@ unsigned int vp8_sub_pixel_variance8x8_mmx
&xsum, &xxsum
);
*sse = xxsum;
- return (xxsum - ((unsigned int)(xsum * xsum) >> 6));
+ return (xxsum - (((unsigned int)xsum * xsum) >> 6));
}
unsigned int vp8_sub_pixel_variance16x16_mmx
@@ -282,7 +282,7 @@ unsigned int vp8_sub_pixel_variance16x16_mmx
xxsum0 += xxsum1;
*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 8));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}
@@ -335,7 +335,7 @@ unsigned int vp8_sub_pixel_variance16x8_mmx
xxsum0 += xxsum1;
*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 7));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 7));
}
unsigned int vp8_sub_pixel_variance8x16_mmx
@@ -358,7 +358,7 @@ unsigned int vp8_sub_pixel_variance8x16_mmx
&xsum, &xxsum
);
*sse = xxsum;
- return (xxsum - ((unsigned int)(xsum * xsum) >> 7));
+ return (xxsum - (((unsigned int)xsum * xsum) >> 7));
}
diff --git a/libvpx/vp8/common/x86/variance_sse2.c b/libvpx/vp8/common/x86/variance_sse2.c
index afd6429..7fa5f53 100644
--- a/libvpx/vp8/common/x86/variance_sse2.c
+++ b/libvpx/vp8/common/x86/variance_sse2.c
@@ -148,7 +148,7 @@ unsigned int vp8_variance4x4_wmt(
vp8_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg) ;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 4));
+ return (var - (((unsigned int)avg * avg) >> 4));
}
@@ -165,7 +165,7 @@ unsigned int vp8_variance8x8_wmt
vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg) ;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 6));
+ return (var - (((unsigned int)avg * avg) >> 6));
}
@@ -184,7 +184,7 @@ unsigned int vp8_variance16x16_wmt
vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
*sse = sse0;
- return (sse0 - ((unsigned int)(sum0 * sum0) >> 8));
+ return (sse0 - (((unsigned int)sum0 * sum0) >> 8));
}
unsigned int vp8_mse16x16_wmt(
const unsigned char *src_ptr,
@@ -220,7 +220,7 @@ unsigned int vp8_variance16x8_wmt
var = sse0 + sse1;
avg = sum0 + sum1;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 7));
+ return (var - (((unsigned int)avg * avg) >> 7));
}
@@ -241,7 +241,7 @@ unsigned int vp8_variance8x16_wmt
var = sse0 + sse1;
avg = sum0 + sum1;
*sse = var;
- return (var - ((unsigned int)(avg * avg) >> 7));
+ return (var - (((unsigned int)avg * avg) >> 7));
}
@@ -265,7 +265,7 @@ unsigned int vp8_sub_pixel_variance4x4_wmt
&xsum, &xxsum
);
*sse = xxsum;
- return (xxsum - ((unsigned int)(xsum * xsum) >> 4));
+ return (xxsum - (((unsigned int)xsum * xsum) >> 4));
}
@@ -314,7 +314,7 @@ unsigned int vp8_sub_pixel_variance8x8_wmt
}
*sse = xxsum;
- return (xxsum - ((unsigned int)(xsum * xsum) >> 6));
+ return (xxsum - (((unsigned int)xsum * xsum) >> 6));
}
unsigned int vp8_sub_pixel_variance16x16_wmt
@@ -376,7 +376,7 @@ unsigned int vp8_sub_pixel_variance16x16_wmt
}
*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 8));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}
unsigned int vp8_sub_pixel_mse16x16_wmt(
@@ -447,7 +447,7 @@ unsigned int vp8_sub_pixel_variance16x8_wmt
}
*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 7));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 7));
}
unsigned int vp8_sub_pixel_variance8x16_wmt
@@ -495,7 +495,7 @@ unsigned int vp8_sub_pixel_variance8x16_wmt
}
*sse = xxsum;
- return (xxsum - ((unsigned int)(xsum * xsum) >> 7));
+ return (xxsum - (((unsigned int)xsum * xsum) >> 7));
}
@@ -515,7 +515,7 @@ unsigned int vp8_variance_halfpixvar16x16_h_wmt(
&xsum0, &xxsum0);
*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 8));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}
@@ -534,7 +534,7 @@ unsigned int vp8_variance_halfpixvar16x16_v_wmt(
&xsum0, &xxsum0);
*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 8));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}
@@ -554,5 +554,5 @@ unsigned int vp8_variance_halfpixvar16x16_hv_wmt(
&xsum0, &xxsum0);
*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 8));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}
diff --git a/libvpx/vp8/common/x86/variance_ssse3.c b/libvpx/vp8/common/x86/variance_ssse3.c
index ba2055c..f90f811 100644
--- a/libvpx/vp8/common/x86/variance_ssse3.c
+++ b/libvpx/vp8/common/x86/variance_ssse3.c
@@ -113,7 +113,7 @@ unsigned int vp8_sub_pixel_variance16x16_ssse3
}
*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 8));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}
unsigned int vp8_sub_pixel_variance16x8_ssse3
@@ -162,5 +162,5 @@ unsigned int vp8_sub_pixel_variance16x8_ssse3
}
*sse = xxsum0;
- return (xxsum0 - ((unsigned int)(xsum0 * xsum0) >> 7));
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 7));
}
diff --git a/libvpx/vp8/common/x86/vp8_asm_stubs.c b/libvpx/vp8/common/x86/vp8_asm_stubs.c
index 3437a23..c0416b7 100644
--- a/libvpx/vp8/common/x86/vp8_asm_stubs.c
+++ b/libvpx/vp8/common/x86/vp8_asm_stubs.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx_ports/mem.h"
#include "filter_x86.h"
diff --git a/libvpx/vp8/decoder/dboolhuff.c b/libvpx/vp8/decoder/dboolhuff.c
index 7e7b05a..546fb2d 100644
--- a/libvpx/vp8/decoder/dboolhuff.c
+++ b/libvpx/vp8/decoder/dboolhuff.c
@@ -10,18 +10,20 @@
#include "dboolhuff.h"
-#include "vpx_ports/mem.h"
-#include "vpx_mem/vpx_mem.h"
int vp8dx_start_decode(BOOL_DECODER *br,
const unsigned char *source,
- unsigned int source_sz)
+ unsigned int source_sz,
+ vp8_decrypt_cb *decrypt_cb,
+ void *decrypt_state)
{
br->user_buffer_end = source+source_sz;
br->user_buffer = source;
br->value = 0;
br->count = -8;
br->range = 255;
+ br->decrypt_cb = decrypt_cb;
+ br->decrypt_state = decrypt_state;
if (source_sz && !source)
return 1;
@@ -32,21 +34,42 @@ int vp8dx_start_decode(BOOL_DECODER *br,
return 0;
}
-
void vp8dx_bool_decoder_fill(BOOL_DECODER *br)
{
- const unsigned char *bufptr;
- const unsigned char *bufend;
- VP8_BD_VALUE value;
- int count;
- bufend = br->user_buffer_end;
- bufptr = br->user_buffer;
- value = br->value;
- count = br->count;
-
- VP8DX_BOOL_DECODER_FILL(count, value, bufptr, bufend);
-
- br->user_buffer = bufptr;
+ const unsigned char *bufptr = br->user_buffer;
+ VP8_BD_VALUE value = br->value;
+ int count = br->count;
+ int shift = VP8_BD_VALUE_SIZE - 8 - (count + 8);
+ size_t bytes_left = br->user_buffer_end - bufptr;
+ size_t bits_left = bytes_left * CHAR_BIT;
+ int x = (int)(shift + CHAR_BIT - bits_left);
+ int loop_end = 0;
+ unsigned char decrypted[sizeof(VP8_BD_VALUE) + 1];
+
+ if (br->decrypt_cb) {
+ int n = bytes_left > sizeof(decrypted) ? sizeof(decrypted) : bytes_left;
+ br->decrypt_cb(br->decrypt_state, bufptr, decrypted, n);
+ bufptr = decrypted;
+ }
+
+ if(x >= 0)
+ {
+ count += VP8_LOTS_OF_BITS;
+ loop_end = x;
+ }
+
+ if (x < 0 || bits_left)
+ {
+ while(shift >= loop_end)
+ {
+ count += CHAR_BIT;
+ value |= (VP8_BD_VALUE)*bufptr << shift;
+ ++bufptr;
+ ++br->user_buffer;
+ shift -= CHAR_BIT;
+ }
+ }
+
br->value = value;
br->count = count;
}
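
The macro-based refill (deleted from dboolhuff.h below) is now written out inline so the decrypt callback can be spliced in before bytes are OR'd into the bit buffer. The non-obvious part is the shift arithmetic: count tracks how many decodable bits already sit in value, and each new byte lands at shift = VP8_BD_VALUE_SIZE - 8 - (count + 8), directly beneath them. A toy walk-through, assuming a 64-bit VP8_BD_VALUE (names are illustrative):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Toy walk-through (not library code) of the refill arithmetic above. */
int main(void)
{
    uint64_t value = 0;
    int count = -8;                    /* initial state from vp8dx_start_decode */
    const unsigned char buf[] = { 0xAB, 0xCD, 0xEF };
    const unsigned char *p = buf, *end = buf + sizeof(buf);
    int shift = 64 - 8 - (count + 8);  /* 56: first byte goes to the top */

    while (shift >= 0 && p < end) {
        count += CHAR_BIT;
        value |= (uint64_t)*p++ << shift;
        shift -= CHAR_BIT;
    }
    printf("value=%016llx count=%d\n", (unsigned long long)value, count);
    /* prints abcdef0000000000 with count=16: 0xAB at bits 63..56, etc. */
    return 0;
}

The real function additionally bumps count by VP8_LOTS_OF_BITS once the buffer runs dry, so the decoder can keep consuming zero bits without re-checking for end of input on every read.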
diff --git a/libvpx/vp8/decoder/dboolhuff.h b/libvpx/vp8/decoder/dboolhuff.h
index 1a08c05..4c0ca1c 100644
--- a/libvpx/vp8/decoder/dboolhuff.h
+++ b/libvpx/vp8/decoder/dboolhuff.h
@@ -9,21 +9,30 @@
*/
-#ifndef DBOOLHUFF_H
-#define DBOOLHUFF_H
+#ifndef DBOOLHUFF_H_
+#define DBOOLHUFF_H_
+
#include <stddef.h>
#include <limits.h>
+
#include "vpx_config.h"
#include "vpx_ports/mem.h"
#include "vpx/vpx_integer.h"
typedef size_t VP8_BD_VALUE;
-# define VP8_BD_VALUE_SIZE ((int)sizeof(VP8_BD_VALUE)*CHAR_BIT)
+#define VP8_BD_VALUE_SIZE ((int)sizeof(VP8_BD_VALUE)*CHAR_BIT)
+
/*This is meant to be a large, positive constant that can still be efficiently
loaded as an immediate (on platforms like ARM, for example).
Even relatively modest values like 100 would work fine.*/
-# define VP8_LOTS_OF_BITS (0x40000000)
+#define VP8_LOTS_OF_BITS (0x40000000)
+
+/*Decrypt n bytes of data from input -> output, using the decrypt_state
+ passed in VP8D_SET_DECRYPTOR.
+*/
+typedef void (vp8_decrypt_cb)(void *decrypt_state, const unsigned char *input,
+ unsigned char *output, int count);
typedef struct
{
@@ -32,46 +41,20 @@ typedef struct
VP8_BD_VALUE value;
int count;
unsigned int range;
+ vp8_decrypt_cb *decrypt_cb;
+ void *decrypt_state;
} BOOL_DECODER;
DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]);
int vp8dx_start_decode(BOOL_DECODER *br,
const unsigned char *source,
- unsigned int source_sz);
+ unsigned int source_sz,
+ vp8_decrypt_cb *decrypt_cb,
+ void *decrypt_state);
void vp8dx_bool_decoder_fill(BOOL_DECODER *br);
-/*The refill loop is used in several places, so define it in a macro to make
- sure they're all consistent.
- An inline function would be cleaner, but has a significant penalty, because
- multiple BOOL_DECODER fields must be modified, and the compiler is not smart
- enough to eliminate the stores to those fields and the subsequent reloads
- from them when inlining the function.*/
-#define VP8DX_BOOL_DECODER_FILL(_count,_value,_bufptr,_bufend) \
- do \
- { \
- int shift = VP8_BD_VALUE_SIZE - 8 - ((_count) + 8); \
- int loop_end, x; \
- size_t bits_left = ((_bufend)-(_bufptr))*CHAR_BIT; \
- \
- x = (int)(shift + CHAR_BIT - bits_left); \
- loop_end = 0; \
- if(x >= 0) \
- { \
- (_count) += VP8_LOTS_OF_BITS; \
- loop_end = x; \
- if(!bits_left) break; \
- } \
- while(shift >= loop_end) \
- { \
- (_count) += CHAR_BIT; \
- (_value) |= (VP8_BD_VALUE)*(_bufptr)++ << shift; \
- shift -= CHAR_BIT; \
- } \
- } \
- while(0) \
-
static int vp8dx_decode_bool(BOOL_DECODER *br, int probability) {
unsigned int bit = 0;
@@ -151,4 +134,5 @@ static int vp8dx_bool_error(BOOL_DECODER *br)
/* No error. */
return 0;
}
-#endif
+
+#endif // DBOOLHUFF_H_
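
For concreteness, a hypothetical vp8_decrypt_cb implementation matching the typedef above -- a rolling XOR whose state struct, names and key scheme are invented for illustration, not part of libvpx:

#include <stddef.h>

typedef struct
{
    const unsigned char *buf_start;  /* start of the encrypted frame */
    const unsigned char *key;
    int key_len;
} xor_state;                         /* hypothetical decrypt_state */

static void xor_decrypt_cb(void *decrypt_state, const unsigned char *input,
                           unsigned char *output, int count)
{
    const xor_state *s = (const xor_state *)decrypt_state;
    ptrdiff_t offset = input - s->buf_start; /* position within the frame */
    int i;
    for (i = 0; i < count; ++i)
        output[i] = input[i] ^ s->key[(offset + i) % s->key_len];
}

A caller would hand this pair to vp8dx_start_decode(br, source, source_sz, xor_decrypt_cb, &state), or install it through the VP8D_SET_DECRYPTOR control referenced in the typedef's comment.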
diff --git a/libvpx/vp8/decoder/decodemv.h b/libvpx/vp8/decoder/decodemv.h
index 9403424..05a33d2 100644
--- a/libvpx/vp8/decoder/decodemv.h
+++ b/libvpx/vp8/decoder/decodemv.h
@@ -8,7 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#ifndef DECODEMV_H_
+#define DECODEMV_H_
#include "onyxd_int.h"
void vp8_decode_mode_mvs(VP8D_COMP *);
+
+#endif // DECODEMV_H_
diff --git a/libvpx/vp8/decoder/decoderthreading.h b/libvpx/vp8/decoder/decoderthreading.h
index 60c39d1..bc716e4 100644
--- a/libvpx/vp8/decoder/decoderthreading.h
+++ b/libvpx/vp8/decoder/decoderthreading.h
@@ -8,19 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
-
-
-
-#ifndef _DECODER_THREADING_H
-#define _DECODER_THREADING_H
+#ifndef DECODERTHREADING_H_
+#define DECODERTHREADING_H_
#if CONFIG_MULTITHREAD
-extern void vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd);
-extern void vp8_decoder_remove_threads(VP8D_COMP *pbi);
-extern void vp8_decoder_create_threads(VP8D_COMP *pbi);
-extern void vp8mt_alloc_temp_buffers(VP8D_COMP *pbi, int width, int prev_mb_rows);
-extern void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows);
+void vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd);
+void vp8_decoder_remove_threads(VP8D_COMP *pbi);
+void vp8_decoder_create_threads(VP8D_COMP *pbi);
+void vp8mt_alloc_temp_buffers(VP8D_COMP *pbi, int width, int prev_mb_rows);
+void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows);
#endif
-#endif
+#endif // DECODERTHREADING_H_
diff --git a/libvpx/vp8/decoder/decodframe.c b/libvpx/vp8/decoder/decodframe.c
index a4a00f6..44c35ef 100644
--- a/libvpx/vp8/decoder/decodframe.c
+++ b/libvpx/vp8/decoder/decodframe.c
@@ -10,7 +10,8 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
+#include "./vpx_scale_rtcd.h"
#include "onyxd_int.h"
#include "vp8/common/header.h"
#include "vp8/common/reconintra4x4.h"
@@ -20,7 +21,7 @@
#include "vp8/common/alloccommon.h"
#include "vp8/common/entropymode.h"
#include "vp8/common/quant_common.h"
-#include "vpx_scale/vpxscale.h"
+#include "vpx_scale/vpx_scale.h"
#include "vp8/common/setupintrarecon.h"
#include "decodemv.h"
@@ -758,11 +759,16 @@ static void decode_mb_rows(VP8D_COMP *pbi)
}
-static unsigned int read_partition_size(const unsigned char *cx_size)
+static unsigned int read_partition_size(VP8D_COMP *pbi,
+ const unsigned char *cx_size)
{
- const unsigned int size =
- cx_size[0] + (cx_size[1] << 8) + (cx_size[2] << 16);
- return size;
+ unsigned char temp[3];
+ if (pbi->decrypt_cb)
+ {
+ pbi->decrypt_cb(pbi->decrypt_state, cx_size, temp, 3);
+ cx_size = temp;
+ }
+ return cx_size[0] + (cx_size[1] << 8) + (cx_size[2] << 16);
}
static int read_is_valid(const unsigned char *start,
@@ -793,7 +799,7 @@ static unsigned int read_available_partition_size(
if (i < num_part - 1)
{
if (read_is_valid(partition_size_ptr, 3, first_fragment_end))
- partition_size = read_partition_size(partition_size_ptr);
+ partition_size = read_partition_size(pbi, partition_size_ptr);
else if (pbi->ec_active)
partition_size = (unsigned int)bytes_left;
else
@@ -827,8 +833,8 @@ static void setup_token_decoder(VP8D_COMP *pbi,
unsigned int partition_idx;
unsigned int fragment_idx;
unsigned int num_token_partitions;
- const unsigned char *first_fragment_end = pbi->fragments[0] +
- pbi->fragment_sizes[0];
+ const unsigned char *first_fragment_end = pbi->fragments.ptrs[0] +
+ pbi->fragments.sizes[0];
TOKEN_PARTITION multi_token_partition =
(TOKEN_PARTITION)vp8_read_literal(&pbi->mbc[8], 2);
@@ -838,10 +844,10 @@ static void setup_token_decoder(VP8D_COMP *pbi,
/* Check for partitions within the fragments and unpack the fragments
* so that each fragment pointer points to its corresponding partition. */
- for (fragment_idx = 0; fragment_idx < pbi->num_fragments; ++fragment_idx)
+ for (fragment_idx = 0; fragment_idx < pbi->fragments.count; ++fragment_idx)
{
- unsigned int fragment_size = pbi->fragment_sizes[fragment_idx];
- const unsigned char *fragment_end = pbi->fragments[fragment_idx] +
+ unsigned int fragment_size = pbi->fragments.sizes[fragment_idx];
+ const unsigned char *fragment_end = pbi->fragments.ptrs[fragment_idx] +
fragment_size;
/* Special case for handling the first partition since we have already
* read its size. */
@@ -849,16 +855,16 @@ static void setup_token_decoder(VP8D_COMP *pbi,
{
/* Size of first partition + token partition sizes element */
ptrdiff_t ext_first_part_size = token_part_sizes -
- pbi->fragments[0] + 3 * (num_token_partitions - 1);
+ pbi->fragments.ptrs[0] + 3 * (num_token_partitions - 1);
fragment_size -= (unsigned int)ext_first_part_size;
if (fragment_size > 0)
{
- pbi->fragment_sizes[0] = (unsigned int)ext_first_part_size;
+ pbi->fragments.sizes[0] = (unsigned int)ext_first_part_size;
/* The fragment contains an additional partition. Move to
* next. */
fragment_idx++;
- pbi->fragments[fragment_idx] = pbi->fragments[0] +
- pbi->fragment_sizes[0];
+ pbi->fragments.ptrs[fragment_idx] = pbi->fragments.ptrs[0] +
+ pbi->fragments.sizes[0];
}
}
/* Split the chunk into partitions read from the bitstream */
@@ -867,12 +873,12 @@ static void setup_token_decoder(VP8D_COMP *pbi,
ptrdiff_t partition_size = read_available_partition_size(
pbi,
token_part_sizes,
- pbi->fragments[fragment_idx],
+ pbi->fragments.ptrs[fragment_idx],
first_fragment_end,
fragment_end,
fragment_idx - 1,
num_token_partitions);
- pbi->fragment_sizes[fragment_idx] = (unsigned int)partition_size;
+ pbi->fragments.sizes[fragment_idx] = (unsigned int)partition_size;
fragment_size -= (unsigned int)partition_size;
assert(fragment_idx <= num_token_partitions);
if (fragment_size > 0)
@@ -880,19 +886,20 @@ static void setup_token_decoder(VP8D_COMP *pbi,
/* The fragment contains an additional partition.
* Move to next. */
fragment_idx++;
- pbi->fragments[fragment_idx] =
- pbi->fragments[fragment_idx - 1] + partition_size;
+ pbi->fragments.ptrs[fragment_idx] =
+ pbi->fragments.ptrs[fragment_idx - 1] + partition_size;
}
}
}
- pbi->num_fragments = num_token_partitions + 1;
+ pbi->fragments.count = num_token_partitions + 1;
- for (partition_idx = 1; partition_idx < pbi->num_fragments; ++partition_idx)
+ for (partition_idx = 1; partition_idx < pbi->fragments.count; ++partition_idx)
{
if (vp8dx_start_decode(bool_decoder,
- pbi->fragments[partition_idx],
- pbi->fragment_sizes[partition_idx]))
+ pbi->fragments.ptrs[partition_idx],
+ pbi->fragments.sizes[partition_idx],
+ pbi->decrypt_cb, pbi->decrypt_state))
vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR,
"Failed to allocate bool decoder %d",
partition_idx);
@@ -979,11 +986,11 @@ static void init_frame(VP8D_COMP *pbi)
int vp8_decode_frame(VP8D_COMP *pbi)
{
- vp8_reader *const bc = & pbi->mbc[8];
- VP8_COMMON *const pc = & pbi->common;
- MACROBLOCKD *const xd = & pbi->mb;
- const unsigned char *data = pbi->fragments[0];
- const unsigned char *data_end = data + pbi->fragment_sizes[0];
+ vp8_reader *const bc = &pbi->mbc[8];
+ VP8_COMMON *const pc = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ const unsigned char *data = pbi->fragments.ptrs[0];
+ const unsigned char *data_end = data + pbi->fragments.sizes[0];
ptrdiff_t first_partition_length_in_bytes;
int i, j, k, l;
@@ -1015,18 +1022,30 @@ int vp8_decode_frame(VP8D_COMP *pbi)
}
else
{
- pc->frame_type = (FRAME_TYPE)(data[0] & 1);
- pc->version = (data[0] >> 1) & 7;
- pc->show_frame = (data[0] >> 4) & 1;
+ unsigned char clear_buffer[10];
+ const unsigned char *clear = data;
+ if (pbi->decrypt_cb)
+ {
+ int n = data_end - data;
+ if (n > 10) n = 10;
+ pbi->decrypt_cb(pbi->decrypt_state, data, clear_buffer, n);
+ clear = clear_buffer;
+ }
+
+ pc->frame_type = (FRAME_TYPE)(clear[0] & 1);
+ pc->version = (clear[0] >> 1) & 7;
+ pc->show_frame = (clear[0] >> 4) & 1;
first_partition_length_in_bytes =
- (data[0] | (data[1] << 8) | (data[2] << 16)) >> 5;
+ (clear[0] | (clear[1] << 8) | (clear[2] << 16)) >> 5;
- if (!pbi->ec_active && (data + first_partition_length_in_bytes > data_end
+ if (!pbi->ec_active &&
+ (data + first_partition_length_in_bytes > data_end
|| data + first_partition_length_in_bytes < data))
vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt partition 0 length");
data += 3;
+ clear += 3;
vp8_setup_version(pc);
@@ -1039,7 +1058,7 @@ int vp8_decode_frame(VP8D_COMP *pbi)
*/
if (!pbi->ec_active || data + 3 < data_end)
{
- if (data[0] != 0x9d || data[1] != 0x01 || data[2] != 0x2a)
+ if (clear[0] != 0x9d || clear[1] != 0x01 || clear[2] != 0x2a)
vpx_internal_error(&pc->error, VPX_CODEC_UNSUP_BITSTREAM,
"Invalid frame sync code");
}
@@ -1050,13 +1069,13 @@ int vp8_decode_frame(VP8D_COMP *pbi)
*/
if (!pbi->ec_active || data + 6 < data_end)
{
- pc->Width = (data[3] | (data[4] << 8)) & 0x3fff;
- pc->horiz_scale = data[4] >> 6;
- pc->Height = (data[5] | (data[6] << 8)) & 0x3fff;
- pc->vert_scale = data[6] >> 6;
+ pc->Width = (clear[3] | (clear[4] << 8)) & 0x3fff;
+ pc->horiz_scale = clear[4] >> 6;
+ pc->Height = (clear[5] | (clear[6] << 8)) & 0x3fff;
+ pc->vert_scale = clear[6] >> 6;
}
data += 7;
-
+ clear += 7;
}
else
{
@@ -1071,7 +1090,8 @@ int vp8_decode_frame(VP8D_COMP *pbi)
init_frame(pbi);
- if (vp8dx_start_decode(bc, data, (unsigned int)(data_end - data)))
+ if (vp8dx_start_decode(bc, data, (unsigned int)(data_end - data),
+ pbi->decrypt_cb, pbi->decrypt_state))
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate bool decoder 0");
if (pc->frame_type == KEY_FRAME) {
@@ -1334,11 +1354,11 @@ int vp8_decode_frame(VP8D_COMP *pbi)
#if CONFIG_MULTITHREAD
if (pbi->b_multithreaded_rd && pc->multi_token_partition != ONE_PARTITION)
{
- unsigned int i;
+ unsigned int thread;
vp8mt_decode_mb_rows(pbi, xd);
vp8_yv12_extend_frame_borders(yv12_fb_new);
- for (i = 0; i < pbi->decoding_thread_count; ++i)
- corrupt_tokens |= pbi->mb_row_di[i].mbd.corrupted;
+ for (thread = 0; thread < pbi->decoding_thread_count; ++thread)
+ corrupt_tokens |= pbi->mb_row_di[thread].mbd.corrupted;
}
else
#endif
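
The decrypt-aware parse above reads at most 10 clear bytes, which covers the whole VP8 uncompressed header: a 3-byte frame tag, and on key frames a 3-byte sync code plus 4 bytes of dimensions. A minimal sketch of that layout with absolute byte offsets (the struct and helper names are mine, matching the field extraction in this patch):

#include <stdint.h>

typedef struct
{
    int key_frame;   /* bit 0 of byte 0; 0 means key frame in VP8 */
    int version;
    int show_frame;
    uint32_t first_part_size;
    int width, horiz_scale;
    int height, vert_scale;
} vp8_frame_hdr;     /* illustrative, not a libvpx type */

static int parse_vp8_header(const unsigned char *clear, vp8_frame_hdr *h)
{
    h->key_frame = (clear[0] & 1) == 0;
    h->version = (clear[0] >> 1) & 7;
    h->show_frame = (clear[0] >> 4) & 1;
    h->first_part_size =
        (clear[0] | (clear[1] << 8) | (clear[2] << 16)) >> 5;
    if (h->key_frame) {
        /* sync code, then 14-bit dimensions with 2-bit scale factors */
        if (clear[3] != 0x9d || clear[4] != 0x01 || clear[5] != 0x2a)
            return -1;
        h->width = (clear[6] | (clear[7] << 8)) & 0x3fff;
        h->horiz_scale = clear[7] >> 6;
        h->height = (clear[8] | (clear[9] << 8)) & 0x3fff;
        h->vert_scale = clear[9] >> 6;
    }
    return 0;
}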
diff --git a/libvpx/vp8/decoder/detokenize.h b/libvpx/vp8/decoder/detokenize.h
index 8640bda..f2130b3 100644
--- a/libvpx/vp8/decoder/detokenize.h
+++ b/libvpx/vp8/decoder/detokenize.h
@@ -8,13 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
-#ifndef DETOKENIZE_H
-#define DETOKENIZE_H
+#ifndef DETOKENIZE_H_
+#define DETOKENIZE_H_
#include "onyxd_int.h"
void vp8_reset_mb_tokens_context(MACROBLOCKD *x);
int vp8_decode_mb_tokens(VP8D_COMP *, MACROBLOCKD *);
-#endif /* DETOKENIZE_H */
+#endif // DETOKENIZE_H_
diff --git a/libvpx/vp8/decoder/ec_types.h b/libvpx/vp8/decoder/ec_types.h
index ccb5ddb..b24bfd9 100644
--- a/libvpx/vp8/decoder/ec_types.h
+++ b/libvpx/vp8/decoder/ec_types.h
@@ -14,7 +14,6 @@
#define MAX_OVERLAPS 16
-
/* The area (pixel area in Q6) the block pointed to by bmi overlaps
* another block with.
*/
@@ -48,4 +47,4 @@ typedef struct
MV_REFERENCE_FRAME ref_frame;
} EC_BLOCK;
-#endif /* VP8_DEC_EC_TYPES_H */
+#endif // VP8_DEC_EC_TYPES_H
diff --git a/libvpx/vp8/decoder/error_concealment.c b/libvpx/vp8/decoder/error_concealment.c
index 8b2e32b..0b58c98 100644
--- a/libvpx/vp8/decoder/error_concealment.c
+++ b/libvpx/vp8/decoder/error_concealment.c
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include <assert.h>
+
#include "error_concealment.h"
#include "onyxd_int.h"
#include "decodemv.h"
#include "vpx_mem/vpx_mem.h"
#include "vp8/common/findnearmv.h"
-#include <assert.h>
-
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))
diff --git a/libvpx/vp8/decoder/error_concealment.h b/libvpx/vp8/decoder/error_concealment.h
index 65ae9d9..fb96b36 100644
--- a/libvpx/vp8/decoder/error_concealment.h
+++ b/libvpx/vp8/decoder/error_concealment.h
@@ -9,8 +9,8 @@
*/
-#ifndef ERROR_CONCEALMENT_H
-#define ERROR_CONCEALMENT_H
+#ifndef ERROR_CONCEALMENT_H_
+#define ERROR_CONCEALMENT_H_
#include "onyxd_int.h"
#include "ec_types.h"
@@ -38,4 +38,4 @@ void vp8_interpolate_motion(MACROBLOCKD *mb,
*/
void vp8_conceal_corrupt_mb(MACROBLOCKD *xd);
-#endif
+#endif // ERROR_CONCEALMENT_H_
diff --git a/libvpx/vp8/decoder/onyxd_if.c b/libvpx/vp8/decoder/onyxd_if.c
index 8d6871b..2db3096 100644
--- a/libvpx/vp8/decoder/onyxd_if.c
+++ b/libvpx/vp8/decoder/onyxd_if.c
@@ -25,7 +25,8 @@
#include <assert.h>
#include "vp8/common/quant_common.h"
-#include "vpx_scale/vpxscale.h"
+#include "./vpx_scale_rtcd.h"
+#include "vpx_scale/vpx_scale.h"
#include "vp8/common/systemdependent.h"
#include "vpx_ports/vpx_timer.h"
#include "detokenize.h"
@@ -41,7 +42,16 @@ extern void vp8cx_init_de_quantizer(VP8D_COMP *pbi);
static int get_free_fb (VP8_COMMON *cm);
static void ref_cnt_fb (int *buf, int *idx, int new_idx);
-struct VP8D_COMP * vp8dx_create_decompressor(VP8D_CONFIG *oxcf)
+static void remove_decompressor(VP8D_COMP *pbi)
+{
+#if CONFIG_ERROR_CONCEALMENT
+ vp8_de_alloc_overlap_lists(pbi);
+#endif
+ vp8_remove_common(&pbi->common);
+ vpx_free(pbi);
+}
+
+static struct VP8D_COMP * create_decompressor(VP8D_CONFIG *oxcf)
{
VP8D_COMP *pbi = vpx_memalign(32, sizeof(VP8D_COMP));
@@ -53,7 +63,7 @@ struct VP8D_COMP * vp8dx_create_decompressor(VP8D_CONFIG *oxcf)
if (setjmp(pbi->common.error.jmp))
{
pbi->common.error.setjmp = 0;
- vp8dx_remove_decompressor(pbi);
+ remove_decompressor(pbi);
return 0;
}
@@ -64,11 +74,6 @@ struct VP8D_COMP * vp8dx_create_decompressor(VP8D_CONFIG *oxcf)
pbi->common.current_video_frame = 0;
pbi->ready_for_new_data = 1;
-#if CONFIG_MULTITHREAD
- pbi->max_threads = oxcf->max_threads;
- vp8_decoder_create_threads(pbi);
-#endif
-
/* vp8cx_init_de_quantizer() is first called here. Add check in frame_init_dequantizer() to avoid
* unnecessary calling of vp8cx_init_de_quantizer() for every frame.
*/
@@ -91,9 +96,6 @@ struct VP8D_COMP * vp8dx_create_decompressor(VP8D_CONFIG *oxcf)
pbi->decoded_key_frame = 0;
- pbi->input_fragments = oxcf->input_fragments;
- pbi->num_fragments = 0;
-
/* Independent partitions is activated when a frame updates the
* token probability table to have equal probabilities over the
* PREV_COEF context.
@@ -105,25 +107,6 @@ struct VP8D_COMP * vp8dx_create_decompressor(VP8D_CONFIG *oxcf)
return pbi;
}
-
-void vp8dx_remove_decompressor(VP8D_COMP *pbi)
-{
- if (!pbi)
- return;
-
-#if CONFIG_MULTITHREAD
- if (pbi->b_multithreaded_rd)
- vp8mt_de_alloc_temp_buffers(pbi, pbi->common.mb_rows);
- vp8_decoder_remove_threads(pbi);
-#endif
-#if CONFIG_ERROR_CONCEALMENT
- vp8_de_alloc_overlap_lists(pbi);
-#endif
- vp8_remove_common(&pbi->common);
- vpx_free(pbi);
-}
-
-
vpx_codec_err_t vp8dx_get_reference(VP8D_COMP *pbi, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd)
{
VP8_COMMON *cm = &pbi->common;
@@ -281,60 +264,13 @@ static int swap_frame_buffers (VP8_COMMON *cm)
return err;
}
-int vp8dx_receive_compressed_data(VP8D_COMP *pbi, size_t size,
- const uint8_t *source,
- int64_t time_stamp)
+int check_fragments_for_errors(VP8D_COMP *pbi)
{
-#if HAVE_NEON
- int64_t dx_store_reg[8];
-#endif
- VP8_COMMON *cm = &pbi->common;
- int retcode = -1;
-
- pbi->common.error.error_code = VPX_CODEC_OK;
-
- if (pbi->num_fragments == 0)
- {
- /* New frame, reset fragment pointers and sizes */
- vpx_memset((void*)pbi->fragments, 0, sizeof(pbi->fragments));
- vpx_memset(pbi->fragment_sizes, 0, sizeof(pbi->fragment_sizes));
- }
- if (pbi->input_fragments && !(source == NULL && size == 0))
- {
- /* Store a pointer to this fragment and return. We haven't
- * received the complete frame yet, so we will wait with decoding.
- */
- assert(pbi->num_fragments < MAX_PARTITIONS);
- pbi->fragments[pbi->num_fragments] = source;
- pbi->fragment_sizes[pbi->num_fragments] = size;
- pbi->num_fragments++;
- if (pbi->num_fragments > (1 << EIGHT_PARTITION) + 1)
- {
- pbi->common.error.error_code = VPX_CODEC_UNSUP_BITSTREAM;
- pbi->common.error.setjmp = 0;
- pbi->num_fragments = 0;
- return -1;
- }
- return 0;
- }
-
- if (!pbi->input_fragments)
- {
- pbi->fragments[0] = source;
- pbi->fragment_sizes[0] = size;
- pbi->num_fragments = 1;
- }
- assert(pbi->common.multi_token_partition <= EIGHT_PARTITION);
- if (pbi->num_fragments == 0)
- {
- pbi->num_fragments = 1;
- pbi->fragments[0] = NULL;
- pbi->fragment_sizes[0] = 0;
- }
-
if (!pbi->ec_active &&
- pbi->num_fragments <= 1 && pbi->fragment_sizes[0] == 0)
+ pbi->fragments.count <= 1 && pbi->fragments.sizes[0] == 0)
{
+ VP8_COMMON *cm = &pbi->common;
+
/* If error concealment is disabled we won't signal missing frames
* to the decoder.
*/
@@ -360,12 +296,29 @@ int vp8dx_receive_compressed_data(VP8D_COMP *pbi, size_t size,
/* Signal that we have no frame to show. */
cm->show_frame = 0;
- pbi->num_fragments = 0;
-
/* Nothing more to do. */
return 0;
}
+ return 1;
+}
+
+int vp8dx_receive_compressed_data(VP8D_COMP *pbi, size_t size,
+ const uint8_t *source,
+ int64_t time_stamp)
+{
+#if HAVE_NEON
+ int64_t dx_store_reg[8];
+#endif
+ VP8_COMMON *cm = &pbi->common;
+ int retcode = -1;
+
+ pbi->common.error.error_code = VPX_CODEC_OK;
+
+ retcode = check_fragments_for_errors(pbi);
+ if(retcode <= 0)
+ return retcode;
+
#if HAVE_NEON
#if CONFIG_RUNTIME_CPU_DETECT
if (cm->cpu_caps & HAS_NEON)
@@ -418,7 +371,13 @@ int vp8dx_receive_compressed_data(VP8D_COMP *pbi, size_t size,
vp8_clear_system_state();
-#if CONFIG_ERROR_CONCEALMENT
+ if (cm->show_frame)
+ {
+ cm->current_video_frame++;
+ cm->show_frame_mi = cm->mi;
+ }
+
+ #if CONFIG_ERROR_CONCEALMENT
/* swap the mode infos to storage for future error concealment */
if (pbi->ec_enabled && pbi->common.prev_mi)
{
@@ -440,9 +399,6 @@ int vp8dx_receive_compressed_data(VP8D_COMP *pbi, size_t size,
}
#endif
- if (cm->show_frame)
- cm->current_video_frame++;
-
pbi->ready_for_new_data = 0;
pbi->last_time_stamp = time_stamp;
@@ -457,7 +413,6 @@ decode_exit:
#endif
pbi->common.error.setjmp = 0;
- pbi->num_fragments = 0;
return retcode;
}
int vp8dx_get_raw_frame(VP8D_COMP *pbi, YV12_BUFFER_CONFIG *sd, int64_t *time_stamp, int64_t *time_end_stamp, vp8_ppflags_t *flags)
@@ -520,3 +475,54 @@ int vp8dx_references_buffer( VP8_COMMON *oci, int ref_frame )
return 0;
}
+
+int vp8_create_decoder_instances(struct frame_buffers *fb, VP8D_CONFIG *oxcf)
+{
+ if(!fb->use_frame_threads)
+ {
+ /* decoder instance for single thread mode */
+ fb->pbi[0] = create_decompressor(oxcf);
+ if(!fb->pbi[0])
+ return VPX_CODEC_ERROR;
+
+#if CONFIG_MULTITHREAD
+ /* enable row-based threading only when use_frame_threads
+ * is disabled */
+ fb->pbi[0]->max_threads = oxcf->max_threads;
+ vp8_decoder_create_threads(fb->pbi[0]);
+#endif
+ }
+ else
+ {
+ /* TODO : create frame threads and decoder instances for each
+ * thread here */
+ }
+
+ return VPX_CODEC_OK;
+}
+
+int vp8_remove_decoder_instances(struct frame_buffers *fb)
+{
+ if(!fb->use_frame_threads)
+ {
+ VP8D_COMP *pbi = fb->pbi[0];
+
+ if (!pbi)
+ return VPX_CODEC_ERROR;
+#if CONFIG_MULTITHREAD
+ if (pbi->b_multithreaded_rd)
+ vp8mt_de_alloc_temp_buffers(pbi, pbi->common.mb_rows);
+ vp8_decoder_remove_threads(pbi);
+#endif
+
+ /* decoder instance for single thread mode */
+ remove_decompressor(pbi);
+ }
+ else
+ {
+ /* TODO : remove frame threads and decoder instances for each
+ * thread here */
+ }
+
+ return VPX_CODEC_OK;
+}
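
The decompressor lifecycle now goes through the frame_buffers wrapper rather than the removed vp8dx_create/remove pair. A hedged sketch of the single-threaded path, assuming the internal onyxd_int.h declarations; error handling and frame feeding are elided:

static int decode_session(VP8D_CONFIG *oxcf)
{
    struct frame_buffers fb = {0};

    fb.use_frame_threads = 0;  /* frame threading is still a TODO above */
    if (vp8_create_decoder_instances(&fb, oxcf) != VPX_CODEC_OK)
        return -1;             /* oxcf->max_threads drives row threading */

    /* ... per frame: vp8dx_receive_compressed_data(fb.pbi[0], ...) ... */

    return vp8_remove_decoder_instances(&fb);
}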
diff --git a/libvpx/vp8/decoder/onyxd_int.h b/libvpx/vp8/decoder/onyxd_int.h
index 0063beb..54a98f7 100644
--- a/libvpx/vp8/decoder/onyxd_int.h
+++ b/libvpx/vp8/decoder/onyxd_int.h
@@ -9,8 +9,9 @@
*/
-#ifndef __INC_VP8D_INT_H
-#define __INC_VP8D_INT_H
+#ifndef ONYXD_INT_H_
+#define ONYXD_INT_H_
+
#include "vpx_config.h"
#include "vp8/common/onyxd.h"
#include "treereader.h"
@@ -33,6 +34,31 @@ typedef struct
MACROBLOCKD mbd;
} MB_ROW_DEC;
+
+typedef struct
+{
+ int enabled;
+ unsigned int count;
+ const unsigned char *ptrs[MAX_PARTITIONS];
+ unsigned int sizes[MAX_PARTITIONS];
+} FRAGMENT_DATA;
+
+#define MAX_FB_MT_DEC 32
+
+struct frame_buffers
+{
+ /*
+ * this struct will be populated with frame buffer management
+ * info in future commits. */
+
+ /* enable/disable frame-based threading */
+ int use_frame_threads;
+
+ /* decoder instances */
+ struct VP8D_COMP *pbi[MAX_FB_MT_DEC];
+
+};
+
typedef struct VP8D_COMP
{
DECLARE_ALIGNED(16, MACROBLOCKD, mb);
@@ -46,10 +72,7 @@ typedef struct VP8D_COMP
VP8D_CONFIG oxcf;
-
- const unsigned char *fragments[MAX_PARTITIONS];
- unsigned int fragment_sizes[MAX_PARTITIONS];
- unsigned int num_fragments;
+ FRAGMENT_DATA fragments;
#if CONFIG_MULTITHREAD
/* variable for threading */
@@ -95,15 +118,19 @@ typedef struct VP8D_COMP
#endif
int ec_enabled;
int ec_active;
- int input_fragments;
int decoded_key_frame;
int independent_partitions;
int frame_corrupt_residual;
+ vp8_decrypt_cb *decrypt_cb;
+ void *decrypt_state;
} VP8D_COMP;
int vp8_decode_frame(VP8D_COMP *cpi);
+int vp8_create_decoder_instances(struct frame_buffers *fb, VP8D_CONFIG *oxcf);
+int vp8_remove_decoder_instances(struct frame_buffers *fb);
+
#if CONFIG_DEBUG
#define CHECK_MEM_ERROR(lval,expr) do {\
lval = (expr); \
@@ -121,4 +148,4 @@ int vp8_decode_frame(VP8D_COMP *cpi);
} while(0)
#endif
-#endif
+#endif // ONYXD_INT_H_
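
FRAGMENT_DATA folds the three parallel fields removed from VP8D_COMP above into one struct, which is what the fragment bookkeeping deleted from vp8dx_receive_compressed_data now operates on. A sketch of the append step that bookkeeping implies (the helper name is hypothetical):

static int append_fragment(FRAGMENT_DATA *f, const unsigned char *ptr,
                           unsigned int size)
{
    if (f->count >= MAX_PARTITIONS)
        return -1;             /* more fragments than the bitstream allows */
    f->ptrs[f->count] = ptr;
    f->sizes[f->count] = size;
    ++f->count;
    return 0;
}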
diff --git a/libvpx/vp8/decoder/threading.c b/libvpx/vp8/decoder/threading.c
index 88c06be..7303189 100644
--- a/libvpx/vp8/decoder/threading.c
+++ b/libvpx/vp8/decoder/threading.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#if !defined(WIN32) && CONFIG_OS_SUPPORT == 1
# include <unistd.h>
#endif
@@ -36,7 +36,7 @@
} while (0)
-extern void vp8_mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd);
+void vp8_mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd);
static void setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd, MB_ROW_DEC *mbrd, int count)
{
@@ -343,7 +343,6 @@ static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd, int start_mb_row)
for (mb_row = start_mb_row; mb_row < pc->mb_rows; mb_row += (pbi->decoding_thread_count + 1))
{
- int i;
int recon_yoffset, recon_uvoffset;
int mb_col;
int filter_level;
diff --git a/libvpx/vp8/decoder/treereader.h b/libvpx/vp8/decoder/treereader.h
index 238ff85..9393bb4 100644
--- a/libvpx/vp8/decoder/treereader.h
+++ b/libvpx/vp8/decoder/treereader.h
@@ -9,18 +9,17 @@
*/
-#ifndef tree_reader_h
-#define tree_reader_h 1
+#ifndef TREEREADER_H_
+#define TREEREADER_H_
#include "vp8/common/treecoder.h"
-
#include "dboolhuff.h"
typedef BOOL_DECODER vp8_reader;
#define vp8_read vp8dx_decode_bool
#define vp8_read_literal vp8_decode_value
-#define vp8_read_bit( R) vp8_read( R, vp8_prob_half)
+#define vp8_read_bit(R) vp8_read(R, vp8_prob_half)
/* Intent of tree data structure is to make decoding trivial. */
@@ -38,4 +37,4 @@ static int vp8_treed_read(
return -i;
}
-#endif /* tree_reader_h */
+#endif // TREEREADER_H_
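
The "return -i" above is the whole trick of the tree reader: tree tables store child indices as non-negative entries and leaves as negated symbol values, so the loop walks until the index goes non-positive. A toy model outside the bool-decoder machinery (table and helper are invented):

typedef signed char toy_tree_index;

/* non-negative entries index further node pairs; a leaf is the negated
 * symbol (symbol 0 is stored as -0 == 0, which the i > 0 test also treats
 * as a leaf, exactly as in vp8_treed_read above) */
static const toy_tree_index toy_tree[] =
{
    -0, 2,    /* root: bit 0 -> symbol 0, bit 1 -> node at index 2 */
    -1, -2    /* bit 0 -> symbol 1, bit 1 -> symbol 2 */
};

static int toy_treed_read(const toy_tree_index *t, const int *bits, int *used)
{
    toy_tree_index i = 0;
    do
        i = t[i + bits[(*used)++]];  /* real code reads a bit with prob p[i >> 1] */
    while (i > 0);
    return -i;  /* e.g. bits {1, 0} walks 0 -> 2 -> -1 and returns symbol 1 */
}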
diff --git a/libvpx/vp8/decoder/asm_dec_offsets.c b/libvpx/vp8/decoder/vp8_asm_dec_offsets.c
index 842a0d5..842a0d5 100644
--- a/libvpx/vp8/decoder/asm_dec_offsets.c
+++ b/libvpx/vp8/decoder/vp8_asm_dec_offsets.c
diff --git a/libvpx/vp8/encoder/arm/armv5te/boolhuff_armv5te.asm b/libvpx/vp8/encoder/arm/armv5te/boolhuff_armv5te.asm
index a644a00..4abe818 100644
--- a/libvpx/vp8/encoder/arm/armv5te/boolhuff_armv5te.asm
+++ b/libvpx/vp8/encoder/arm/armv5te/boolhuff_armv5te.asm
@@ -15,7 +15,7 @@
EXPORT |vp8_encode_value|
IMPORT |vp8_validate_buffer_arm|
- INCLUDE asm_enc_offsets.asm
+ INCLUDE vp8_asm_enc_offsets.asm
ARM
REQUIRE8
diff --git a/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_armv5.asm b/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_armv5.asm
index a1cd467..90a141c 100644
--- a/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_armv5.asm
+++ b/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_armv5.asm
@@ -12,7 +12,7 @@
EXPORT |vp8cx_pack_tokens_armv5|
IMPORT |vp8_validate_buffer_arm|
- INCLUDE asm_enc_offsets.asm
+ INCLUDE vp8_asm_enc_offsets.asm
ARM
REQUIRE8
diff --git a/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_mbrow_armv5.asm b/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_mbrow_armv5.asm
index 1fa5e6c..3a8d17a 100644
--- a/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_mbrow_armv5.asm
+++ b/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_mbrow_armv5.asm
@@ -12,7 +12,7 @@
EXPORT |vp8cx_pack_mb_row_tokens_armv5|
IMPORT |vp8_validate_buffer_arm|
- INCLUDE asm_enc_offsets.asm
+ INCLUDE vp8_asm_enc_offsets.asm
ARM
REQUIRE8
diff --git a/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_partitions_armv5.asm b/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_partitions_armv5.asm
index 90a98fe..e9aa495 100644
--- a/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_partitions_armv5.asm
+++ b/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_partitions_armv5.asm
@@ -12,7 +12,7 @@
EXPORT |vp8cx_pack_tokens_into_partitions_armv5|
IMPORT |vp8_validate_buffer_arm|
- INCLUDE asm_enc_offsets.asm
+ INCLUDE vp8_asm_enc_offsets.asm
ARM
REQUIRE8
diff --git a/libvpx/vp8/encoder/arm/armv6/vp8_fast_quantize_b_armv6.asm b/libvpx/vp8/encoder/arm/armv6/vp8_fast_quantize_b_armv6.asm
index d61f5d9..de35a1e 100644
--- a/libvpx/vp8/encoder/arm/armv6/vp8_fast_quantize_b_armv6.asm
+++ b/libvpx/vp8/encoder/arm/armv6/vp8_fast_quantize_b_armv6.asm
@@ -11,7 +11,7 @@
EXPORT |vp8_fast_quantize_b_armv6|
- INCLUDE asm_enc_offsets.asm
+ INCLUDE vp8_asm_enc_offsets.asm
ARM
REQUIRE8
diff --git a/libvpx/vp8/encoder/arm/armv6/vp8_subtract_armv6.asm b/libvpx/vp8/encoder/arm/armv6/vp8_subtract_armv6.asm
index f329f8f..05746cf 100644
--- a/libvpx/vp8/encoder/arm/armv6/vp8_subtract_armv6.asm
+++ b/libvpx/vp8/encoder/arm/armv6/vp8_subtract_armv6.asm
@@ -13,7 +13,7 @@
EXPORT |vp8_subtract_mbuv_armv6|
EXPORT |vp8_subtract_b_armv6|
- INCLUDE asm_enc_offsets.asm
+ INCLUDE vp8_asm_enc_offsets.asm
ARM
REQUIRE8
diff --git a/libvpx/vp8/encoder/arm/dct_arm.c b/libvpx/vp8/encoder/arm/dct_arm.c
index af0fb27..f71300d 100644
--- a/libvpx/vp8/encoder/arm/dct_arm.c
+++ b/libvpx/vp8/encoder/arm/dct_arm.c
@@ -9,7 +9,7 @@
*/
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#if HAVE_MEDIA
diff --git a/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.asm b/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.asm
index 1430588..9374310 100644
--- a/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.asm
+++ b/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.asm
@@ -12,7 +12,7 @@
EXPORT |vp8_fast_quantize_b_neon|
EXPORT |vp8_fast_quantize_b_pair_neon|
- INCLUDE asm_enc_offsets.asm
+ INCLUDE vp8_asm_enc_offsets.asm
ARM
REQUIRE8
diff --git a/libvpx/vp8/encoder/arm/neon/shortfdct_neon.asm b/libvpx/vp8/encoder/arm/neon/shortfdct_neon.asm
index 09dd011..5ea8dd8 100644
--- a/libvpx/vp8/encoder/arm/neon/shortfdct_neon.asm
+++ b/libvpx/vp8/encoder/arm/neon/shortfdct_neon.asm
@@ -97,7 +97,7 @@ coeff
vmlal.s16 q11, d6, d17 ; c1*2217 + d1*5352 + 12000
vmlsl.s16 q12, d6, d16 ; d1*2217 - c1*5352 + 51000
- vmvn.s16 d4, d4
+ vmvn d4, d4
vshrn.s32 d1, q11, #16 ; op[4] = (c1*2217 + d1*5352 + 12000)>>16
vsub.s16 d1, d1, d4 ; op[4] += (d1!=0)
vshrn.s32 d3, q12, #16 ; op[12]= (d1*2217 - c1*5352 + 51000)>>16
@@ -200,7 +200,7 @@ coeff
vmlal.s16 q11, d27, d17 ; B[4] = c1*2217 + d1*5352 + 12000
vmlsl.s16 q12, d27, d16 ; B[12] = d1*2217 - c1*5352 + 51000
- vmvn.s16 q14, q14
+ vmvn q14, q14
vshrn.s32 d1, q9, #16 ; A[4] = (c1*2217 + d1*5352 + 12000)>>16
vshrn.s32 d3, q10, #16 ; A[12]= (d1*2217 - c1*5352 + 51000)>>16
diff --git a/libvpx/vp8/encoder/arm/neon/subtract_neon.asm b/libvpx/vp8/encoder/arm/neon/subtract_neon.asm
index 91a328c..5bda786 100644
--- a/libvpx/vp8/encoder/arm/neon/subtract_neon.asm
+++ b/libvpx/vp8/encoder/arm/neon/subtract_neon.asm
@@ -12,7 +12,7 @@
EXPORT |vp8_subtract_mby_neon|
EXPORT |vp8_subtract_mbuv_neon|
- INCLUDE asm_enc_offsets.asm
+ INCLUDE vp8_asm_enc_offsets.asm
ARM
REQUIRE8
diff --git a/libvpx/vp8/encoder/arm/quantize_arm.c b/libvpx/vp8/encoder/arm/quantize_arm.c
index 8999e34..80d9ad0 100644
--- a/libvpx/vp8/encoder/arm/quantize_arm.c
+++ b/libvpx/vp8/encoder/arm/quantize_arm.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vp8/encoder/block.h"
#include <math.h>
#include "vpx_mem/vpx_mem.h"
diff --git a/libvpx/vp8/encoder/bitstream.c b/libvpx/vp8/encoder/bitstream.c
index f84ae68..4707ae5 100644
--- a/libvpx/vp8/encoder/bitstream.c
+++ b/libvpx/vp8/encoder/bitstream.c
@@ -50,7 +50,7 @@ const int vp8cx_base_skip_false_prob[128] =
unsigned __int64 Sectionbits[500];
#endif
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
int intra_mode_stats[10][10][10];
static unsigned int tree_update_hist [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] [2];
extern unsigned int active_section;
@@ -90,17 +90,17 @@ static void update_mode(
if (new_b + (n << 8) < old_b)
{
- int i = 0;
+ int j = 0;
vp8_write_bit(w, 1);
do
{
- const vp8_prob p = Pnew[i];
+ const vp8_prob p = Pnew[j];
- vp8_write_literal(w, Pcur[i] = p ? p : 1, 8);
+ vp8_write_literal(w, Pcur[j] = p ? p : 1, 8);
}
- while (++i < n);
+ while (++j < n);
}
else
vp8_write_bit(w, 0);
@@ -245,15 +245,15 @@ void vp8_pack_tokens_c(vp8_writer *w, const TOKENEXTRA *p, int xcount)
if (L)
{
- const unsigned char *pp = b->prob;
- int v = e >> 1;
- int n = L; /* number of bits in v, assumed nonzero */
- int i = 0;
+ const unsigned char *proba = b->prob;
+ const int v2 = e >> 1;
+ int n2 = L; /* number of bits in v2, assumed nonzero */
+ i = 0;
do
{
- const int bb = (v >> --n) & 1;
- split = 1 + (((range - 1) * pp[i>>1]) >> 8);
+ const int bb = (v2 >> --n2) & 1;
+ split = 1 + (((range - 1) * proba[i>>1]) >> 8);
i = b->tree[i+bb];
if (bb)
@@ -301,7 +301,7 @@ void vp8_pack_tokens_c(vp8_writer *w, const TOKENEXTRA *p, int xcount)
lowvalue <<= shift;
}
- while (n);
+ while (n2);
}
@@ -493,7 +493,7 @@ static void write_mb_features(vp8_writer *w, const MB_MODE_INFO *mi, const MACRO
}
void vp8_convert_rfct_to_prob(VP8_COMP *const cpi)
{
- const int *const rfct = cpi->count_mb_ref_frame_usage;
+ const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
const int rf_intra = rfct[INTRA_FRAME];
const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
@@ -531,7 +531,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
vp8_convert_rfct_to_prob(cpi);
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 1;
#endif
@@ -580,7 +580,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
xd->mb_to_top_edge = -((mb_row * 16)) << 3;
xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 9;
#endif
@@ -593,7 +593,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
if (rf == INTRA_FRAME)
{
vp8_write(w, 0, cpi->prob_intra_coded);
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 6;
#endif
write_ymode(w, mode, pc->fc.ymode_prob);
@@ -633,13 +633,13 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
vp8_mv_ref_probs(mv_ref_p, ct);
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
accum_mv_refs(mode, ct);
#endif
}
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 3;
#endif
@@ -649,7 +649,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
{
case NEWMV:
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 5;
#endif
@@ -692,7 +692,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
if (blockmode == NEW4X4)
{
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 11;
#endif
write_mv(w, &blockmv.as_mv, &best_mv, (const MV_CONTEXT *) mvc);
@@ -769,7 +769,7 @@ static void write_kfmodes(VP8_COMP *cpi)
const B_PREDICTION_MODE L = left_block_mode(m, i);
const int bm = m->bmi[i].as_mode;
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
++intra_mode_stats [A] [L] [bm];
#endif
@@ -980,6 +980,12 @@ void vp8_calc_ref_frame_costs(int *ref_frame_cost,
int prob_garf
)
{
+ assert(prob_intra >= 0);
+ assert(prob_intra <= 255);
+ assert(prob_last >= 0);
+ assert(prob_last <= 255);
+ assert(prob_garf >= 0);
+ assert(prob_garf <= 255);
ref_frame_cost[INTRA_FRAME] = vp8_cost_zero(prob_intra);
ref_frame_cost[LAST_FRAME] = vp8_cost_one(prob_intra)
+ vp8_cost_zero(prob_last);
@@ -996,7 +1002,7 @@ int vp8_estimate_entropy_savings(VP8_COMP *cpi)
{
int savings = 0;
- const int *const rfct = cpi->count_mb_ref_frame_usage;
+ const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
const int rf_intra = rfct[INTRA_FRAME];
const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
int new_intra, new_last, new_garf, oldtotal, newtotal;
@@ -1154,7 +1160,7 @@ void vp8_update_coef_probs(VP8_COMP *cpi)
#endif
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
++ tree_update_hist [i][j][k][t] [u];
#endif
@@ -1175,7 +1181,7 @@ void vp8_update_coef_probs(VP8_COMP *cpi)
while (++t < ENTROPY_NODES);
/* Accum token counts for generation of default statistics */
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
t = 0;
do
@@ -1521,7 +1527,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
if (pc->frame_type != KEY_FRAME)
vp8_write_bit(bc, pc->refresh_last_frame);
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
if (pc->frame_type == INTER_FRAME)
active_section = 0;
@@ -1544,7 +1550,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
vp8_update_coef_probs(cpi);
#endif
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 2;
#endif
@@ -1555,7 +1561,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
{
write_kfmodes(cpi);
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 8;
#endif
}
@@ -1563,7 +1569,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
{
pack_inter_mode_mvs(cpi);
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 1;
#endif
}
@@ -1681,7 +1687,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest
#endif
}
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
void print_tree_update_probs()
{
int i, j, k, l;
diff --git a/libvpx/vp8/encoder/block.h b/libvpx/vp8/encoder/block.h
index f9d63eb..cf74c7a 100644
--- a/libvpx/vp8/encoder/block.h
+++ b/libvpx/vp8/encoder/block.h
@@ -18,6 +18,9 @@
#include "vp8/common/entropy.h"
#include "vpx_ports/mem.h"
+#define MAX_MODES 20
+#define MAX_ERROR_BINS 1024
+
/* motion search site */
typedef struct
{
@@ -34,7 +37,7 @@ typedef struct block
/* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */
short *quant;
short *quant_fast;
- unsigned char *quant_shift;
+ short *quant_shift;
short *zbin;
short *zrun_zbin_boost;
short *round;
@@ -134,7 +137,19 @@ typedef struct macroblock
int uv_mode_count[VP8_UV_MODES]; /* intra MB type cts this frame */
int64_t prediction_error;
int64_t intra_error;
-
+ int count_mb_ref_frame_usage[MAX_REF_FRAMES];
+
+ int rd_thresh_mult[MAX_MODES];
+ int rd_threshes[MAX_MODES];
+ unsigned int mbs_tested_so_far;
+ unsigned int mode_test_hit_counts[MAX_MODES];
+ int zbin_mode_boost_enabled;
+ int zbin_mode_boost;
+ int last_zbin_mode_boost;
+
+ int last_zbin_over_quant;
+ int zbin_over_quant;
+ int error_bins[MAX_ERROR_BINS];
void (*short_fdct4x4)(short *input, short *output, int pitch);
void (*short_fdct8x4)(short *input, short *output, int pitch);
diff --git a/libvpx/vp8/encoder/boolhuff.c b/libvpx/vp8/encoder/boolhuff.c
index 74770a2..3b0c03a 100644
--- a/libvpx/vp8/encoder/boolhuff.c
+++ b/libvpx/vp8/encoder/boolhuff.c
@@ -16,7 +16,7 @@ unsigned __int64 Sectionbits[500];
#endif
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
unsigned int active_section = 0;
#endif
diff --git a/libvpx/vp8/encoder/boolhuff.h b/libvpx/vp8/encoder/boolhuff.h
index 8309063..39ab586 100644
--- a/libvpx/vp8/encoder/boolhuff.h
+++ b/libvpx/vp8/encoder/boolhuff.h
@@ -67,7 +67,7 @@ static void vp8_encode_bool(BOOL_CODER *br, int bit, int probability)
unsigned int lowvalue = br->lowvalue;
register unsigned int shift;
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
#if defined(SECTIONBITS_OUTPUT)
if (bit)
diff --git a/libvpx/vp8/encoder/denoising.c b/libvpx/vp8/encoder/denoising.c
index c0dd7c1..7819265 100644
--- a/libvpx/vp8/encoder/denoising.c
+++ b/libvpx/vp8/encoder/denoising.c
@@ -13,7 +13,7 @@
#include "vp8/common/reconinter.h"
#include "vpx/vpx_integer.h"
#include "vpx_mem/vpx_mem.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
static const unsigned int NOISE_MOTION_THRESHOLD = 25 * 25;
/* SSE_DIFF_THRESHOLD is selected as ~95% confidence assuming
@@ -140,8 +140,7 @@ int vp8_denoiser_allocate(VP8_DENOISER *denoiser, int width, int height)
int i;
assert(denoiser);
- /* don't need one for intra start at 1 */
- for (i = 1; i < MAX_REF_FRAMES; i++)
+ for (i = 0; i < MAX_REF_FRAMES; i++)
{
denoiser->yv12_running_avg[i].flags = 0;
@@ -175,8 +174,7 @@ void vp8_denoiser_free(VP8_DENOISER *denoiser)
int i;
assert(denoiser);
- /* we don't have one for intra ref frame */
- for (i = 1; i < MAX_REF_FRAMES ; i++)
+ for (i = 0; i < MAX_REF_FRAMES ; i++)
{
vp8_yv12_de_alloc_frame_buffer(&denoiser->yv12_running_avg[i]);
}
@@ -208,8 +206,6 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
MB_MODE_INFO saved_mbmi;
MACROBLOCKD *filter_xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &filter_xd->mode_info_context->mbmi;
- int mv_col;
- int mv_row;
int sse_diff = zero_mv_sse - best_sse;
saved_mbmi = *mbmi;
@@ -291,7 +287,7 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
{
/* Filter. */
decision = vp8_denoiser_filter(&denoiser->yv12_mc_running_avg,
- &denoiser->yv12_running_avg[LAST_FRAME],
+ &denoiser->yv12_running_avg[INTRA_FRAME],
x,
motion_magnitude2,
recon_yoffset, recon_uvoffset);
@@ -303,7 +299,7 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
*/
vp8_copy_mem16x16(
x->thismb, 16,
- denoiser->yv12_running_avg[LAST_FRAME].y_buffer + recon_yoffset,
- denoiser->yv12_running_avg[LAST_FRAME].y_stride);
+ denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset,
+ denoiser->yv12_running_avg[INTRA_FRAME].y_stride);
}
}
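
The loops above now start at 0 and the filter targets slot 0 because INTRA_FRAME is index 0 in vp8's reference-frame enum (paraphrased below from vp8/common/blockd.h); keeping the running average there lets allocation, freeing and filtering share one MAX_REF_FRAMES-sized array with no special case:

/* paraphrase of the vp8 reference-frame enum, for the indexing above */
typedef enum
{
    INTRA_FRAME = 0,
    LAST_FRAME,
    GOLDEN_FRAME,
    ALTREF_FRAME,
    MAX_REF_FRAMES
} MV_REFERENCE_FRAME;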
diff --git a/libvpx/vp8/encoder/encodeframe.c b/libvpx/vp8/encoder/encodeframe.c
index 8828dd9..b550f6b 100644
--- a/libvpx/vp8/encoder/encodeframe.c
+++ b/libvpx/vp8/encoder/encodeframe.c
@@ -10,6 +10,7 @@
#include "vpx_config.h"
+#include "vp8_rtcd.h"
#include "encodemb.h"
#include "encodemv.h"
#include "vp8/common/common.h"
@@ -45,7 +46,6 @@ extern void vp8_auto_select_speed(VP8_COMP *cpi);
extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
MACROBLOCK *x,
MB_ROW_COMP *mbr_ei,
- int mb_row,
int count);
static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x );
@@ -530,7 +530,8 @@ void encode_mb_row(VP8_COMP *cpi,
* segmentation map
*/
if ((cpi->current_layer == 0) &&
- (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled))
+ (cpi->cyclic_refresh_mode_enabled &&
+ xd->segmentation_enabled))
{
cpi->segmentation_map[map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;
@@ -642,8 +643,6 @@ static void init_encode_frame_mb_context(VP8_COMP *cpi)
xd->left_context = &cm->left_context;
- vp8_zero(cpi->count_mb_ref_frame_usage)
-
x->mvc = cm->fc.mvc;
vpx_memset(cm->above_context, 0,
@@ -678,6 +677,7 @@ static void init_encode_frame_mb_context(VP8_COMP *cpi)
vp8_zero(x->uv_mode_count)
x->prediction_error = 0;
x->intra_error = 0;
+ vp8_zero(x->count_mb_ref_frame_usage);
}
static void sum_coef_counts(MACROBLOCK *x, MACROBLOCK *x_thread)
@@ -766,7 +766,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
vp8cx_frame_init_quantizer(cpi);
- vp8_initialize_rd_consts(cpi,
+ vp8_initialize_rd_consts(cpi, x,
vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
vp8cx_initialize_me_consts(cpi, cm->base_qindex);
@@ -805,7 +805,8 @@ void vp8_encode_frame(VP8_COMP *cpi)
{
int i;
- vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei, 1, cpi->encoding_thread_count);
+ vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei,
+ cpi->encoding_thread_count);
for (i = 0; i < cm->mb_rows; i++)
cpi->mt_current_mb_col[i] = -1;
@@ -852,11 +853,10 @@ void vp8_encode_frame(VP8_COMP *cpi)
if (xd->segmentation_enabled)
{
- int i, j;
+ int j;
if (xd->segmentation_enabled)
{
-
for (i = 0; i < cpi->encoding_thread_count; i++)
{
for (j = 0; j < 4; j++)
@@ -868,7 +868,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
for (i = 0; i < cpi->encoding_thread_count; i++)
{
int mode_count;
- int mv_vals;
+ int c_idx;
totalrate += cpi->mb_row_ei[i].totalrate;
cpi->mb.skip_true_count += cpi->mb_row_ei[i].mb.skip_true_count;
@@ -881,18 +881,26 @@ void vp8_encode_frame(VP8_COMP *cpi)
cpi->mb.uv_mode_count[mode_count] +=
cpi->mb_row_ei[i].mb.uv_mode_count[mode_count];
- for(mv_vals = 0; mv_vals < MVvals; mv_vals++)
+ for(c_idx = 0; c_idx < MVvals; c_idx++)
{
- cpi->mb.MVcount[0][mv_vals] +=
- cpi->mb_row_ei[i].mb.MVcount[0][mv_vals];
- cpi->mb.MVcount[1][mv_vals] +=
- cpi->mb_row_ei[i].mb.MVcount[1][mv_vals];
+ cpi->mb.MVcount[0][c_idx] +=
+ cpi->mb_row_ei[i].mb.MVcount[0][c_idx];
+ cpi->mb.MVcount[1][c_idx] +=
+ cpi->mb_row_ei[i].mb.MVcount[1][c_idx];
}
cpi->mb.prediction_error +=
cpi->mb_row_ei[i].mb.prediction_error;
cpi->mb.intra_error += cpi->mb_row_ei[i].mb.intra_error;
+ for(c_idx = 0; c_idx < MAX_REF_FRAMES; c_idx++)
+ cpi->mb.count_mb_ref_frame_usage[c_idx] +=
+ cpi->mb_row_ei[i].mb.count_mb_ref_frame_usage[c_idx];
+
+ for(c_idx = 0; c_idx < MAX_ERROR_BINS; c_idx++)
+ cpi->mb.error_bins[c_idx] +=
+ cpi->mb_row_ei[i].mb.error_bins[c_idx];
+
/* add up counts for each thread */
sum_coef_counts(x, &cpi->mb_row_ei[i].mb);
}
@@ -987,13 +995,14 @@ void vp8_encode_frame(VP8_COMP *cpi)
{
int tot_modes;
- tot_modes = cpi->count_mb_ref_frame_usage[INTRA_FRAME]
- + cpi->count_mb_ref_frame_usage[LAST_FRAME]
- + cpi->count_mb_ref_frame_usage[GOLDEN_FRAME]
- + cpi->count_mb_ref_frame_usage[ALTREF_FRAME];
+ tot_modes = cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME]
+ + cpi->mb.count_mb_ref_frame_usage[LAST_FRAME]
+ + cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME]
+ + cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
if (tot_modes)
- cpi->this_frame_percent_intra = cpi->count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
+ cpi->this_frame_percent_intra =
+ cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
}
@@ -1224,17 +1233,17 @@ int vp8cx_encode_inter_macroblock
if (cpi->sf.RD)
{
- int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled;
+ int zbin_mode_boost_enabled = x->zbin_mode_boost_enabled;
/* Are we using the fast quantizer for the mode selection? */
if(cpi->sf.use_fastquant_for_pick)
{
- cpi->mb.quantize_b = vp8_fast_quantize_b;
- cpi->mb.quantize_b_pair = vp8_fast_quantize_b_pair;
+ x->quantize_b = vp8_fast_quantize_b;
+ x->quantize_b_pair = vp8_fast_quantize_b_pair;
/* the fast quantizer does not use zbin_extra, so
* do not recalculate */
- cpi->zbin_mode_boost_enabled = 0;
+ x->zbin_mode_boost_enabled = 0;
}
vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
&distortion, &intra_error);
@@ -1242,12 +1251,12 @@ int vp8cx_encode_inter_macroblock
/* switch back to the regular quantizer for the encode */
if (cpi->sf.improved_quant)
{
- cpi->mb.quantize_b = vp8_regular_quantize_b;
- cpi->mb.quantize_b_pair = vp8_regular_quantize_b_pair;
+ x->quantize_b = vp8_regular_quantize_b;
+ x->quantize_b_pair = vp8_regular_quantize_b_pair;
}
/* restore cpi->zbin_mode_boost_enabled */
- cpi->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
+ x->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
}
else
@@ -1290,25 +1299,27 @@ int vp8cx_encode_inter_macroblock
}
{
- /* Experimental code. Special case for gf and arf zeromv modes.
- * Increase zbin size to supress noise
+ /* Experimental code.
+ * Special case for gf and arf zeromv modes, for 1 temporal layer.
+ * Increase zbin size to suppress noise.
*/
- cpi->zbin_mode_boost = 0;
- if (cpi->zbin_mode_boost_enabled)
+ x->zbin_mode_boost = 0;
+ if (x->zbin_mode_boost_enabled)
{
if ( xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME )
{
if (xd->mode_info_context->mbmi.mode == ZEROMV)
{
- if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
- cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
+ if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME &&
+ cpi->oxcf.number_of_layers == 1)
+ x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
- cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
+ x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
}
else if (xd->mode_info_context->mbmi.mode == SPLITMV)
- cpi->zbin_mode_boost = 0;
+ x->zbin_mode_boost = 0;
else
- cpi->zbin_mode_boost = MV_ZBIN_BOOST;
+ x->zbin_mode_boost = MV_ZBIN_BOOST;
}
}
@@ -1318,7 +1329,7 @@ int vp8cx_encode_inter_macroblock
vp8_update_zbin_extra(cpi, x);
}
- cpi->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;
+ x->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
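
Moving count_mb_ref_frame_usage, the rd thresholds and error_bins from VP8_COMP into MACROBLOCK is what makes the per-thread sums above safe: each worker mutates only its own copy, and the main thread folds them once per frame. A sketch of that fold (anything beyond the libvpx field names is illustrative):

static void sum_ref_frame_counts(MACROBLOCK *dst, const MACROBLOCK *per_thread,
                                 int nthreads)
{
    int t, r;
    for (t = 0; t < nthreads; ++t)            /* no locks needed: each */
        for (r = 0; r < MAX_REF_FRAMES; ++r)  /* worker owned its counters */
            dst->count_mb_ref_frame_usage[r] +=
                per_thread[t].count_mb_ref_frame_usage[r];
}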
diff --git a/libvpx/vp8/encoder/encodeintra.c b/libvpx/vp8/encoder/encodeintra.c
index 340dd63..cfa4cb9 100644
--- a/libvpx/vp8/encoder/encodeintra.c
+++ b/libvpx/vp8/encoder/encodeintra.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "quantize.h"
#include "vp8/common/reconintra4x4.h"
#include "encodemb.h"
diff --git a/libvpx/vp8/encoder/encodemb.c b/libvpx/vp8/encoder/encodemb.c
index 7d494f2..7ed2fe1 100644
--- a/libvpx/vp8/encoder/encodemb.c
+++ b/libvpx/vp8/encoder/encodemb.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "encodemb.h"
#include "vp8/common/reconinter.h"
#include "quantize.h"
diff --git a/libvpx/vp8/encoder/encodemv.c b/libvpx/vp8/encoder/encodemv.c
index 0c43d06..2a74ff4 100644
--- a/libvpx/vp8/encoder/encodemv.c
+++ b/libvpx/vp8/encoder/encodemv.c
@@ -16,7 +16,7 @@
#include <math.h>
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
extern unsigned int active_section;
#endif
@@ -359,7 +359,7 @@ void vp8_write_mvprobs(VP8_COMP *cpi)
vp8_writer *const w = cpi->bc;
MV_CONTEXT *mvc = cpi->common.fc.mvc;
int flags[2] = {0, 0};
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 4;
#endif
write_component_probs(
@@ -374,7 +374,7 @@ void vp8_write_mvprobs(VP8_COMP *cpi)
if (flags[0] || flags[1])
vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flags);
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
active_section = 5;
#endif
}
diff --git a/libvpx/vp8/encoder/ethreading.c b/libvpx/vp8/encoder/ethreading.c
index 39340f2..d4b17ce 100644
--- a/libvpx/vp8/encoder/ethreading.c
+++ b/libvpx/vp8/encoder/ethreading.c
@@ -214,7 +214,9 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
* vp8cx_encode_inter_macroblock()) back into the
* global segmentation map
*/
- if (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)
+ if ((cpi->current_layer == 0) &&
+ (cpi->cyclic_refresh_mode_enabled &&
+ xd->segmentation_enabled))
{
const MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
cpi->segmentation_map[map_index + mb_col] = mbmi->segment_id;
@@ -416,13 +418,23 @@ static void setup_mbby_copy(MACROBLOCK *mbdst, MACROBLOCK *mbsrc)
zd->block[i].dequant = zd->dequant_uv;
zd->block[24].dequant = zd->dequant_y2;
#endif
+
+
+ vpx_memcpy(z->rd_threshes, x->rd_threshes, sizeof(x->rd_threshes));
+ vpx_memcpy(z->rd_thresh_mult, x->rd_thresh_mult,
+ sizeof(x->rd_thresh_mult));
+
+ z->zbin_over_quant = x->zbin_over_quant;
+ z->zbin_mode_boost_enabled = x->zbin_mode_boost_enabled;
+ z->zbin_mode_boost = x->zbin_mode_boost;
+
+ vpx_memset(z->error_bins, 0, sizeof(z->error_bins));
}
}
void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
MACROBLOCK *x,
MB_ROW_COMP *mbr_ei,
- int mb_row,
int count
)
{
@@ -430,7 +442,6 @@ void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
VP8_COMMON *const cm = & cpi->common;
MACROBLOCKD *const xd = & x->e_mbd;
int i;
- (void) mb_row;
for (i = 0; i < count; i++)
{
@@ -478,6 +489,8 @@ void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
vp8_zero(mb->MVcount);
mb->prediction_error = 0;
mb->intra_error = 0;
+ vp8_zero(mb->count_mb_ref_frame_usage);
+ mb->mbs_tested_so_far = 0;
}
}
diff --git a/libvpx/vp8/encoder/firstpass.c b/libvpx/vp8/encoder/firstpass.c
index b668c8f..433726d 100644
--- a/libvpx/vp8/encoder/firstpass.c
+++ b/libvpx/vp8/encoder/firstpass.c
@@ -12,6 +12,7 @@
#include <limits.h>
#include <stdio.h>
+#include "./vpx_scale_rtcd.h"
#include "block.h"
#include "onyx_int.h"
#include "vp8/common/variance.h"
@@ -20,7 +21,7 @@
#include "vp8/common/systemdependent.h"
#include "mcomp.h"
#include "firstpass.h"
-#include "vpx_scale/vpxscale.h"
+#include "vpx_scale/vpx_scale.h"
#include "encodemb.h"
#include "vp8/common/extend.h"
#include "vpx_mem/vpx_mem.h"
@@ -569,7 +570,7 @@ void vp8_first_pass(VP8_COMP *cpi)
/* Initialise the MV cost table to the defaults */
{
int flag[2] = {1, 1};
- vp8_initialize_rd_consts(cpi, vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
+ vp8_initialize_rd_consts(cpi, x, vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
vpx_memcpy(cm->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
}
@@ -857,7 +858,9 @@ skip_motion_search:
*/
if ((cm->current_video_frame > 0) &&
(cpi->twopass.this_frame_stats.pcnt_inter > 0.20) &&
- ((cpi->twopass.this_frame_stats.intra_error / cpi->twopass.this_frame_stats.coded_error) > 2.0))
+ ((cpi->twopass.this_frame_stats.intra_error /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.this_frame_stats.coded_error)) >
+ 2.0))
{
vp8_yv12_copy_frame(lst_yv12, gld_yv12);
}
@@ -2115,23 +2118,25 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
(cpi->twopass.kf_group_error_left > 0))
{
cpi->twopass.gf_group_bits =
- (int)((double)cpi->twopass.kf_group_bits *
- (gf_group_err / (double)cpi->twopass.kf_group_error_left));
+ (int64_t)(cpi->twopass.kf_group_bits *
+ (gf_group_err / cpi->twopass.kf_group_error_left));
}
else
cpi->twopass.gf_group_bits = 0;
- cpi->twopass.gf_group_bits = (int)(
+ cpi->twopass.gf_group_bits =
(cpi->twopass.gf_group_bits < 0)
? 0
: (cpi->twopass.gf_group_bits > cpi->twopass.kf_group_bits)
- ? cpi->twopass.kf_group_bits : cpi->twopass.gf_group_bits);
+ ? cpi->twopass.kf_group_bits : cpi->twopass.gf_group_bits;
/* Clip cpi->twopass.gf_group_bits based on user supplied data rate
* variability limit (cpi->oxcf.two_pass_vbrmax_section)
*/
- if (cpi->twopass.gf_group_bits > max_bits * cpi->baseline_gf_interval)
- cpi->twopass.gf_group_bits = max_bits * cpi->baseline_gf_interval;
+ if (cpi->twopass.gf_group_bits >
+ (int64_t)max_bits * cpi->baseline_gf_interval)
+ cpi->twopass.gf_group_bits =
+ (int64_t)max_bits * cpi->baseline_gf_interval;
/* Reset the file position */
reset_fpf_position(cpi, start_pos);
@@ -2445,7 +2450,7 @@ void vp8_second_pass(VP8_COMP *cpi)
*/
if (cpi->oxcf.error_resilient_mode)
{
- cpi->twopass.gf_group_bits = (int)cpi->twopass.kf_group_bits;
+ cpi->twopass.gf_group_bits = cpi->twopass.kf_group_bits;
cpi->twopass.gf_group_error_left =
(int)cpi->twopass.kf_group_error_left;
cpi->baseline_gf_interval = cpi->twopass.frames_to_key;
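
The first-pass hunk above guards the intra/coded error ratio with DOUBLE_DIVIDE_CHECK(). In libvpx that macro nudges a divisor away from zero by a tiny epsilon (behavior assumed below), so a frame whose coded error is essentially zero cannot produce an infinite or NaN ratio:

    /* Sketch of the divide guard, with the epsilon behavior assumed. */
    #include <stdio.h>

    #define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? (x) - 0.000001 : (x) + 0.000001)

    int main(void)
    {
        double intra_error = 5000.0;
        double coded_error = 0.0;     /* pathological: nothing was coded */
        double ratio = intra_error / DOUBLE_DIVIDE_CHECK(coded_error);
        printf("ratio = %f (finite; > 2.0 still triggers the frame copy)\n",
               ratio);
        return 0;
    }
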
diff --git a/libvpx/vp8/encoder/mcomp.c b/libvpx/vp8/encoder/mcomp.c
index b08c7a5..83c3989 100644
--- a/libvpx/vp8/encoder/mcomp.c
+++ b/libvpx/vp8/encoder/mcomp.c
@@ -18,7 +18,7 @@
#include <math.h>
#include "vp8/common/findnearmv.h"
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
static int mv_ref_ct [31] [4] [2];
static int mv_mode_cts [4] [2];
#endif
@@ -233,19 +233,18 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
#if ARCH_X86 || ARCH_X86_64
MACROBLOCKD *xd = &x->e_mbd;
- unsigned char *y0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
+ unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
unsigned char *y;
- int buf_r1, buf_r2, buf_c1, buf_c2;
+ int buf_r1, buf_r2, buf_c1;
/* Clamping to avoid out-of-range data access */
buf_r1 = ((bestmv->as_mv.row - 3) < x->mv_row_min)?(bestmv->as_mv.row - x->mv_row_min):3;
buf_r2 = ((bestmv->as_mv.row + 3) > x->mv_row_max)?(x->mv_row_max - bestmv->as_mv.row):3;
buf_c1 = ((bestmv->as_mv.col - 3) < x->mv_col_min)?(bestmv->as_mv.col - x->mv_col_min):3;
- buf_c2 = ((bestmv->as_mv.col + 3) > x->mv_col_max)?(x->mv_col_max - bestmv->as_mv.col):3;
y_stride = 32;
/* Copy to intermediate buffer before searching. */
- vfp->copymem(y0 - buf_c1 - pre_stride*buf_r1, pre_stride, xd->y_buf, y_stride, 16+buf_r1+buf_r2);
+ vfp->copymem(y_0 - buf_c1 - pre_stride*buf_r1, pre_stride, xd->y_buf, y_stride, 16+buf_r1+buf_r2);
y = xd->y_buf + y_stride*buf_r1 +buf_c1;
#else
unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
@@ -376,12 +375,12 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
#if ARCH_X86 || ARCH_X86_64
MACROBLOCKD *xd = &x->e_mbd;
- unsigned char *y0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
+ unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
unsigned char *y;
y_stride = 32;
/* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
- vfp->copymem(y0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18);
+ vfp->copymem(y_0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18);
y = xd->y_buf + y_stride + 1;
#else
unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
@@ -687,12 +686,12 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
#if ARCH_X86 || ARCH_X86_64
MACROBLOCKD *xd = &x->e_mbd;
- unsigned char *y0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
+ unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
unsigned char *y;
y_stride = 32;
/* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
- vfp->copymem(y0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18);
+ vfp->copymem(y_0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18);
y = xd->y_buf + y_stride + 1;
#else
unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col;
@@ -1913,7 +1912,7 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
}
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
void print_mode_context(void)
{
FILE *f = fopen("modecont.c", "w");
@@ -1966,8 +1965,8 @@ void print_mode_context(void)
fclose(f);
}
-/* MV ref count ENTROPY_STATS stats code */
-#ifdef ENTROPY_STATS
+/* MV ref count VP8_ENTROPY_STATS stats code */
+#ifdef VP8_ENTROPY_STATS
void init_mv_ref_counts()
{
vpx_memset(mv_ref_ct, 0, sizeof(mv_ref_ct));
@@ -2021,6 +2020,6 @@ void accum_mv_refs(MB_PREDICTION_MODE m, const int ct[4])
}
}
-#endif/* END MV ref count ENTROPY_STATS stats code */
+#endif/* END MV ref count VP8_ENTROPY_STATS stats code */
#endif
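
For context on the clamping touched above: the 3-pixel context margins around the best motion vector are trimmed so the pre-search buffer copy never reads outside the valid motion range, and buf_c2 could be dropped because the copy's width is fixed, so the right margin is never consulted. A standalone sketch with illustrative values:

    /* Sketch of the margin clamping; values are illustrative. */
    #include <stdio.h>

    int main(void)
    {
        int best_row = 1, mv_row_min = -1;   /* assumed example values */
        int best_col = 2, mv_col_min = 0;

        /* Take 3 pixels of context unless that would cross the valid
         * motion range, in which case shrink the margin to fit. */
        int buf_r1 = ((best_row - 3) < mv_row_min)
                         ? (best_row - mv_row_min) : 3;
        int buf_c1 = ((best_col - 3) < mv_col_min)
                         ? (best_col - mv_col_min) : 3;

        printf("top margin %d, left margin %d\n", buf_r1, buf_c1);
        return 0;
    }
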
diff --git a/libvpx/vp8/encoder/mcomp.h b/libvpx/vp8/encoder/mcomp.h
index 890113f..e36c515 100644
--- a/libvpx/vp8/encoder/mcomp.h
+++ b/libvpx/vp8/encoder/mcomp.h
@@ -15,7 +15,7 @@
#include "block.h"
#include "vp8/common/variance.h"
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
extern void init_mv_ref_counts();
extern void accum_mv_refs(MB_PREDICTION_MODE, const int near_mv_ref_cts[4]);
#endif
diff --git a/libvpx/vp8/encoder/onyx_if.c b/libvpx/vp8/encoder/onyx_if.c
index c7d81b1..73f6583 100644
--- a/libvpx/vp8/encoder/onyx_if.c
+++ b/libvpx/vp8/encoder/onyx_if.c
@@ -10,6 +10,7 @@
#include "vpx_config.h"
+#include "./vpx_scale_rtcd.h"
#include "vp8/common/onyxc_int.h"
#include "vp8/common/blockd.h"
#include "onyx_int.h"
@@ -19,7 +20,7 @@
#include "mcomp.h"
#include "firstpass.h"
#include "psnr.h"
-#include "vpx_scale/vpxscale.h"
+#include "vpx_scale/vpx_scale.h"
#include "vp8/common/extend.h"
#include "ratectrl.h"
#include "vp8/common/quant_common.h"
@@ -110,7 +111,7 @@ extern int skip_false_count;
#endif
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
extern int intra_mode_stats[10][10][10];
#endif
@@ -238,7 +239,7 @@ static void save_layer_context(VP8_COMP *cpi)
lc->rate_correction_factor = cpi->rate_correction_factor;
lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
- lc->zbin_over_quant = cpi->zbin_over_quant;
+ lc->zbin_over_quant = cpi->mb.zbin_over_quant;
lc->inter_frame_target = cpi->inter_frame_target;
lc->total_byte_count = cpi->total_byte_count;
lc->filter_level = cpi->common.filter_level;
@@ -246,8 +247,8 @@ static void save_layer_context(VP8_COMP *cpi)
lc->last_frame_percent_intra = cpi->last_frame_percent_intra;
memcpy (lc->count_mb_ref_frame_usage,
- cpi->count_mb_ref_frame_usage,
- sizeof(cpi->count_mb_ref_frame_usage));
+ cpi->mb.count_mb_ref_frame_usage,
+ sizeof(cpi->mb.count_mb_ref_frame_usage));
}
static void restore_layer_context(VP8_COMP *cpi, const int layer)
@@ -276,16 +277,135 @@ static void restore_layer_context(VP8_COMP *cpi, const int layer)
cpi->rate_correction_factor = lc->rate_correction_factor;
cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
- cpi->zbin_over_quant = lc->zbin_over_quant;
+ cpi->mb.zbin_over_quant = lc->zbin_over_quant;
cpi->inter_frame_target = lc->inter_frame_target;
cpi->total_byte_count = lc->total_byte_count;
cpi->common.filter_level = lc->filter_level;
cpi->last_frame_percent_intra = lc->last_frame_percent_intra;
- memcpy (cpi->count_mb_ref_frame_usage,
+ memcpy (cpi->mb.count_mb_ref_frame_usage,
lc->count_mb_ref_frame_usage,
- sizeof(cpi->count_mb_ref_frame_usage));
+ sizeof(cpi->mb.count_mb_ref_frame_usage));
+}
+
+static int rescale(int val, int num, int denom)
+{
+ int64_t llnum = num;
+ int64_t llden = denom;
+ int64_t llval = val;
+
+ return (int)(llval * llnum / llden);
+}
+
+static void init_temporal_layer_context(VP8_COMP *cpi,
+ VP8_CONFIG *oxcf,
+ const int layer,
+ double prev_layer_frame_rate)
+{
+ LAYER_CONTEXT *lc = &cpi->layer_context[layer];
+
+ lc->frame_rate = cpi->output_frame_rate / cpi->oxcf.rate_decimator[layer];
+ lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;
+
+ lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
+ lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
+ lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
+
+ lc->starting_buffer_level =
+ rescale((int)(oxcf->starting_buffer_level),
+ lc->target_bandwidth, 1000);
+
+ if (oxcf->optimal_buffer_level == 0)
+ lc->optimal_buffer_level = lc->target_bandwidth / 8;
+ else
+ lc->optimal_buffer_level =
+ rescale((int)(oxcf->optimal_buffer_level),
+ lc->target_bandwidth, 1000);
+
+ if (oxcf->maximum_buffer_size == 0)
+ lc->maximum_buffer_size = lc->target_bandwidth / 8;
+ else
+ lc->maximum_buffer_size =
+ rescale((int)(oxcf->maximum_buffer_size),
+ lc->target_bandwidth, 1000);
+
+ /* Work out the average size of a frame within this layer */
+ if (layer > 0)
+ lc->avg_frame_size_for_layer =
+ (int)((cpi->oxcf.target_bitrate[layer] -
+ cpi->oxcf.target_bitrate[layer-1]) * 1000 /
+ (lc->frame_rate - prev_layer_frame_rate));
+
+ lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
+ lc->active_best_quality = cpi->oxcf.best_allowed_q;
+ lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
+
+ lc->buffer_level = lc->starting_buffer_level;
+ lc->bits_off_target = lc->starting_buffer_level;
+
+ lc->total_actual_bits = 0;
+ lc->ni_av_qi = 0;
+ lc->ni_tot_qi = 0;
+ lc->ni_frames = 0;
+ lc->rate_correction_factor = 1.0;
+ lc->key_frame_rate_correction_factor = 1.0;
+ lc->gf_rate_correction_factor = 1.0;
+ lc->inter_frame_target = 0;
+}
+
+// Upon a run-time change in temporal layers, reset the layer context parameters
+// for any "new" layers. For "existing" layers, let them inherit the parameters
+// from the previous layer state (at the same layer #). In future we may want
+// to better map the previous layer state(s) to the "new" ones.
+static void reset_temporal_layer_change(VP8_COMP *cpi,
+ VP8_CONFIG *oxcf,
+ const int prev_num_layers)
+{
+ int i;
+ double prev_layer_frame_rate = 0;
+ const int curr_num_layers = cpi->oxcf.number_of_layers;
+ // If the previous state was 1 layer, get current layer context from cpi.
+ // We need this to set the layer context for the new layers below.
+ if (prev_num_layers == 1)
+ {
+ cpi->current_layer = 0;
+ save_layer_context(cpi);
+ }
+ for (i = 0; i < curr_num_layers; i++)
+ {
+ LAYER_CONTEXT *lc = &cpi->layer_context[i];
+ if (i >= prev_num_layers)
+ {
+ init_temporal_layer_context(cpi, oxcf, i, prev_layer_frame_rate);
+ }
+ // The initial buffer levels are set based on their starting levels.
+ // We could set the buffer levels based on the previous state (normalized
+ // properly by the layer bandwidths) but we would need to keep track of
+ // the previous set of layer bandwidths (i.e., target_bitrate[i])
+ // before the layer change. For now, reset to the starting levels.
+ lc->buffer_level = cpi->oxcf.starting_buffer_level_in_ms *
+ cpi->oxcf.target_bitrate[i];
+ lc->bits_off_target = lc->buffer_level;
+ // TODO(marpan): Should we set the rate_correction_factor and
+ // active_worst/best_quality to values derived from the previous layer
+ // state (to smooth-out quality dips/rate fluctuation at transition)?
+
+ // We need to treat the 1 layer case separately: oxcf.target_bitrate[i]
+ // is not set for 1 layer, and the restore_layer_context/save_context()
+ // are not called in the encoding loop, so we need to call it here to
+ // pass the layer context state to |cpi|.
+ if (curr_num_layers == 1)
+ {
+ lc->target_bandwidth = cpi->oxcf.target_bandwidth;
+ lc->buffer_level = cpi->oxcf.starting_buffer_level_in_ms *
+ lc->target_bandwidth / 1000;
+ lc->bits_off_target = lc->buffer_level;
+ restore_layer_context(cpi, 0);
+ }
+ prev_layer_frame_rate = cpi->output_frame_rate /
+ cpi->oxcf.rate_decimator[i];
+ }
}
static void setup_features(VP8_COMP *cpi)
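
The rescale() helper factored out above converts a buffer level expressed in milliseconds into bits for a layer's bandwidth. The 64-bit intermediates matter; a sketch with plausible example values where the product would overflow a 32-bit int:

    /* Sketch of rescale() with example inputs; config values assumed. */
    #include <stdint.h>
    #include <stdio.h>

    static int rescale(int val, int num, int denom)
    {
        int64_t llnum = num, llden = denom, llval = val;
        return (int)(llval * llnum / llden);   /* 64-bit product, no wrap */
    }

    int main(void)
    {
        int starting_buffer_level_ms = 6000;   /* example config value */
        int target_bandwidth = 2000000;        /* 2 Mbps layer bandwidth */
        /* 6000 * 2000000 = 1.2e10 would overflow int32. */
        printf("starting level = %d bits\n",
               rescale(starting_buffer_level_ms, target_bandwidth, 1000));
        return 0;
    }
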
@@ -640,11 +760,9 @@ void vp8_set_speed_features(VP8_COMP *cpi)
for (i = 0; i < MAX_MODES; i ++)
{
cpi->mode_check_freq[i] = 0;
- cpi->mode_test_hit_counts[i] = 0;
- cpi->mode_chosen_counts[i] = 0;
}
- cpi->mbs_tested_so_far = 0;
+ cpi->mb.mbs_tested_so_far = 0;
/* best quality defaults */
sf->RD = 1;
@@ -826,7 +944,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
{
unsigned int sum = 0;
unsigned int total_mbs = cm->MBs;
- int i, thresh;
+ int thresh;
unsigned int total_skip;
int min = 2000;
@@ -838,7 +956,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
for (i = 0; i < min; i++)
{
- sum += cpi->error_bins[i];
+ sum += cpi->mb.error_bins[i];
}
total_skip = sum;
@@ -847,7 +965,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
/* i starts from 2 to make sure thresh started from 2048 */
for (; i < 1024; i++)
{
- sum += cpi->error_bins[i];
+ sum += cpi->mb.error_bins[i];
if (10 * sum >= (unsigned int)(cpi->Speed - 6)*(total_mbs - total_skip))
break;
@@ -902,7 +1020,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
if (Speed >= 15)
sf->half_pixel_search = 0;
- vpx_memset(cpi->error_bins, 0, sizeof(cpi->error_bins));
+ vpx_memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
}; /* switch */
@@ -1077,10 +1195,7 @@ void vp8_alloc_compressor_data(VP8_COMP *cpi)
}
/* Data used for real time vc mode to see if gf needs refreshing */
- cpi->inter_zz_count = 0;
cpi->zeromv_count = 0;
- cpi->gf_bad_count = 0;
- cpi->gf_update_recommended = 0;
/* Structures used to monitor GF usage */
@@ -1204,17 +1319,6 @@ void vp8_new_frame_rate(VP8_COMP *cpi, double framerate)
}
-static int
-rescale(int val, int num, int denom)
-{
- int64_t llnum = num;
- int64_t llden = denom;
- int64_t llval = val;
-
- return (int)(llval * llnum / llden);
-}
-
-
static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
{
VP8_COMMON *cm = &cpi->common;
@@ -1269,59 +1373,9 @@ static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
for (i=0; i<cpi->oxcf.number_of_layers; i++)
{
- LAYER_CONTEXT *lc = &cpi->layer_context[i];
-
- /* Layer configuration */
- lc->frame_rate =
- cpi->output_frame_rate / cpi->oxcf.rate_decimator[i];
- lc->target_bandwidth = cpi->oxcf.target_bitrate[i] * 1000;
-
- lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
- lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
- lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
-
- lc->starting_buffer_level =
- rescale((int)(oxcf->starting_buffer_level),
- lc->target_bandwidth, 1000);
-
- if (oxcf->optimal_buffer_level == 0)
- lc->optimal_buffer_level = lc->target_bandwidth / 8;
- else
- lc->optimal_buffer_level =
- rescale((int)(oxcf->optimal_buffer_level),
- lc->target_bandwidth, 1000);
-
- if (oxcf->maximum_buffer_size == 0)
- lc->maximum_buffer_size = lc->target_bandwidth / 8;
- else
- lc->maximum_buffer_size =
- rescale((int)oxcf->maximum_buffer_size,
- lc->target_bandwidth, 1000);
-
- /* Work out the average size of a frame within this layer */
- if (i > 0)
- lc->avg_frame_size_for_layer =
- (int)((cpi->oxcf.target_bitrate[i] -
- cpi->oxcf.target_bitrate[i-1]) * 1000 /
- (lc->frame_rate - prev_layer_frame_rate));
-
- lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
- lc->active_best_quality = cpi->oxcf.best_allowed_q;
- lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
-
- lc->buffer_level = lc->starting_buffer_level;
- lc->bits_off_target = lc->starting_buffer_level;
-
- lc->total_actual_bits = 0;
- lc->ni_av_qi = 0;
- lc->ni_tot_qi = 0;
- lc->ni_frames = 0;
- lc->rate_correction_factor = 1.0;
- lc->key_frame_rate_correction_factor = 1.0;
- lc->gf_rate_correction_factor = 1.0;
- lc->inter_frame_target = 0;
-
- prev_layer_frame_rate = lc->frame_rate;
+ init_temporal_layer_context(cpi, oxcf, i, prev_layer_frame_rate);
+ prev_layer_frame_rate = cpi->output_frame_rate /
+ cpi->oxcf.rate_decimator[i];
}
}
@@ -1388,7 +1442,7 @@ static void update_layer_contexts (VP8_COMP *cpi)
void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
{
VP8_COMMON *cm = &cpi->common;
- int last_w, last_h;
+ int last_w, last_h, prev_number_of_layers;
if (!cpi)
return;
@@ -1413,6 +1467,7 @@ void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
last_w = cpi->oxcf.Width;
last_h = cpi->oxcf.Height;
+ prev_number_of_layers = cpi->oxcf.number_of_layers;
cpi->oxcf = *oxcf;
@@ -1605,6 +1660,16 @@ void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
+ // Check if the number of temporal layers has changed, and if so reset the
+ // pattern counter and set/initialize the temporal layer context for the
+ // new layer configuration.
+ if (cpi->oxcf.number_of_layers != prev_number_of_layers)
+ {
+ // If the number of temporal layers are changed we must start at the
+ // base of the pattern cycle, so reset temporal_pattern_counter.
+ cpi->temporal_pattern_counter = 0;
+ reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
+ }
cm->Width = cpi->oxcf.Width;
cm->Height = cpi->oxcf.Height;
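
Resetting temporal_pattern_counter here matters because, as the vp8_get_compressed_data hunk later in this patch shows, the encoding layer is chosen by indexing the temporal pattern with this counter modulo the pattern period; a stale count would enter the new pattern mid-cycle. A sketch with an illustrative 3-layer pattern:

    /* Sketch of layer selection by pattern counter; arrays illustrative. */
    #include <stdio.h>

    int main(void)
    {
        int layer_id[4] = {0, 2, 1, 2};   /* example 3-layer pattern, period 4 */
        unsigned int temporal_pattern_counter = 0;  /* reset on layer change */
        int frame;
        for (frame = 0; frame < 8; frame++) {
            int layer = layer_id[temporal_pattern_counter % 4];
            printf("frame %d -> layer %d\n", frame, layer);
            temporal_pattern_counter++;
        }
        return 0;
    }
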
@@ -1742,6 +1807,7 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob, sizeof(vp8cx_base_skip_false_prob));
cpi->common.current_video_frame = 0;
+ cpi->temporal_pattern_counter = 0;
cpi->kf_overspend_bits = 0;
cpi->kf_bitrate_adjustment = 0;
cpi->frames_till_gf_update_due = 0;
@@ -1809,7 +1875,7 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
else
cpi->cyclic_refresh_map = (signed char *) NULL;
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
init_context_counters();
#endif
@@ -1924,10 +1990,10 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
/* Set starting values of RD threshold multipliers (128 = *1) */
for (i = 0; i < MAX_MODES; i++)
{
- cpi->rd_thresh_mult[i] = 128;
+ cpi->mb.rd_thresh_mult[i] = 128;
}
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
init_mv_ref_counts();
#endif
@@ -2002,7 +2068,7 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
cpi->refining_search_sad = vp8_refining_search_sad;
/* make sure frame 1 is okay */
- cpi->error_bins[0] = cpi->common.MBs;
+ cpi->mb.error_bins[0] = cpi->common.MBs;
/* vp8cx_init_quantizer() is first called here. Add check in
* vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
@@ -2064,7 +2130,7 @@ void vp8_remove_compressor(VP8_COMP **ptr)
#endif
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
print_context_counters();
print_tree_update_probs();
print_mode_context();
@@ -2246,7 +2312,7 @@ void vp8_remove_compressor(VP8_COMP **ptr)
}
#endif
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
{
int i, j, k;
FILE *fmode = fopen("modecontext.c", "w");
@@ -2591,7 +2657,7 @@ static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
Scale2Ratio(cm->horiz_scale, &hr, &hs);
Scale2Ratio(cm->vert_scale, &vr, &vs);
- vp8_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
+ vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
tmp_height, hs, hr, vs, vr, 0);
vp8_yv12_extend_frame_borders(&cpi->scaled_source);
@@ -2689,7 +2755,7 @@ static void update_alt_ref_frame_stats(VP8_COMP *cpi)
/* Clear the alternate reference update pending flag. */
cpi->source_alt_ref_pending = 0;
- /* Set the alternate refernce frame active flag */
+ /* Set the alternate reference frame active flag */
cpi->source_alt_ref_active = 1;
@@ -2775,10 +2841,14 @@ static void update_golden_frame_stats(VP8_COMP *cpi)
if (cpi->common.frames_since_golden > 1)
{
- cpi->recent_ref_frame_usage[INTRA_FRAME] += cpi->count_mb_ref_frame_usage[INTRA_FRAME];
- cpi->recent_ref_frame_usage[LAST_FRAME] += cpi->count_mb_ref_frame_usage[LAST_FRAME];
- cpi->recent_ref_frame_usage[GOLDEN_FRAME] += cpi->count_mb_ref_frame_usage[GOLDEN_FRAME];
- cpi->recent_ref_frame_usage[ALTREF_FRAME] += cpi->count_mb_ref_frame_usage[ALTREF_FRAME];
+ cpi->recent_ref_frame_usage[INTRA_FRAME] +=
+ cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME];
+ cpi->recent_ref_frame_usage[LAST_FRAME] +=
+ cpi->mb.count_mb_ref_frame_usage[LAST_FRAME];
+ cpi->recent_ref_frame_usage[GOLDEN_FRAME] +=
+ cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME];
+ cpi->recent_ref_frame_usage[ALTREF_FRAME] +=
+ cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
}
}
}
@@ -2790,7 +2860,7 @@ static void update_rd_ref_frame_probs(VP8_COMP *cpi)
{
VP8_COMMON *cm = &cpi->common;
- const int *const rfct = cpi->count_mb_ref_frame_usage;
+ const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
const int rf_intra = rfct[INTRA_FRAME];
const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
@@ -2815,6 +2885,8 @@ static void update_rd_ref_frame_probs(VP8_COMP *cpi)
if (cpi->common.refresh_alt_ref_frame)
{
cpi->prob_intra_coded += 40;
+ if (cpi->prob_intra_coded > 255)
+ cpi->prob_intra_coded = 255;
cpi->prob_last_coded = 200;
cpi->prob_gf_coded = 1;
}
@@ -3131,6 +3203,57 @@ static void update_reference_frames(VP8_COMP *cpi)
cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame;
#endif
}
+
+#if CONFIG_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity)
+ {
+ /* we shouldn't have to keep multiple copies as we know in advance which
+ * buffer we should start - for now to get something up and running
+ * I've chosen to copy the buffers
+ */
+ if (cm->frame_type == KEY_FRAME)
+ {
+ int i;
+ vp8_yv12_copy_frame(
+ cpi->Source,
+ &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
+
+ vp8_yv12_extend_frame_borders(
+ &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
+
+ for (i = 2; i < MAX_REF_FRAMES - 1; i++)
+ vp8_yv12_copy_frame(
+ &cpi->denoiser.yv12_running_avg[LAST_FRAME],
+ &cpi->denoiser.yv12_running_avg[i]);
+ }
+ else /* For non key frames */
+ {
+ vp8_yv12_extend_frame_borders(
+ &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
+
+ if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf)
+ {
+ vp8_yv12_copy_frame(
+ &cpi->denoiser.yv12_running_avg[INTRA_FRAME],
+ &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
+ }
+ if (cm->refresh_golden_frame || cm->copy_buffer_to_gf)
+ {
+ vp8_yv12_copy_frame(
+ &cpi->denoiser.yv12_running_avg[INTRA_FRAME],
+ &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
+ }
+ if(cm->refresh_last_frame)
+ {
+ vp8_yv12_copy_frame(
+ &cpi->denoiser.yv12_running_avg[INTRA_FRAME],
+ &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
+ }
+ }
+
+ }
+#endif
+
}
void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm)
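
The denoiser block moved above mirrors each reference-buffer refresh into the corresponding running-average buffer; unlike the old copy in vp8_loopfilter_frame() (removed below), it also handles refresh_last_frame and seeds the extra key-frame copies from the LAST_FRAME running average rather than the raw source. A sketch of the flag-to-buffer mapping, with stand-in indices rather than the libvpx enums:

    /* Sketch of the non-key-frame mapping; indices are stand-ins. */
    #include <stdio.h>

    enum { INTRA = 0, LAST = 1, GOLDEN = 2, ALTREF = 3 };

    static void copy_running_avg(int dst, int src)
    {
        printf("copy running-avg buffer %d -> %d\n", src, dst);
    }

    int main(void)
    {
        int refresh_alt = 1, copy_to_arf = 0;   /* example frame flags */
        int refresh_gold = 0, copy_to_gf = 1;
        int refresh_last = 1;

        /* Mirror whichever reference buffers this frame refreshed. */
        if (refresh_alt || copy_to_arf)  copy_running_avg(ALTREF, INTRA);
        if (refresh_gold || copy_to_gf)  copy_running_avg(GOLDEN, INTRA);
        if (refresh_last)                copy_running_avg(LAST, INTRA);
        return 0;
    }
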
@@ -3174,51 +3297,6 @@ void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm)
}
vp8_yv12_extend_frame_borders(cm->frame_to_show);
-#if CONFIG_TEMPORAL_DENOISING
- if (cpi->oxcf.noise_sensitivity)
- {
-
-
- /* we shouldn't have to keep multiple copies as we know in advance which
- * buffer we should start - for now to get something up and running
- * I've chosen to copy the buffers
- */
- if (cm->frame_type == KEY_FRAME)
- {
- int i;
- vp8_yv12_copy_frame(
- cpi->Source,
- &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
-
- vp8_yv12_extend_frame_borders(
- &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
-
- for (i = 2; i < MAX_REF_FRAMES - 1; i++)
- vp8_yv12_copy_frame(
- cpi->Source,
- &cpi->denoiser.yv12_running_avg[i]);
- }
- else /* For non key frames */
- {
- vp8_yv12_extend_frame_borders(
- &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
-
- if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf)
- {
- vp8_yv12_copy_frame(
- &cpi->denoiser.yv12_running_avg[LAST_FRAME],
- &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
- }
- if (cm->refresh_golden_frame || cm->copy_buffer_to_gf)
- {
- vp8_yv12_copy_frame(
- &cpi->denoiser.yv12_running_avg[LAST_FRAME],
- &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
- }
- }
-
- }
-#endif
}
@@ -3302,19 +3380,19 @@ static void encode_frame_to_data_rate
cm->copy_buffer_to_arf = 0;
/* Clear zbin over-quant value and mode boost values. */
- cpi->zbin_over_quant = 0;
- cpi->zbin_mode_boost = 0;
+ cpi->mb.zbin_over_quant = 0;
+ cpi->mb.zbin_mode_boost = 0;
/* Enable or disable mode based tweaking of the zbin
* For 2 Pass Only used where GF/ARF prediction quality
* is above a threshold
*/
- cpi->zbin_mode_boost_enabled = 1;
+ cpi->mb.zbin_mode_boost_enabled = 1;
if (cpi->pass == 2)
{
if ( cpi->gfu_boost <= 400 )
{
- cpi->zbin_mode_boost_enabled = 0;
+ cpi->mb.zbin_mode_boost_enabled = 0;
}
}
@@ -3324,7 +3402,7 @@ static void encode_frame_to_data_rate
else
cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
- /* Check to see if a key frame is signalled
+ /* Check to see if a key frame is signaled
* For two pass with auto key frame enabled cm->frame_type may already
* be set, but not for one pass.
*/
@@ -3381,7 +3459,7 @@ static void encode_frame_to_data_rate
/* Reset the RD threshold multipliers to default of * 1 (128) */
for (i = 0; i < MAX_MODES; i++)
{
- cpi->rd_thresh_mult[i] = 128;
+ cpi->mb.rd_thresh_mult[i] = 128;
}
}
@@ -3459,7 +3537,7 @@ static void encode_frame_to_data_rate
/* Note that we should not throw out a key frame (especially when
* spatial resampling is enabled).
*/
- if ((cm->frame_type == KEY_FRAME))
+ if (cm->frame_type == KEY_FRAME)
{
cpi->decimation_count = cpi->decimation_factor;
}
@@ -3477,6 +3555,8 @@ static void encode_frame_to_data_rate
cm->current_video_frame++;
cpi->frames_since_key++;
+ // We advance the temporal pattern for dropped frames.
+ cpi->temporal_pattern_counter++;
#if CONFIG_INTERNAL_STATS
cpi->count ++;
@@ -3518,6 +3598,8 @@ static void encode_frame_to_data_rate
#endif
cm->current_video_frame++;
cpi->frames_since_key++;
+ // We advance the temporal pattern for dropped frames.
+ cpi->temporal_pattern_counter++;
return;
}
@@ -4070,8 +4152,9 @@ static void encode_frame_to_data_rate
q_low = (Q < q_high) ? (Q + 1) : q_high;
/* If we are using over quant do the same for zbin_oq_low */
- if (cpi->zbin_over_quant > 0)
- zbin_oq_low = (cpi->zbin_over_quant < zbin_oq_high) ? (cpi->zbin_over_quant + 1) : zbin_oq_high;
+ if (cpi->mb.zbin_over_quant > 0)
+ zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high) ?
+ (cpi->mb.zbin_over_quant + 1) : zbin_oq_high;
if (undershoot_seen)
{
@@ -4087,11 +4170,13 @@ static void encode_frame_to_data_rate
* is max)
*/
if (Q < MAXQ)
- cpi->zbin_over_quant = 0;
+ cpi->mb.zbin_over_quant = 0;
else
{
- zbin_oq_low = (cpi->zbin_over_quant < zbin_oq_high) ? (cpi->zbin_over_quant + 1) : zbin_oq_high;
- cpi->zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
+ zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high) ?
+ (cpi->mb.zbin_over_quant + 1) : zbin_oq_high;
+ cpi->mb.zbin_over_quant =
+ (zbin_oq_high + zbin_oq_low) / 2;
}
}
else
@@ -4104,7 +4189,9 @@ static void encode_frame_to_data_rate
Q = vp8_regulate_q(cpi, cpi->this_frame_target);
- while (((Q < q_low) || (cpi->zbin_over_quant < zbin_oq_low)) && (Retries < 10))
+ while (((Q < q_low) ||
+ (cpi->mb.zbin_over_quant < zbin_oq_low)) &&
+ (Retries < 10))
{
vp8_update_rate_correction_factors(cpi, 0);
Q = vp8_regulate_q(cpi, cpi->this_frame_target);
@@ -4117,12 +4204,13 @@ static void encode_frame_to_data_rate
/* Frame is too small */
else
{
- if (cpi->zbin_over_quant == 0)
+ if (cpi->mb.zbin_over_quant == 0)
/* Lower q_high if not using over quant */
q_high = (Q > q_low) ? (Q - 1) : q_low;
else
/* else lower zbin_oq_high */
- zbin_oq_high = (cpi->zbin_over_quant > zbin_oq_low) ? (cpi->zbin_over_quant - 1) : zbin_oq_low;
+ zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low) ?
+ (cpi->mb.zbin_over_quant - 1) : zbin_oq_low;
if (overshoot_seen)
{
@@ -4138,9 +4226,10 @@ static void encode_frame_to_data_rate
* is max)
*/
if (Q < MAXQ)
- cpi->zbin_over_quant = 0;
+ cpi->mb.zbin_over_quant = 0;
else
- cpi->zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
+ cpi->mb.zbin_over_quant =
+ (zbin_oq_high + zbin_oq_low) / 2;
}
else
{
@@ -4163,7 +4252,9 @@ static void encode_frame_to_data_rate
q_low = Q;
}
- while (((Q > q_high) || (cpi->zbin_over_quant > zbin_oq_high)) && (Retries < 10))
+ while (((Q > q_high) ||
+ (cpi->mb.zbin_over_quant > zbin_oq_high)) &&
+ (Retries < 10))
{
vp8_update_rate_correction_factors(cpi, 0);
Q = vp8_regulate_q(cpi, cpi->this_frame_target);
@@ -4181,7 +4272,9 @@ static void encode_frame_to_data_rate
Q = q_low;
/* Clamp cpi->zbin_over_quant */
- cpi->zbin_over_quant = (cpi->zbin_over_quant < zbin_oq_low) ? zbin_oq_low : (cpi->zbin_over_quant > zbin_oq_high) ? zbin_oq_high : cpi->zbin_over_quant;
+ cpi->mb.zbin_over_quant = (cpi->mb.zbin_over_quant < zbin_oq_low) ?
+ zbin_oq_low : (cpi->mb.zbin_over_quant > zbin_oq_high) ?
+ zbin_oq_high : cpi->mb.zbin_over_quant;
Loop = Q != last_q;
}
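
Throughout the recode loop above, once Q is pinned at MAXQ the rate target is chased by bisecting the extra zero-bin quantizer between zbin_oq_low and zbin_oq_high instead. A sketch of that interval halving under persistent overshoot, using the 16 cap quoted in the golden/alt-ref branch of vp8_regulate_q later in this patch:

    /* Sketch of the zbin over-quant bisection; values illustrative. */
    #include <stdio.h>

    int main(void)
    {
        int zbin_oq_low = 0, zbin_oq_high = 16;
        int zbin_over_quant = 0;
        int step;
        for (step = 0; step < 4; step++) {
            zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
            /* Pretend every trial encode still overshoots the target,
             * so the lower bound keeps rising toward the cap. */
            zbin_oq_low = (zbin_over_quant < zbin_oq_high)
                              ? (zbin_over_quant + 1) : zbin_oq_high;
            printf("step %d: zbin_over_quant=%d range=[%d,%d]\n",
                   step, zbin_over_quant, zbin_oq_low, zbin_oq_high);
        }
        return 0;
    }
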
@@ -4263,7 +4356,6 @@ static void encode_frame_to_data_rate
/* Point to beginning of MODE_INFO arrays. */
MODE_INFO *tmp = cm->mi;
- cpi->inter_zz_count = 0;
cpi->zeromv_count = 0;
if(cm->frame_type != KEY_FRAME)
@@ -4272,8 +4364,6 @@ static void encode_frame_to_data_rate
{
for (mb_col = 0; mb_col < cm->mb_cols; mb_col ++)
{
- if(tmp->mbmi.mode == ZEROMV && tmp->mbmi.ref_frame == LAST_FRAME)
- cpi->inter_zz_count++;
if(tmp->mbmi.mode == ZEROMV)
cpi->zeromv_count++;
tmp++;
@@ -4583,9 +4673,6 @@ static void encode_frame_to_data_rate
cm->frame_type, cm->refresh_golden_frame,
cm->refresh_alt_ref_frame);
- for (i = 0; i < MAX_MODES; i++)
- fprintf(fmodes, "%5d ", cpi->mode_chosen_counts[i]);
-
fprintf(fmodes, "\n");
fclose(fmodes);
@@ -4680,6 +4767,7 @@ static void encode_frame_to_data_rate
{
cm->current_video_frame++;
cpi->frames_since_key++;
+ cpi->temporal_pattern_counter++;
}
/* reset to normal state now that we are done. */
@@ -4703,67 +4791,6 @@ static void encode_frame_to_data_rate
}
-
-
-static void check_gf_quality(VP8_COMP *cpi)
-{
- VP8_COMMON *cm = &cpi->common;
- int gf_active_pct = (100 * cpi->gf_active_count) / (cm->mb_rows * cm->mb_cols);
- int gf_ref_usage_pct = (cpi->count_mb_ref_frame_usage[GOLDEN_FRAME] * 100) / (cm->mb_rows * cm->mb_cols);
- int last_ref_zz_useage = (cpi->inter_zz_count * 100) / (cm->mb_rows * cm->mb_cols);
-
- /* Gf refresh is not currently being signalled */
- if (cpi->gf_update_recommended == 0)
- {
- if (cpi->common.frames_since_golden > 7)
- {
- /* Low use of gf */
- if ((gf_active_pct < 10) || ((gf_active_pct + gf_ref_usage_pct) < 15))
- {
- /* ...but last frame zero zero usage is reasonbable so a
- * new gf might be appropriate
- */
- if (last_ref_zz_useage >= 25)
- {
- cpi->gf_bad_count ++;
-
- /* Check that the condition is stable */
- if (cpi->gf_bad_count >= 8)
- {
- cpi->gf_update_recommended = 1;
- cpi->gf_bad_count = 0;
- }
- }
- else
- /* Restart count as the background is not stable enough */
- cpi->gf_bad_count = 0;
- }
- else
- /* Gf useage has picked up so reset count */
- cpi->gf_bad_count = 0;
- }
- }
- /* If the signal is set but has not been read should we cancel it. */
- else if (last_ref_zz_useage < 15)
- {
- cpi->gf_update_recommended = 0;
- cpi->gf_bad_count = 0;
- }
-
-#if 0
- {
- FILE *f = fopen("gfneeded.stt", "a");
- fprintf(f, "%10d %10d %10d %10d %10ld \n",
- cm->current_video_frame,
- cpi->common.frames_since_golden,
- gf_active_pct, gf_ref_usage_pct,
- cpi->gf_update_recommended);
- fclose(f);
- }
-
-#endif
-}
-
#if !(CONFIG_REALTIME_ONLY)
static void Pass2Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned char * dest_end, unsigned int *frame_flags)
{
@@ -5060,15 +5087,13 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
/* Restore layer specific context & set frame rate */
layer = cpi->oxcf.layer_id[
- cm->current_video_frame % cpi->oxcf.periodicity];
+ cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
restore_layer_context (cpi, layer);
vp8_new_frame_rate (cpi, cpi->layer_context[layer].frame_rate);
}
if (cpi->compressor_speed == 2)
{
- if (cpi->oxcf.number_of_layers == 1)
- check_gf_quality(cpi);
vpx_usec_timer_start(&tsctimer);
vpx_usec_timer_start(&ticktimer);
}
@@ -5229,7 +5254,7 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
if (cm->show_frame)
{
-
+ cpi->common.show_frame_mi = cpi->common.mi;
cpi->count ++;
if (cpi->b_calculate_psnr)
@@ -5410,6 +5435,7 @@ int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest, vp8_ppfla
#endif
#if CONFIG_POSTPROC
+ cpi->common.show_frame_mi = cpi->common.mi;
ret = vp8_post_proc_frame(&cpi->common, dest, flags);
#else
diff --git a/libvpx/vp8/encoder/onyx_int.h b/libvpx/vp8/encoder/onyx_int.h
index ed9c762..5120fcc 100644
--- a/libvpx/vp8/encoder/onyx_int.h
+++ b/libvpx/vp8/encoder/onyx_int.h
@@ -43,7 +43,7 @@
#define AF_THRESH 25
#define AF_THRESH2 100
#define ARF_DECAY_THRESH 12
-#define MAX_MODES 20
+
#define MIN_THRESHMULT 32
#define MAX_THRESHMULT 512
@@ -282,17 +282,17 @@ typedef struct VP8_COMP
{
DECLARE_ALIGNED(16, short, Y1quant[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, unsigned char, Y1quant_shift[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, Y1quant_shift[QINDEX_RANGE][16]);
DECLARE_ALIGNED(16, short, Y1zbin[QINDEX_RANGE][16]);
DECLARE_ALIGNED(16, short, Y1round[QINDEX_RANGE][16]);
DECLARE_ALIGNED(16, short, Y2quant[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, unsigned char, Y2quant_shift[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, Y2quant_shift[QINDEX_RANGE][16]);
DECLARE_ALIGNED(16, short, Y2zbin[QINDEX_RANGE][16]);
DECLARE_ALIGNED(16, short, Y2round[QINDEX_RANGE][16]);
DECLARE_ALIGNED(16, short, UVquant[QINDEX_RANGE][16]);
- DECLARE_ALIGNED(16, unsigned char, UVquant_shift[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, UVquant_shift[QINDEX_RANGE][16]);
DECLARE_ALIGNED(16, short, UVzbin[QINDEX_RANGE][16]);
DECLARE_ALIGNED(16, short, UVround[QINDEX_RANGE][16]);
@@ -349,13 +349,8 @@ typedef struct VP8_COMP
int ambient_err;
unsigned int mode_check_freq[MAX_MODES];
- unsigned int mode_test_hit_counts[MAX_MODES];
- unsigned int mode_chosen_counts[MAX_MODES];
- unsigned int mbs_tested_so_far;
- int rd_thresh_mult[MAX_MODES];
int rd_baseline_thresh[MAX_MODES];
- int rd_threshes[MAX_MODES];
int RDMULT;
int RDDIV ;
@@ -416,12 +411,6 @@ typedef struct VP8_COMP
int ni_frames;
int avg_frame_qindex;
- int zbin_over_quant;
- int zbin_mode_boost;
- int zbin_mode_boost_enabled;
- int last_zbin_over_quant;
- int last_zbin_mode_boost;
-
int64_t total_byte_count;
int buffered_mode;
@@ -477,7 +466,6 @@ typedef struct VP8_COMP
int Speed;
int compressor_speed;
- int interquantizer;
int auto_gold;
int auto_adjust_gold_quantizer;
int auto_worst_q;
@@ -493,24 +481,16 @@ typedef struct VP8_COMP
int last_skip_probs_q[3];
int recent_ref_frame_usage[MAX_REF_FRAMES];
- int count_mb_ref_frame_usage[MAX_REF_FRAMES];
int this_frame_percent_intra;
int last_frame_percent_intra;
int ref_frame_flags;
SPEED_FEATURES sf;
- int error_bins[1024];
- /* Data used for real time conferencing mode to help determine if it
- * would be good to update the gf
- */
- int inter_zz_count;
/* Count ZEROMV on all reference frames. */
int zeromv_count;
int lf_zeromv_pct;
- int gf_bad_count;
- int gf_update_recommended;
unsigned char *segmentation_map;
signed char segment_feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
@@ -529,6 +509,10 @@ typedef struct VP8_COMP
int cyclic_refresh_q;
signed char *cyclic_refresh_map;
+ // Frame counter for the temporal pattern. Counter is reset when the temporal
+ // layers are changed dynamically (run-time change).
+ unsigned int temporal_pattern_counter;
+
#if CONFIG_MULTITHREAD
/* multithread data */
int * mt_current_mb_col;
@@ -606,7 +590,7 @@ typedef struct VP8_COMP
/* Error score of frames still to be coded in kf group */
int64_t kf_group_error_left;
/* Projected Bits available for a group including 1 GF or ARF */
- int gf_group_bits;
+ int64_t gf_group_bits;
/* Bits for the golden frame or ARF */
int gf_bits;
int alt_extra_bits;
@@ -712,11 +696,8 @@ typedef struct VP8_COMP
} rd_costs;
} VP8_COMP;
-void control_data_rate(VP8_COMP *cpi);
-
-void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char *dest_end, unsigned long *size);
-
-int rd_cost_intra_mb(MACROBLOCKD *x);
+void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
+ unsigned char *dest_end, unsigned long *size);
void vp8_tokenize_mb(VP8_COMP *, MACROBLOCK *, TOKENEXTRA **);
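
The header hunk above completes this patch's main theme: fields that worker threads mutate per macroblock (mbs_tested_so_far, mode_test_hit_counts, rd_threshes, rd_thresh_mult, the zbin state, error_bins) move out of the shared VP8_COMP into MACROBLOCK, while read-mostly data such as rd_baseline_thresh stays shared. A sketch of the resulting access pattern, with stand-in types:

    /* Sketch only: stand-in types, not the real VP8_COMP/MACROBLOCK. */
    #include <stdio.h>

    #define MAX_MODES 20

    typedef struct {
        unsigned int mbs_tested_so_far;
        unsigned int mode_test_hit_counts[MAX_MODES];
        int rd_threshes[MAX_MODES];
        int rd_thresh_mult[MAX_MODES];
        int zbin_over_quant;
        int error_bins[1024];
    } macroblock_state;               /* one copy per encoding thread */

    typedef struct {
        int rd_baseline_thresh[MAX_MODES];  /* read-only per frame: shared */
        macroblock_state mb;                /* the main thread's copy */
    } compressor_state;

    int main(void)
    {
        compressor_state cpi = { {0}, {0} };
        cpi.mb.mbs_tested_so_far++;   /* was cpi->mbs_tested_so_far before */
        printf("tested %u MBs\n", cpi.mb.mbs_tested_so_far);
        return 0;
    }
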
diff --git a/libvpx/vp8/encoder/pickinter.c b/libvpx/vp8/encoder/pickinter.c
index 3f09a9f..c5279fe 100644
--- a/libvpx/vp8/encoder/pickinter.c
+++ b/libvpx/vp8/encoder/pickinter.c
@@ -389,7 +389,7 @@ static void pick_intra_mbuv_mode(MACROBLOCK *mb)
}
-static void update_mvcount(VP8_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv)
+static void update_mvcount(MACROBLOCK *x, int_mv *best_ref_mv)
{
MACROBLOCKD *xd = &x->e_mbd;
/* Split MV modes currently not supported when RD is not enabled,
@@ -594,6 +594,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
unsigned int zero_mv_sse = INT_MAX, best_sse = INT_MAX;
#endif
+ int sf_improved_mv_pred = cpi->sf.improved_mv_pred;
int_mv mvp;
int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
@@ -680,7 +681,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);
/* Count of the number of MBs tested so far this frame */
- cpi->mbs_tested_so_far++;
+ x->mbs_tested_so_far++;
*returnintra = INT_MAX;
x->skip = 0;
@@ -701,7 +702,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
int this_rd = INT_MAX;
int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]];
- if (best_rd <= cpi->rd_threshes[mode_index])
+ if (best_rd <= x->rd_threshes[mode_index])
continue;
if (this_ref_frame < 0)
@@ -746,22 +747,22 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
/* Check to see if the testing frequency for this mode is at its max
* If so then prevent it from being tested and increase the threshold
* for its testing */
- if (cpi->mode_test_hit_counts[mode_index] &&
+ if (x->mode_test_hit_counts[mode_index] &&
(cpi->mode_check_freq[mode_index] > 1))
{
- if (cpi->mbs_tested_so_far <= (cpi->mode_check_freq[mode_index] *
- cpi->mode_test_hit_counts[mode_index]))
+ if (x->mbs_tested_so_far <= (cpi->mode_check_freq[mode_index] *
+ x->mode_test_hit_counts[mode_index]))
{
/* Increase the threshold for coding this mode to make it less
* likely to be chosen */
- cpi->rd_thresh_mult[mode_index] += 4;
+ x->rd_thresh_mult[mode_index] += 4;
- if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
- cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
+ if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
+ x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
- cpi->rd_threshes[mode_index] =
+ x->rd_threshes[mode_index] =
(cpi->rd_baseline_thresh[mode_index] >> 7) *
- cpi->rd_thresh_mult[mode_index];
+ x->rd_thresh_mult[mode_index];
continue;
}
}
@@ -769,7 +770,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
/* We have now reached the point where we are going to test the current
* mode so increment the counter for the number of times it has been
* tested */
- cpi->mode_test_hit_counts[mode_index] ++;
+ x->mode_test_hit_counts[mode_index] ++;
rate2 = 0;
distortion2 = 0;
@@ -882,7 +883,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
last frame motion info is not stored, then we can not
use improved_mv_pred. */
if (cpi->oxcf.mr_encoder_id && !parent_ref_valid)
- cpi->sf.improved_mv_pred = 0;
+ sf_improved_mv_pred = 0;
if (parent_ref_valid && parent_ref_frame)
{
@@ -899,7 +900,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
}else
#endif
{
- if(cpi->sf.improved_mv_pred)
+ if(sf_improved_mv_pred)
{
if(!saddone)
{
@@ -1109,12 +1110,12 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
/* Testing this mode gave rise to an improvement in best error
* score. Lower threshold a bit for next time
*/
- cpi->rd_thresh_mult[mode_index] =
- (cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ?
- cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
- cpi->rd_threshes[mode_index] =
+ x->rd_thresh_mult[mode_index] =
+ (x->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ?
+ x->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
+ x->rd_threshes[mode_index] =
(cpi->rd_baseline_thresh[mode_index] >> 7) *
- cpi->rd_thresh_mult[mode_index];
+ x->rd_thresh_mult[mode_index];
}
/* If the mode did not help improve the best error case then raise the
@@ -1122,14 +1123,14 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
*/
else
{
- cpi->rd_thresh_mult[mode_index] += 4;
+ x->rd_thresh_mult[mode_index] += 4;
- if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
- cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
+ if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
+ x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
- cpi->rd_threshes[mode_index] =
+ x->rd_threshes[mode_index] =
(cpi->rd_baseline_thresh[mode_index] >> 7) *
- cpi->rd_thresh_mult[mode_index];
+ x->rd_thresh_mult[mode_index];
}
if (x->skip)
@@ -1139,16 +1140,16 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
/* Reduce the activation RD thresholds for the best choice mode */
if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2)))
{
- int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 3);
+ int best_adjustment = (x->rd_thresh_mult[best_mode_index] >> 3);
- cpi->rd_thresh_mult[best_mode_index] =
- (cpi->rd_thresh_mult[best_mode_index]
+ x->rd_thresh_mult[best_mode_index] =
+ (x->rd_thresh_mult[best_mode_index]
>= (MIN_THRESHMULT + best_adjustment)) ?
- cpi->rd_thresh_mult[best_mode_index] - best_adjustment :
+ x->rd_thresh_mult[best_mode_index] - best_adjustment :
MIN_THRESHMULT;
- cpi->rd_threshes[best_mode_index] =
+ x->rd_threshes[best_mode_index] =
(cpi->rd_baseline_thresh[best_mode_index] >> 7) *
- cpi->rd_thresh_mult[best_mode_index];
+ x->rd_thresh_mult[best_mode_index];
}
@@ -1160,7 +1161,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
this_rdbin = 1023;
}
- cpi->error_bins[this_rdbin] ++;
+ x->error_bins[this_rdbin] ++;
}
#if CONFIG_TEMPORAL_DENOISING
@@ -1241,7 +1242,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
!= cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame])
best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int;
- update_mvcount(cpi, x, &best_ref_mv);
+ update_mvcount(x, &best_ref_mv);
}
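
The threshold updates rewritten above implement a simple adaptive scheme: rd_thresh_mult is a fixed-point multiplier with 128 == 1.0 (hence the >> 7 against the baseline), raised by 4 each time a mode proves unhelpful and lowered by 2 when it wins, clamped to the MIN_THRESHMULT/MAX_THRESHMULT bounds quoted in the onyx_int.h hunk above. A worked sketch with an illustrative baseline:

    /* Sketch of the adaptive RD threshold arithmetic. */
    #include <stdio.h>

    #define MIN_THRESHMULT 32
    #define MAX_THRESHMULT 512

    int main(void)
    {
        int baseline = 2560;          /* example rd_baseline_thresh value */
        int mult = 128;               /* start at 1.0x */

        mult += 4;                    /* mode kept losing: raise threshold */
        if (mult > MAX_THRESHMULT) mult = MAX_THRESHMULT;
        printf("raised:  thresh=%d\n", (baseline >> 7) * mult);

        mult = (mult >= MIN_THRESHMULT + 2) ? mult - 2 : MIN_THRESHMULT;
        printf("lowered: thresh=%d\n", (baseline >> 7) * mult);
        return 0;
    }
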
diff --git a/libvpx/vp8/encoder/picklpf.c b/libvpx/vp8/encoder/picklpf.c
index 4121349..841e1e4 100644
--- a/libvpx/vp8/encoder/picklpf.c
+++ b/libvpx/vp8/encoder/picklpf.c
@@ -9,11 +9,12 @@
*/
+#include "./vpx_scale_rtcd.h"
#include "vp8/common/onyxc_int.h"
#include "onyx_int.h"
#include "quantize.h"
#include "vpx_mem/vpx_mem.h"
-#include "vpx_scale/vpxscale.h"
+#include "vpx_scale/vpx_scale.h"
#include "vp8/common/alloccommon.h"
#include "vp8/common/loopfilter.h"
#if ARCH_ARM
diff --git a/libvpx/vp8/encoder/psnr.c b/libvpx/vp8/encoder/psnr.c
index 5bb49ad..b3a3d95 100644
--- a/libvpx/vp8/encoder/psnr.c
+++ b/libvpx/vp8/encoder/psnr.c
@@ -13,7 +13,7 @@
#include "math.h"
#include "vp8/common/systemdependent.h" /* for vp8_clear_system_state() */
-#define MAX_PSNR 60
+#define MAX_PSNR 100
double vp8_mse2psnr(double Samples, double Peak, double Mse)
{
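
Raising MAX_PSNR to 100 widens the clamp on vp8_mse2psnr(): PSNR in dB is 10*log10(peak^2 * samples / error), which diverges as the error approaches zero, so near-lossless frames previously saturated at 60 dB. A sketch of the clamped computation (the function body is paraphrased, not quoted):

    /* Sketch of a clamped mse-to-psnr conversion. Link with -lm. */
    #include <math.h>
    #include <stdio.h>

    #define MAX_PSNR 100

    static double mse2psnr(double samples, double peak, double mse)
    {
        double psnr;
        if (mse > 0.0)
            psnr = 10.0 * log10(peak * peak * samples / mse);
        else
            psnr = MAX_PSNR;          /* zero error: report the cap */
        if (psnr > MAX_PSNR)
            psnr = MAX_PSNR;
        return psnr;
    }

    int main(void)
    {
        printf("%.2f dB\n", mse2psnr(1.0, 255.0, 0.5));  /* ~51.14 dB */
        printf("%.2f dB\n", mse2psnr(1.0, 255.0, 0.0));  /* clamped: 100 */
        return 0;
    }
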
diff --git a/libvpx/vp8/encoder/quantize.c b/libvpx/vp8/encoder/quantize.c
index 88fea11..fda997f 100644
--- a/libvpx/vp8/encoder/quantize.c
+++ b/libvpx/vp8/encoder/quantize.c
@@ -50,8 +50,8 @@ void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
if (x >= zbin)
{
x += round_ptr[rc];
- y = (((x * quant_ptr[rc]) >> 16) + x)
- >> quant_shift_ptr[rc]; /* quantize (x) */
+ y = ((((x * quant_ptr[rc]) >> 16) + x)
+ * quant_shift_ptr[rc]) >> 16; /* quantize (x) */
x = (y ^ sz) - sz; /* get the sign back */
qcoeff_ptr[rc] = x; /* write to destination */
dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */
@@ -113,7 +113,7 @@ void vp8_regular_quantize_b_c(BLOCK *b, BLOCKD *d)
short *zbin_ptr = b->zbin;
short *round_ptr = b->round;
short *quant_ptr = b->quant;
- unsigned char *quant_shift_ptr = b->quant_shift;
+ short *quant_shift_ptr = b->quant_shift;
short *qcoeff_ptr = d->qcoeff;
short *dqcoeff_ptr = d->dqcoeff;
short *dequant_ptr = d->dequant;
@@ -138,8 +138,8 @@ void vp8_regular_quantize_b_c(BLOCK *b, BLOCKD *d)
if (x >= zbin)
{
x += round_ptr[rc];
- y = (((x * quant_ptr[rc]) >> 16) + x)
- >> quant_shift_ptr[rc]; /* quantize (x) */
+ y = ((((x * quant_ptr[rc]) >> 16) + x)
+ * quant_shift_ptr[rc]) >> 16; /* quantize (x) */
x = (y ^ sz) - sz; /* get the sign back */
qcoeff_ptr[rc] = x; /* write to destination */
dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */
@@ -167,7 +167,7 @@ void vp8_strict_quantize_b_c(BLOCK *b, BLOCKD *d)
int sz;
short *coeff_ptr;
short *quant_ptr;
- unsigned char *quant_shift_ptr;
+ short *quant_shift_ptr;
short *qcoeff_ptr;
short *dqcoeff_ptr;
short *dequant_ptr;
@@ -184,21 +184,21 @@ void vp8_strict_quantize_b_c(BLOCK *b, BLOCKD *d)
for (i = 0; i < 16; i++)
{
int dq;
- int round;
+ int rounding;
/*TODO: These arrays should be stored in zig-zag order.*/
rc = vp8_default_zig_zag1d[i];
z = coeff_ptr[rc];
dq = dequant_ptr[rc];
- round = dq >> 1;
+ rounding = dq >> 1;
/* Sign of z. */
sz = -(z < 0);
x = (z + sz) ^ sz;
- x += round;
+ x += rounding;
if (x >= dq)
{
/* Quantize x. */
- y = (((x * quant_ptr[rc]) >> 16) + x) >> quant_shift_ptr[rc];
+ y = ((((x * quant_ptr[rc]) >> 16) + x) * quant_shift_ptr[rc]) >> 16;
/* Put the sign back. */
x = (y + sz) ^ sz;
/* Save the coefficient and its dequantized value. */
@@ -406,7 +406,7 @@ static const int qzbin_factors_y2[129] =
#define EXACT_QUANT
#ifdef EXACT_QUANT
static void invert_quant(int improved_quant, short *quant,
- unsigned char *shift, short d)
+ short *shift, short d)
{
if(improved_quant)
{
@@ -418,11 +418,15 @@ static void invert_quant(int improved_quant, short *quant,
t = 1 + (1<<(16+l))/d;
*quant = (short)(t - (1<<16));
*shift = l;
+ /* use multiplication and constant shift by 16 */
+ *shift = 1 << (16 - *shift);
}
else
{
*quant = (1 << 16) / d;
*shift = 0;
+ /* use multiplication and constant shift by 16 */
+ *shift = 1 << (16 - *shift);
}
}
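
invert_quant() above switches the "exact" quantizer from a per-coefficient variable right shift to a multiply by 1 << (16 - shift) followed by a constant >> 16, matching the rewritten y = ... expressions earlier in this file; a constant final shift is much friendlier to SIMD, where per-lane variable shifts are awkward. A sketch showing the two forms agree, with illustrative values:

    /* Sketch comparing the old and new quantize step; values illustrative. */
    #include <stdio.h>

    int main(void)
    {
        int x = 700;                   /* |coeff| after rounding */
        int quant = 20000;             /* reciprocal-style multiplier */
        int l = 2;                     /* old variable shift amount */
        short shift = 1 << (16 - l);   /* new multiplier form: 16384 */

        int y_old = (((x * quant) >> 16) + x) >> l;
        int y_new = ((((x * quant) >> 16) + x) * shift) >> 16;
        printf("old=%d new=%d\n", y_old, y_new);   /* identical: 228 */
        return 0;
    }
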
@@ -587,20 +591,20 @@ void vp8cx_init_quantizer(VP8_COMP *cpi)
#define ZBIN_EXTRA_Y \
(( cpi->common.Y1dequant[QIndex][1] * \
- ( cpi->zbin_over_quant + \
- cpi->zbin_mode_boost + \
+ ( x->zbin_over_quant + \
+ x->zbin_mode_boost + \
x->act_zbin_adj ) ) >> 7)
#define ZBIN_EXTRA_UV \
(( cpi->common.UVdequant[QIndex][1] * \
- ( cpi->zbin_over_quant + \
- cpi->zbin_mode_boost + \
+ ( x->zbin_over_quant + \
+ x->zbin_mode_boost + \
x->act_zbin_adj ) ) >> 7)
#define ZBIN_EXTRA_Y2 \
(( cpi->common.Y2dequant[QIndex][1] * \
- ( (cpi->zbin_over_quant / 2) + \
- cpi->zbin_mode_boost + \
+ ( (x->zbin_over_quant / 2) + \
+ x->zbin_mode_boost + \
x->act_zbin_adj ) ) >> 7)
void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip)
@@ -702,15 +706,15 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip)
/* save this macroblock QIndex for vp8_update_zbin_extra() */
x->q_index = QIndex;
- cpi->last_zbin_over_quant = cpi->zbin_over_quant;
- cpi->last_zbin_mode_boost = cpi->zbin_mode_boost;
+ x->last_zbin_over_quant = x->zbin_over_quant;
+ x->last_zbin_mode_boost = x->zbin_mode_boost;
x->last_act_zbin_adj = x->act_zbin_adj;
}
- else if(cpi->last_zbin_over_quant != cpi->zbin_over_quant
- || cpi->last_zbin_mode_boost != cpi->zbin_mode_boost
+ else if(x->last_zbin_over_quant != x->zbin_over_quant
+ || x->last_zbin_mode_boost != x->zbin_mode_boost
|| x->last_act_zbin_adj != x->act_zbin_adj)
{
/* Y */
@@ -729,8 +733,8 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip)
zbin_extra = ZBIN_EXTRA_Y2;
x->block[24].zbin_extra = (short)zbin_extra;
- cpi->last_zbin_over_quant = cpi->zbin_over_quant;
- cpi->last_zbin_mode_boost = cpi->zbin_mode_boost;
+ x->last_zbin_over_quant = x->zbin_over_quant;
+ x->last_zbin_mode_boost = x->zbin_mode_boost;
x->last_act_zbin_adj = x->act_zbin_adj;
}
}
@@ -764,7 +768,7 @@ void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x)
void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
{
/* Clear Zbin mode boost for default case */
- cpi->zbin_mode_boost = 0;
+ cpi->mb.zbin_mode_boost = 0;
/* MB level quantizer setup */
vp8cx_mb_init_quantizer(cpi, &cpi->mb, 0);
diff --git a/libvpx/vp8/encoder/ratectrl.c b/libvpx/vp8/encoder/ratectrl.c
index 77c1c5a..8e3c01d 100644
--- a/libvpx/vp8/encoder/ratectrl.c
+++ b/libvpx/vp8/encoder/ratectrl.c
@@ -614,7 +614,6 @@ static void calc_gf_params(VP8_COMP *cpi)
static void calc_pframe_target_size(VP8_COMP *cpi)
{
int min_frame_target;
- int Adjustment;
int old_per_frame_bandwidth = cpi->per_frame_bandwidth;
if ( cpi->current_layer > 0)
@@ -658,6 +657,7 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
/* 1 pass */
else
{
+ int Adjustment;
/* Make rate adjustment to recover bits spent in key frame
* Test to see if the key frame inter data rate correction
* should still be in force
@@ -688,7 +688,7 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
*/
if ((cpi->gf_overspend_bits > 0) && (cpi->this_frame_target > min_frame_target))
{
- int Adjustment = (cpi->non_gf_bitrate_adjustment <= cpi->gf_overspend_bits) ? cpi->non_gf_bitrate_adjustment : cpi->gf_overspend_bits;
+ Adjustment = (cpi->non_gf_bitrate_adjustment <= cpi->gf_overspend_bits) ? cpi->non_gf_bitrate_adjustment : cpi->gf_overspend_bits;
if (Adjustment > (cpi->this_frame_target - min_frame_target))
Adjustment = (cpi->this_frame_target - min_frame_target);
@@ -1109,7 +1109,9 @@ void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var)
}
else
{
- if (cpi->common.refresh_alt_ref_frame || cpi->common.refresh_golden_frame)
+ if (cpi->oxcf.number_of_layers == 1 &&
+ (cpi->common.refresh_alt_ref_frame ||
+ cpi->common.refresh_golden_frame))
rate_correction_factor = cpi->gf_rate_correction_factor;
else
rate_correction_factor = cpi->rate_correction_factor;
@@ -1122,9 +1124,9 @@ void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var)
projected_size_based_on_q = (int)(((.5 + rate_correction_factor * vp8_bits_per_mb[cpi->common.frame_type][Q]) * cpi->common.MBs) / (1 << BPER_MB_NORMBITS));
/* Make some allowance for cpi->zbin_over_quant */
- if (cpi->zbin_over_quant > 0)
+ if (cpi->mb.zbin_over_quant > 0)
{
- int Z = cpi->zbin_over_quant;
+ int Z = cpi->mb.zbin_over_quant;
double Factor = 0.99;
double factor_adjustment = 0.01 / 256.0;
@@ -1186,7 +1188,9 @@ void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var)
cpi->key_frame_rate_correction_factor = rate_correction_factor;
else
{
- if (cpi->common.refresh_alt_ref_frame || cpi->common.refresh_golden_frame)
+ if (cpi->oxcf.number_of_layers == 1 &&
+ (cpi->common.refresh_alt_ref_frame ||
+ cpi->common.refresh_golden_frame))
cpi->gf_rate_correction_factor = rate_correction_factor;
else
cpi->rate_correction_factor = rate_correction_factor;
@@ -1199,7 +1203,7 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame)
int Q = cpi->active_worst_quality;
/* Reset Zbin OQ value */
- cpi->zbin_over_quant = 0;
+ cpi->mb.zbin_over_quant = 0;
if (cpi->oxcf.fixed_q >= 0)
{
@@ -1209,11 +1213,13 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame)
{
Q = cpi->oxcf.key_q;
}
- else if (cpi->common.refresh_alt_ref_frame)
+ else if (cpi->oxcf.number_of_layers == 1 &&
+ cpi->common.refresh_alt_ref_frame)
{
Q = cpi->oxcf.alt_q;
}
- else if (cpi->common.refresh_golden_frame)
+ else if (cpi->oxcf.number_of_layers == 1 &&
+ cpi->common.refresh_golden_frame)
{
Q = cpi->oxcf.gold_q;
}
@@ -1232,7 +1238,9 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame)
correction_factor = cpi->key_frame_rate_correction_factor;
else
{
- if (cpi->common.refresh_alt_ref_frame || cpi->common.refresh_golden_frame)
+ if (cpi->oxcf.number_of_layers == 1 &&
+ (cpi->common.refresh_alt_ref_frame ||
+ cpi->common.refresh_golden_frame))
correction_factor = cpi->gf_rate_correction_factor;
else
correction_factor = cpi->rate_correction_factor;
@@ -1281,7 +1289,10 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame)
if (cpi->common.frame_type == KEY_FRAME)
zbin_oqmax = 0;
- else if (cpi->common.refresh_alt_ref_frame || (cpi->common.refresh_golden_frame && !cpi->source_alt_ref_active))
+ else if (cpi->oxcf.number_of_layers == 1 &&
+ (cpi->common.refresh_alt_ref_frame ||
+ (cpi->common.refresh_golden_frame &&
+ !cpi->source_alt_ref_active)))
zbin_oqmax = 16;
else
zbin_oqmax = ZBIN_OQ_MAX;
@@ -1307,12 +1318,12 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame)
* normal maximum by expanding the zero bin and hence
* decreasing the number of low magnitude non zero coefficients.
*/
- while (cpi->zbin_over_quant < zbin_oqmax)
+ while (cpi->mb.zbin_over_quant < zbin_oqmax)
{
- cpi->zbin_over_quant ++;
+ cpi->mb.zbin_over_quant ++;
- if (cpi->zbin_over_quant > zbin_oqmax)
- cpi->zbin_over_quant = zbin_oqmax;
+ if (cpi->mb.zbin_over_quant > zbin_oqmax)
+ cpi->mb.zbin_over_quant = zbin_oqmax;
/* Adjust bits_per_mb_at_this_q estimate */
bits_per_mb_at_this_q = (int)(Factor * bits_per_mb_at_this_q);
@@ -1349,10 +1360,10 @@ static int estimate_keyframe_frequency(VP8_COMP *cpi)
* whichever is smaller.
*/
int key_freq = cpi->oxcf.key_freq>0 ? cpi->oxcf.key_freq : 1;
- av_key_frame_frequency = (int)cpi->output_frame_rate * 2;
+ av_key_frame_frequency = 1 + (int)cpi->output_frame_rate * 2;
if (cpi->oxcf.auto_key && av_key_frame_frequency > key_freq)
- av_key_frame_frequency = cpi->oxcf.key_freq;
+ av_key_frame_frequency = key_freq;
cpi->prior_key_frame_distance[KEY_FRAME_CONTEXT - 1]
= av_key_frame_frequency;
@@ -1382,6 +1393,10 @@ static int estimate_keyframe_frequency(VP8_COMP *cpi)
av_key_frame_frequency /= total_weight;
}
+ // TODO (marpan): Given the checks above, |av_key_frame_frequency|
+ // should always be above 0. But for now we keep the sanity check in.
+ if (av_key_frame_frequency == 0)
+ av_key_frame_frequency = 1;
return av_key_frame_frequency;
}
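
The estimate_keyframe_frequency() hunk above makes two related fixes: the leading "1 +" keeps the estimate nonzero at very low frame rates, and the clamp now uses the sanitized key_freq local (which substitutes 1 when the config value is unset) instead of the raw cpi->oxcf.key_freq. A sketch of the pathological case they address:

    /* Sketch of the low-frame-rate keyframe frequency fix. */
    #include <stdio.h>

    int main(void)
    {
        double output_frame_rate = 0.4;   /* pathological low-fps stream */
        int oxcf_key_freq = 0;            /* "unset" in the config */
        int auto_key = 1;

        int key_freq = oxcf_key_freq > 0 ? oxcf_key_freq : 1;
        int av = 1 + (int)output_frame_rate * 2;  /* was 0 before the +1 */
        if (auto_key && av > key_freq)
            av = key_freq;                /* was clamped to 0 before */
        printf("av_key_frame_frequency = %d\n", av);
        return 0;
    }
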
diff --git a/libvpx/vp8/encoder/rdopt.c b/libvpx/vp8/encoder/rdopt.c
index 7d80606..8579614 100644
--- a/libvpx/vp8/encoder/rdopt.c
+++ b/libvpx/vp8/encoder/rdopt.c
@@ -14,7 +14,7 @@
#include <limits.h>
#include <assert.h>
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vp8/common/pragmas.h"
#include "tokenize.h"
#include "treewriter.h"
@@ -223,7 +223,7 @@ void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex)
cpi->mb.sadperbit4 = sad_per_bit4lut[QIndex];
}
-void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
+void vp8_initialize_rd_consts(VP8_COMP *cpi, MACROBLOCK *x, int Qvalue)
{
int q;
int i;
@@ -238,15 +238,15 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
cpi->RDMULT = (int)(rdconst * (capped_q * capped_q));
/* Extend rate multiplier along side quantizer zbin increases */
- if (cpi->zbin_over_quant > 0)
+ if (cpi->mb.zbin_over_quant > 0)
{
double oq_factor;
double modq;
/* Experimental code using the same basic equation as used for Q above
- * The units of cpi->zbin_over_quant are 1/128 of Q bin size
+ * The units of cpi->mb.zbin_over_quant are 1/128 of Q bin size
*/
- oq_factor = 1.0 + ((double)0.0015625 * cpi->zbin_over_quant);
+ oq_factor = 1.0 + ((double)0.0015625 * cpi->mb.zbin_over_quant);
modq = (int)((double)capped_q * oq_factor);
cpi->RDMULT = (int)(rdconst * (modq * modq));
}
@@ -265,6 +265,11 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
vp8_set_speed_features(cpi);
+ for (i = 0; i < MAX_MODES; i++)
+ {
+ x->mode_test_hit_counts[i] = 0;
+ }
+
q = (int)pow(Qvalue, 1.25);
if (q < 8)
@@ -279,14 +284,14 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
{
if (cpi->sf.thresh_mult[i] < INT_MAX)
{
- cpi->rd_threshes[i] = cpi->sf.thresh_mult[i] * q / 100;
+ x->rd_threshes[i] = cpi->sf.thresh_mult[i] * q / 100;
}
else
{
- cpi->rd_threshes[i] = INT_MAX;
+ x->rd_threshes[i] = INT_MAX;
}
- cpi->rd_baseline_thresh[i] = cpi->rd_threshes[i];
+ cpi->rd_baseline_thresh[i] = x->rd_threshes[i];
}
}
else
@@ -297,14 +302,14 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
{
if (cpi->sf.thresh_mult[i] < (INT_MAX / q))
{
- cpi->rd_threshes[i] = cpi->sf.thresh_mult[i] * q;
+ x->rd_threshes[i] = cpi->sf.thresh_mult[i] * q;
}
else
{
- cpi->rd_threshes[i] = INT_MAX;
+ x->rd_threshes[i] = INT_MAX;
}
- cpi->rd_baseline_thresh[i] = cpi->rd_threshes[i];
+ cpi->rd_baseline_thresh[i] = x->rd_threshes[i];
}
}
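
The threshold setup above keeps its overflow guard while switching to the per-MB arrays: in the second scaling branch, before multiplying a speed-feature threshold by q (itself roughly Qvalue^1.25), it compares against INT_MAX / q and saturates rather than letting the product wrap. A standalone sketch:

    /* Sketch of the saturating threshold scaling; values illustrative. */
    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        int q = 4096;                  /* example of (int)pow(Qvalue, 1.25) */
        int thresh_mult[2] = { 2000, INT_MAX / 2 };
        int i;
        for (i = 0; i < 2; i++) {
            int thresh = (thresh_mult[i] < INT_MAX / q)
                             ? thresh_mult[i] * q : INT_MAX;
            printf("mode %d: thresh=%d\n", i, thresh);
        }
        return 0;
    }
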
@@ -879,8 +884,8 @@ static void rd_pick_intra_mbuv_mode(MACROBLOCK *x, int *rate,
for (mode = DC_PRED; mode <= TM_PRED; mode++)
{
- int rate;
- int distortion;
+ int this_rate;
+ int this_distortion;
int this_rd;
xd->mode_info_context->mbmi.uv_mode = mode;
@@ -902,17 +907,17 @@ static void rd_pick_intra_mbuv_mode(MACROBLOCK *x, int *rate,
vp8_quantize_mbuv(x);
rate_to = rd_cost_mbuv(x);
- rate = rate_to + x->intra_uv_mode_cost[xd->frame_type][xd->mode_info_context->mbmi.uv_mode];
+ this_rate = rate_to + x->intra_uv_mode_cost[xd->frame_type][xd->mode_info_context->mbmi.uv_mode];
- distortion = vp8_mbuverror(x) / 4;
+ this_distortion = vp8_mbuverror(x) / 4;
- this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
+ this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
if (this_rd < best_rd)
{
best_rd = this_rd;
- d = distortion;
- r = rate;
+ d = this_distortion;
+ r = this_rate;
*rate_tokenonly = rate_to;
mode_selected = mode;
}
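
[Annotation] RDCOST above folds rate and distortion into a single Lagrangian cost using the macroblock's multipliers. Schematically (the actual macro in rdopt.h uses a fixed-point form along these lines; this is a sketch of its shape, not the exact definition):

    /* cost ~= lambda * rate + distortion, with lambda carried in
     * rdmult. Sketch only. */
    static int rd_cost(int rdmult, int rddiv, int rate, int distortion) {
      return ((128 + rate * rdmult) >> 8) + rddiv * distortion;
    }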
@@ -1289,12 +1294,11 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
if (bestsme < INT_MAX)
{
- int distortion;
+ int disto;
unsigned int sse;
cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4],
bsi->ref_mv, x->errorperbit, v_fn_ptr, x->mvcost,
- &distortion, &sse);
-
+ &disto, &sse);
}
} /* NEW4X4 */
@@ -1728,7 +1732,7 @@ void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffse
}
}
-static void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv)
+static void rd_update_mvcount(MACROBLOCK *x, int_mv *best_ref_mv)
{
if (x->e_mbd.mode_info_context->mbmi.mode == SPLITMV)
{
@@ -2010,7 +2014,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
*returnintra = INT_MAX;
/* Count of the number of MBs tested so far this frame */
- cpi->mbs_tested_so_far++;
+ x->mbs_tested_so_far++;
x->skip = 0;
@@ -2022,7 +2026,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]];
/* Test best rd so far against threshold for trying this mode. */
- if (best_mode.rd <= cpi->rd_threshes[mode_index])
+ if (best_mode.rd <= x->rd_threshes[mode_index])
continue;
if (this_ref_frame < 0)
@@ -2068,19 +2072,21 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
* max If so then prevent it from being tested and increase the
* threshold for its testing
*/
- if (cpi->mode_test_hit_counts[mode_index] && (cpi->mode_check_freq[mode_index] > 1))
+ if (x->mode_test_hit_counts[mode_index] && (cpi->mode_check_freq[mode_index] > 1))
{
- if (cpi->mbs_tested_so_far <= cpi->mode_check_freq[mode_index] * cpi->mode_test_hit_counts[mode_index])
+ if (x->mbs_tested_so_far <= cpi->mode_check_freq[mode_index] * x->mode_test_hit_counts[mode_index])
{
/* Increase the threshold for coding this mode to make it
* less likely to be chosen
*/
- cpi->rd_thresh_mult[mode_index] += 4;
+ x->rd_thresh_mult[mode_index] += 4;
- if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
- cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
+ if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
+ x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
- cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
+ x->rd_threshes[mode_index] =
+ (cpi->rd_baseline_thresh[mode_index] >> 7) *
+ x->rd_thresh_mult[mode_index];
continue;
}
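
[Annotation] The block above is the mode-skipping half of the adaptive search: a mode that keeps getting tested within its check-frequency budget has its threshold multiplier bumped and is skipped for a while. A standalone sketch, with max_thresh standing in for the encoder's MAX_THRESHMULT cap:

    /* Sketch of the gating above; keeping this state per-MACROBLOCK
     * means worker threads no longer race on shared VP8_COMP counters. */
    static int maybe_skip_mode(int mbs_tested, int check_freq, int hit_count,
                               int baseline_thresh, int max_thresh,
                               int *thresh_mult, int *rd_thresh) {
      if (hit_count && check_freq > 1 &&
          mbs_tested <= check_freq * hit_count) {
        *thresh_mult += 4;                 /* make the mode pricier */
        if (*thresh_mult > max_thresh)
          *thresh_mult = max_thresh;
        *rd_thresh = (baseline_thresh >> 7) * *thresh_mult;
        return 1;                          /* skip this mode for now */
      }
      return 0;
    }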
@@ -2090,28 +2096,28 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
* current mode so increment the counter for the number of times
* it has been tested
*/
- cpi->mode_test_hit_counts[mode_index] ++;
+ x->mode_test_hit_counts[mode_index] ++;
/* Experimental code. Special case for gf and arf zeromv modes.
             * Increase zbin size to suppress noise
*/
- if (cpi->zbin_mode_boost_enabled)
+ if (x->zbin_mode_boost_enabled)
{
if ( this_ref_frame == INTRA_FRAME )
- cpi->zbin_mode_boost = 0;
+ x->zbin_mode_boost = 0;
else
{
if (vp8_mode_order[mode_index] == ZEROMV)
{
if (this_ref_frame != LAST_FRAME)
- cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
+ x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
- cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
+ x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
}
else if (vp8_mode_order[mode_index] == SPLITMV)
- cpi->zbin_mode_boost = 0;
+ x->zbin_mode_boost = 0;
else
- cpi->zbin_mode_boost = MV_ZBIN_BOOST;
+ x->zbin_mode_boost = MV_ZBIN_BOOST;
}
vp8_update_zbin_extra(cpi, x);
@@ -2170,8 +2176,10 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
int this_rd_thresh;
int distortion;
- this_rd_thresh = (vp8_ref_frame_order[mode_index] == 1) ? cpi->rd_threshes[THR_NEW1] : cpi->rd_threshes[THR_NEW3];
- this_rd_thresh = (vp8_ref_frame_order[mode_index] == 2) ? cpi->rd_threshes[THR_NEW2] : this_rd_thresh;
+ this_rd_thresh = (vp8_ref_frame_order[mode_index] == 1) ?
+ x->rd_threshes[THR_NEW1] : x->rd_threshes[THR_NEW3];
+ this_rd_thresh = (vp8_ref_frame_order[mode_index] == 2) ?
+ x->rd_threshes[THR_NEW2] : this_rd_thresh;
tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv,
best_mode.yrd, mdcounts,
@@ -2464,8 +2472,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
/* Testing this mode gave rise to an improvement in best error
* score. Lower threshold a bit for next time
*/
- cpi->rd_thresh_mult[mode_index] = (cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ? cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
- cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
+ x->rd_thresh_mult[mode_index] =
+ (x->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ?
+ x->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
}
/* If the mode did not help improve the best error case then raise
@@ -2473,13 +2482,14 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
*/
else
{
- cpi->rd_thresh_mult[mode_index] += 4;
-
- if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
- cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
+ x->rd_thresh_mult[mode_index] += 4;
- cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
+ if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
+ x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
}
+ x->rd_threshes[mode_index] =
+ (cpi->rd_baseline_thresh[mode_index] >> 7) *
+ x->rd_thresh_mult[mode_index];
if (x->skip)
break;
@@ -2489,15 +2499,18 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
/* Reduce the activation RD thresholds for the best choice mode */
if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2)))
{
- int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 2);
-
- cpi->rd_thresh_mult[best_mode_index] = (cpi->rd_thresh_mult[best_mode_index] >= (MIN_THRESHMULT + best_adjustment)) ? cpi->rd_thresh_mult[best_mode_index] - best_adjustment : MIN_THRESHMULT;
- cpi->rd_threshes[best_mode_index] = (cpi->rd_baseline_thresh[best_mode_index] >> 7) * cpi->rd_thresh_mult[best_mode_index];
+ int best_adjustment = (x->rd_thresh_mult[best_mode_index] >> 2);
+
+ x->rd_thresh_mult[best_mode_index] =
+ (x->rd_thresh_mult[best_mode_index] >=
+ (MIN_THRESHMULT + best_adjustment)) ?
+ x->rd_thresh_mult[best_mode_index] - best_adjustment :
+ MIN_THRESHMULT;
+ x->rd_threshes[best_mode_index] =
+ (cpi->rd_baseline_thresh[best_mode_index] >> 7) *
+ x->rd_thresh_mult[best_mode_index];
}
- /* Note how often each mode chosen as best */
- cpi->mode_chosen_counts[best_mode_index] ++;
-
#if CONFIG_TEMPORAL_DENOISING
if (cpi->oxcf.noise_sensitivity)
{
@@ -2591,7 +2604,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
!= cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame])
best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int;
- rd_update_mvcount(cpi, x, &best_ref_mv);
+ rd_update_mvcount(x, &best_ref_mv);
}
void vp8_rd_pick_intra_mode(MACROBLOCK *x, int *rate_)
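
[Annotation] Taken together, the rdopt.c hunks above migrate the per-macroblock search state out of the shared VP8_COMP and onto the MACROBLOCK each thread owns. The moved fields, as used in this diff (the real declarations live in the encoder headers):

    int rd_threshes[MAX_MODES];          /* per-mode RD gates        */
    int rd_thresh_mult[MAX_MODES];       /* adaptive multipliers     */
    int mbs_tested_so_far;               /* MBs searched this frame  */
    int mode_test_hit_counts[MAX_MODES];
    int zbin_over_quant, zbin_mode_boost, zbin_mode_boost_enabled;

Only the baseline thresholds (rd_baseline_thresh) and the check frequencies stay on VP8_COMP, since they are read-only during the search.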
diff --git a/libvpx/vp8/encoder/rdopt.h b/libvpx/vp8/encoder/rdopt.h
index d7b0442..1e11fa7 100644
--- a/libvpx/vp8/encoder/rdopt.h
+++ b/libvpx/vp8/encoder/rdopt.h
@@ -65,7 +65,7 @@ static void insertsortsad(int arr[],int idx[], int len)
}
}
-extern void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue);
+extern void vp8_initialize_rd_consts(VP8_COMP *cpi, MACROBLOCK *x, int Qvalue);
extern void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, int *returndistortion, int *returnintra);
extern void vp8_rd_pick_intra_mode(MACROBLOCK *x, int *rate);
diff --git a/libvpx/vp8/encoder/temporal_filter.c b/libvpx/vp8/encoder/temporal_filter.c
index b83ae89..7e3af71 100644
--- a/libvpx/vp8/encoder/temporal_filter.c
+++ b/libvpx/vp8/encoder/temporal_filter.c
@@ -17,7 +17,7 @@
#include "mcomp.h"
#include "firstpass.h"
#include "psnr.h"
-#include "vpx_scale/vpxscale.h"
+#include "vpx_scale/vpx_scale.h"
#include "vp8/common/extend.h"
#include "ratectrl.h"
#include "vp8/common/quant_common.h"
diff --git a/libvpx/vp8/encoder/tokenize.c b/libvpx/vp8/encoder/tokenize.c
index 3b5268b..11559a7 100644
--- a/libvpx/vp8/encoder/tokenize.c
+++ b/libvpx/vp8/encoder/tokenize.c
@@ -20,7 +20,7 @@
/* Global event counters used for accumulating statistics across several
compressions, then generating context.c = initial stats. */
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
_int64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t) ;
@@ -413,7 +413,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
}
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
void init_context_counters(void)
{
diff --git a/libvpx/vp8/encoder/tokenize.h b/libvpx/vp8/encoder/tokenize.h
index c2d1438..1e6cea1 100644
--- a/libvpx/vp8/encoder/tokenize.h
+++ b/libvpx/vp8/encoder/tokenize.h
@@ -33,7 +33,7 @@ typedef struct
int rd_cost_mby(MACROBLOCKD *);
-#ifdef ENTROPY_STATS
+#ifdef VP8_ENTROPY_STATS
void init_context_counters();
void print_context_counters();
diff --git a/libvpx/vp8/encoder/asm_enc_offsets.c b/libvpx/vp8/encoder/vp8_asm_enc_offsets.c
index a4169b3..a4169b3 100644
--- a/libvpx/vp8/encoder/asm_enc_offsets.c
+++ b/libvpx/vp8/encoder/vp8_asm_enc_offsets.c
diff --git a/libvpx/vp8/encoder/x86/dct_sse2.asm b/libvpx/vp8/encoder/x86/dct_sse2.asm
index d880ce0..d06bca5 100644
--- a/libvpx/vp8/encoder/x86/dct_sse2.asm
+++ b/libvpx/vp8/encoder/x86/dct_sse2.asm
@@ -29,7 +29,7 @@
movsxd rax, dword ptr arg(2)
lea rcx, [rsi + rax*2]
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
%define input rcx
%define output rdx
%define pitch r8
@@ -53,7 +53,7 @@
RESTORE_GOT
pop rbp
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
RESTORE_XMM
%endif
%endif
diff --git a/libvpx/vp8/encoder/x86/denoising_sse2.c b/libvpx/vp8/encoder/x86/denoising_sse2.c
index c1ac6c1..cceb826 100644
--- a/libvpx/vp8/encoder/x86/denoising_sse2.c
+++ b/libvpx/vp8/encoder/x86/denoising_sse2.c
@@ -12,9 +12,10 @@
#include "vp8/common/reconinter.h"
#include "vpx/vpx_integer.h"
#include "vpx_mem/vpx_mem.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include <emmintrin.h>
+#include "vpx_ports/emmintrin_compat.h"
union sum_union {
__m128i v;
diff --git a/libvpx/vp8/encoder/x86/quantize_sse2.asm b/libvpx/vp8/encoder/x86/quantize_sse2.asm
deleted file mode 100644
index 724e54c..0000000
--- a/libvpx/vp8/encoder/x86/quantize_sse2.asm
+++ /dev/null
@@ -1,386 +0,0 @@
-;
-; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-; Use of this source code is governed by a BSD-style license and patent
-; grant that can be found in the LICENSE file in the root of the source
-; tree. All contributing project authors may be found in the AUTHORS
-; file in the root of the source tree.
-;
-
-
-%include "vpx_ports/x86_abi_support.asm"
-%include "asm_enc_offsets.asm"
-
-
-; void vp8_regular_quantize_b_sse2 | arg
-; (BLOCK *b, | 0
-; BLOCKD *d) | 1
-
-global sym(vp8_regular_quantize_b_sse2) PRIVATE
-sym(vp8_regular_quantize_b_sse2):
- push rbp
- mov rbp, rsp
- SAVE_XMM 7
- GET_GOT rbx
-
-%if ABI_IS_32BIT
- push rdi
- push rsi
-%else
- %ifidn __OUTPUT_FORMAT__,x64
- push rdi
- push rsi
- %endif
-%endif
-
- ALIGN_STACK 16, rax
- %define zrun_zbin_boost 0 ; 8
- %define abs_minus_zbin 8 ; 32
- %define temp_qcoeff 40 ; 32
- %define qcoeff 72 ; 32
- %define stack_size 104
- sub rsp, stack_size
- ; end prolog
-
-%if ABI_IS_32BIT
- mov rdi, arg(0) ; BLOCK *b
- mov rsi, arg(1) ; BLOCKD *d
-%else
- %ifidn __OUTPUT_FORMAT__,x64
- mov rdi, rcx ; BLOCK *b
- mov rsi, rdx ; BLOCKD *d
- %else
- ;mov rdi, rdi ; BLOCK *b
- ;mov rsi, rsi ; BLOCKD *d
- %endif
-%endif
-
- mov rdx, [rdi + vp8_block_coeff] ; coeff_ptr
- mov rcx, [rdi + vp8_block_zbin] ; zbin_ptr
- movd xmm7, [rdi + vp8_block_zbin_extra] ; zbin_oq_value
-
- ; z
- movdqa xmm0, [rdx]
- movdqa xmm4, [rdx + 16]
- mov rdx, [rdi + vp8_block_round] ; round_ptr
-
- pshuflw xmm7, xmm7, 0
- punpcklwd xmm7, xmm7 ; duplicated zbin_oq_value
-
- movdqa xmm1, xmm0
- movdqa xmm5, xmm4
-
- ; sz
- psraw xmm0, 15
- psraw xmm4, 15
-
- ; (z ^ sz)
- pxor xmm1, xmm0
- pxor xmm5, xmm4
-
- ; x = abs(z)
- psubw xmm1, xmm0
- psubw xmm5, xmm4
-
- movdqa xmm2, [rcx]
- movdqa xmm3, [rcx + 16]
- mov rcx, [rdi + vp8_block_quant] ; quant_ptr
-
- ; *zbin_ptr + zbin_oq_value
- paddw xmm2, xmm7
- paddw xmm3, xmm7
-
- ; x - (*zbin_ptr + zbin_oq_value)
- psubw xmm1, xmm2
- psubw xmm5, xmm3
- movdqa [rsp + abs_minus_zbin], xmm1
- movdqa [rsp + abs_minus_zbin + 16], xmm5
-
- ; add (zbin_ptr + zbin_oq_value) back
- paddw xmm1, xmm2
- paddw xmm5, xmm3
-
- movdqa xmm2, [rdx]
- movdqa xmm6, [rdx + 16]
-
- movdqa xmm3, [rcx]
- movdqa xmm7, [rcx + 16]
-
- ; x + round
- paddw xmm1, xmm2
- paddw xmm5, xmm6
-
- ; y = x * quant_ptr >> 16
- pmulhw xmm3, xmm1
- pmulhw xmm7, xmm5
-
- ; y += x
- paddw xmm1, xmm3
- paddw xmm5, xmm7
-
- movdqa [rsp + temp_qcoeff], xmm1
- movdqa [rsp + temp_qcoeff + 16], xmm5
-
- pxor xmm6, xmm6
- ; zero qcoeff
- movdqa [rsp + qcoeff], xmm6
- movdqa [rsp + qcoeff + 16], xmm6
-
- mov rdx, [rdi + vp8_block_zrun_zbin_boost] ; zbin_boost_ptr
- mov rax, [rdi + vp8_block_quant_shift] ; quant_shift_ptr
- mov [rsp + zrun_zbin_boost], rdx
-
-%macro ZIGZAG_LOOP 1
- ; x
- movsx ecx, WORD PTR[rsp + abs_minus_zbin + %1 * 2]
-
- ; if (x >= zbin)
- sub cx, WORD PTR[rdx] ; x - zbin
- lea rdx, [rdx + 2] ; zbin_boost_ptr++
- jl .rq_zigzag_loop_%1 ; x < zbin
-
- movsx edi, WORD PTR[rsp + temp_qcoeff + %1 * 2]
-
- ; downshift by quant_shift[rc]
- movsx cx, BYTE PTR[rax + %1] ; quant_shift_ptr[rc]
- sar edi, cl ; also sets Z bit
- je .rq_zigzag_loop_%1 ; !y
- mov WORD PTR[rsp + qcoeff + %1 * 2], di ;qcoeff_ptr[rc] = temp_qcoeff[rc]
- mov rdx, [rsp + zrun_zbin_boost] ; reset to b->zrun_zbin_boost
-.rq_zigzag_loop_%1:
-%endmacro
-; in vp8_default_zig_zag1d order: see vp8/common/entropy.c
-ZIGZAG_LOOP 0
-ZIGZAG_LOOP 1
-ZIGZAG_LOOP 4
-ZIGZAG_LOOP 8
-ZIGZAG_LOOP 5
-ZIGZAG_LOOP 2
-ZIGZAG_LOOP 3
-ZIGZAG_LOOP 6
-ZIGZAG_LOOP 9
-ZIGZAG_LOOP 12
-ZIGZAG_LOOP 13
-ZIGZAG_LOOP 10
-ZIGZAG_LOOP 7
-ZIGZAG_LOOP 11
-ZIGZAG_LOOP 14
-ZIGZAG_LOOP 15
-
- movdqa xmm2, [rsp + qcoeff]
- movdqa xmm3, [rsp + qcoeff + 16]
-
- mov rcx, [rsi + vp8_blockd_dequant] ; dequant_ptr
- mov rdi, [rsi + vp8_blockd_dqcoeff] ; dqcoeff_ptr
-
- ; y ^ sz
- pxor xmm2, xmm0
- pxor xmm3, xmm4
- ; x = (y ^ sz) - sz
- psubw xmm2, xmm0
- psubw xmm3, xmm4
-
- ; dequant
- movdqa xmm0, [rcx]
- movdqa xmm1, [rcx + 16]
-
- mov rcx, [rsi + vp8_blockd_qcoeff] ; qcoeff_ptr
-
- pmullw xmm0, xmm2
- pmullw xmm1, xmm3
-
- movdqa [rcx], xmm2 ; store qcoeff
- movdqa [rcx + 16], xmm3
- movdqa [rdi], xmm0 ; store dqcoeff
- movdqa [rdi + 16], xmm1
-
- mov rcx, [rsi + vp8_blockd_eob]
-
- ; select the last value (in zig_zag order) for EOB
- pcmpeqw xmm2, xmm6
- pcmpeqw xmm3, xmm6
- ; !
- pcmpeqw xmm6, xmm6
- pxor xmm2, xmm6
- pxor xmm3, xmm6
- ; mask inv_zig_zag
- pand xmm2, [GLOBAL(inv_zig_zag)]
- pand xmm3, [GLOBAL(inv_zig_zag + 16)]
- ; select the max value
- pmaxsw xmm2, xmm3
- pshufd xmm3, xmm2, 00001110b
- pmaxsw xmm2, xmm3
- pshuflw xmm3, xmm2, 00001110b
- pmaxsw xmm2, xmm3
- pshuflw xmm3, xmm2, 00000001b
- pmaxsw xmm2, xmm3
- movd eax, xmm2
- and eax, 0xff
-
- mov BYTE PTR [rcx], al ; store eob
-
- ; begin epilog
- add rsp, stack_size
- pop rsp
-%if ABI_IS_32BIT
- pop rsi
- pop rdi
-%else
- %ifidn __OUTPUT_FORMAT__,x64
- pop rsi
- pop rdi
- %endif
-%endif
- RESTORE_GOT
- RESTORE_XMM
- pop rbp
- ret
-
-; void vp8_fast_quantize_b_sse2 | arg
-; (BLOCK *b, | 0
-; BLOCKD *d) | 1
-
-global sym(vp8_fast_quantize_b_sse2) PRIVATE
-sym(vp8_fast_quantize_b_sse2):
- push rbp
- mov rbp, rsp
- GET_GOT rbx
-
-%if ABI_IS_32BIT
- push rdi
- push rsi
-%else
- %ifidn __OUTPUT_FORMAT__,x64
- push rdi
- push rsi
- %else
- ; these registers are used for passing arguments
- %endif
-%endif
-
- ; end prolog
-
-%if ABI_IS_32BIT
- mov rdi, arg(0) ; BLOCK *b
- mov rsi, arg(1) ; BLOCKD *d
-%else
- %ifidn __OUTPUT_FORMAT__,x64
- mov rdi, rcx ; BLOCK *b
- mov rsi, rdx ; BLOCKD *d
- %else
- ;mov rdi, rdi ; BLOCK *b
- ;mov rsi, rsi ; BLOCKD *d
- %endif
-%endif
-
- mov rax, [rdi + vp8_block_coeff]
- mov rcx, [rdi + vp8_block_round]
- mov rdx, [rdi + vp8_block_quant_fast]
-
- ; z = coeff
- movdqa xmm0, [rax]
- movdqa xmm4, [rax + 16]
-
- ; dup z so we can save sz
- movdqa xmm1, xmm0
- movdqa xmm5, xmm4
-
- ; sz = z >> 15
- psraw xmm0, 15
- psraw xmm4, 15
-
- ; x = abs(z) = (z ^ sz) - sz
- pxor xmm1, xmm0
- pxor xmm5, xmm4
- psubw xmm1, xmm0
- psubw xmm5, xmm4
-
- ; x += round
- paddw xmm1, [rcx]
- paddw xmm5, [rcx + 16]
-
- mov rax, [rsi + vp8_blockd_qcoeff]
- mov rcx, [rsi + vp8_blockd_dequant]
- mov rdi, [rsi + vp8_blockd_dqcoeff]
-
- ; y = x * quant >> 16
- pmulhw xmm1, [rdx]
- pmulhw xmm5, [rdx + 16]
-
- ; x = (y ^ sz) - sz
- pxor xmm1, xmm0
- pxor xmm5, xmm4
- psubw xmm1, xmm0
- psubw xmm5, xmm4
-
- ; qcoeff = x
- movdqa [rax], xmm1
- movdqa [rax + 16], xmm5
-
- ; x * dequant
- movdqa xmm2, xmm1
- movdqa xmm3, xmm5
- pmullw xmm2, [rcx]
- pmullw xmm3, [rcx + 16]
-
- ; dqcoeff = x * dequant
- movdqa [rdi], xmm2
- movdqa [rdi + 16], xmm3
-
- pxor xmm4, xmm4 ;clear all bits
- pcmpeqw xmm1, xmm4
- pcmpeqw xmm5, xmm4
-
- pcmpeqw xmm4, xmm4 ;set all bits
- pxor xmm1, xmm4
- pxor xmm5, xmm4
-
- pand xmm1, [GLOBAL(inv_zig_zag)]
- pand xmm5, [GLOBAL(inv_zig_zag + 16)]
-
- pmaxsw xmm1, xmm5
-
- mov rcx, [rsi + vp8_blockd_eob]
-
- ; now down to 8
- pshufd xmm5, xmm1, 00001110b
-
- pmaxsw xmm1, xmm5
-
- ; only 4 left
- pshuflw xmm5, xmm1, 00001110b
-
- pmaxsw xmm1, xmm5
-
- ; okay, just 2!
- pshuflw xmm5, xmm1, 00000001b
-
- pmaxsw xmm1, xmm5
-
- movd eax, xmm1
- and eax, 0xff
-
- mov BYTE PTR [rcx], al ; store eob
-
- ; begin epilog
-%if ABI_IS_32BIT
- pop rsi
- pop rdi
-%else
- %ifidn __OUTPUT_FORMAT__,x64
- pop rsi
- pop rdi
- %endif
-%endif
-
- RESTORE_GOT
- pop rbp
- ret
-
-SECTION_RODATA
-align 16
-inv_zig_zag:
- dw 0x0001, 0x0002, 0x0006, 0x0007
- dw 0x0003, 0x0005, 0x0008, 0x000d
- dw 0x0004, 0x0009, 0x000c, 0x000e
- dw 0x000a, 0x000b, 0x000f, 0x0010
diff --git a/libvpx/vp8/encoder/x86/quantize_sse2.c b/libvpx/vp8/encoder/x86/quantize_sse2.c
new file mode 100644
index 0000000..f495bf2
--- /dev/null
+++ b/libvpx/vp8/encoder/x86/quantize_sse2.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vpx_config.h"
+#include "vp8_rtcd.h"
+#include "vpx_ports/x86.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp8/encoder/block.h"
+#include "vp8/common/entropy.h" /* vp8_default_inv_zig_zag */
+
+#include <mmintrin.h> /* MMX */
+#include <xmmintrin.h> /* SSE */
+#include <emmintrin.h> /* SSE2 */
+
+#define SELECT_EOB(i, z) \
+ do { \
+ short boost = *zbin_boost_ptr; \
+ int cmp = (x[z] < boost) | (y[z] == 0); \
+ zbin_boost_ptr++; \
+ if (cmp) \
+ goto select_eob_end_##i; \
+ qcoeff_ptr[z] = y[z]; \
+ eob = i; \
+ zbin_boost_ptr = b->zrun_zbin_boost; \
+ select_eob_end_##i:; \
+ } while (0)
+
+void vp8_regular_quantize_b_sse2(BLOCK *b, BLOCKD *d)
+{
+ char eob = 0;
+ short *zbin_boost_ptr = b->zrun_zbin_boost;
+ short *qcoeff_ptr = d->qcoeff;
+ DECLARE_ALIGNED_ARRAY(16, short, x, 16);
+ DECLARE_ALIGNED_ARRAY(16, short, y, 16);
+
+ __m128i sz0, x0, sz1, x1, y0, y1, x_minus_zbin0, x_minus_zbin1;
+ __m128i quant_shift0 = _mm_load_si128((__m128i *)(b->quant_shift));
+ __m128i quant_shift1 = _mm_load_si128((__m128i *)(b->quant_shift + 8));
+ __m128i z0 = _mm_load_si128((__m128i *)(b->coeff));
+ __m128i z1 = _mm_load_si128((__m128i *)(b->coeff+8));
+ __m128i zbin_extra = _mm_cvtsi32_si128(b->zbin_extra);
+ __m128i zbin0 = _mm_load_si128((__m128i *)(b->zbin));
+ __m128i zbin1 = _mm_load_si128((__m128i *)(b->zbin + 8));
+ __m128i round0 = _mm_load_si128((__m128i *)(b->round));
+ __m128i round1 = _mm_load_si128((__m128i *)(b->round + 8));
+ __m128i quant0 = _mm_load_si128((__m128i *)(b->quant));
+ __m128i quant1 = _mm_load_si128((__m128i *)(b->quant + 8));
+ __m128i dequant0 = _mm_load_si128((__m128i *)(d->dequant));
+ __m128i dequant1 = _mm_load_si128((__m128i *)(d->dequant + 8));
+
+ vpx_memset(qcoeff_ptr, 0, 32);
+
+ /* Duplicate to all lanes. */
+ zbin_extra = _mm_shufflelo_epi16(zbin_extra, 0);
+ zbin_extra = _mm_unpacklo_epi16(zbin_extra, zbin_extra);
+
+ /* Sign of z: z >> 15 */
+ sz0 = _mm_srai_epi16(z0, 15);
+ sz1 = _mm_srai_epi16(z1, 15);
+
+ /* x = abs(z): (z ^ sz) - sz */
+ x0 = _mm_xor_si128(z0, sz0);
+ x1 = _mm_xor_si128(z1, sz1);
+ x0 = _mm_sub_epi16(x0, sz0);
+ x1 = _mm_sub_epi16(x1, sz1);
+
+ /* zbin[] + zbin_extra */
+ zbin0 = _mm_add_epi16(zbin0, zbin_extra);
+ zbin1 = _mm_add_epi16(zbin1, zbin_extra);
+
+ /* In C x is compared to zbin where zbin = zbin[] + boost + extra. Rebalance
+ * the equation because boost is the only value which can change:
+ * x - (zbin[] + extra) >= boost */
+ x_minus_zbin0 = _mm_sub_epi16(x0, zbin0);
+ x_minus_zbin1 = _mm_sub_epi16(x1, zbin1);
+
+ _mm_store_si128((__m128i *)(x), x_minus_zbin0);
+ _mm_store_si128((__m128i *)(x + 8), x_minus_zbin1);
+
+ /* All the remaining calculations are valid whether they are done now with
+ * simd or later inside the loop one at a time. */
+ x0 = _mm_add_epi16(x0, round0);
+ x1 = _mm_add_epi16(x1, round1);
+
+ y0 = _mm_mulhi_epi16(x0, quant0);
+ y1 = _mm_mulhi_epi16(x1, quant1);
+
+ y0 = _mm_add_epi16(y0, x0);
+ y1 = _mm_add_epi16(y1, x1);
+
+ /* Instead of shifting each value independently we convert the scaling
+ * factor with 1 << (16 - shift) so we can use multiply/return high half. */
+ y0 = _mm_mulhi_epi16(y0, quant_shift0);
+ y1 = _mm_mulhi_epi16(y1, quant_shift1);
+
+ /* Return the sign: (y ^ sz) - sz */
+ y0 = _mm_xor_si128(y0, sz0);
+ y1 = _mm_xor_si128(y1, sz1);
+ y0 = _mm_sub_epi16(y0, sz0);
+ y1 = _mm_sub_epi16(y1, sz1);
+
+ _mm_store_si128((__m128i *)(y), y0);
+ _mm_store_si128((__m128i *)(y + 8), y1);
+
+ zbin_boost_ptr = b->zrun_zbin_boost;
+
+ /* The loop gets unrolled anyway. Avoid the vp8_default_zig_zag1d lookup. */
+ SELECT_EOB(1, 0);
+ SELECT_EOB(2, 1);
+ SELECT_EOB(3, 4);
+ SELECT_EOB(4, 8);
+ SELECT_EOB(5, 5);
+ SELECT_EOB(6, 2);
+ SELECT_EOB(7, 3);
+ SELECT_EOB(8, 6);
+ SELECT_EOB(9, 9);
+ SELECT_EOB(10, 12);
+ SELECT_EOB(11, 13);
+ SELECT_EOB(12, 10);
+ SELECT_EOB(13, 7);
+ SELECT_EOB(14, 11);
+ SELECT_EOB(15, 14);
+ SELECT_EOB(16, 15);
+
+ y0 = _mm_load_si128((__m128i *)(d->qcoeff));
+ y1 = _mm_load_si128((__m128i *)(d->qcoeff + 8));
+
+ /* dqcoeff = qcoeff * dequant */
+ y0 = _mm_mullo_epi16(y0, dequant0);
+ y1 = _mm_mullo_epi16(y1, dequant1);
+
+ _mm_store_si128((__m128i *)(d->dqcoeff), y0);
+ _mm_store_si128((__m128i *)(d->dqcoeff + 8), y1);
+
+ *d->eob = eob;
+}
+
+void vp8_fast_quantize_b_sse2(BLOCK *b, BLOCKD *d)
+{
+ __m128i z0 = _mm_load_si128((__m128i *)(b->coeff));
+ __m128i z1 = _mm_load_si128((__m128i *)(b->coeff + 8));
+ __m128i round0 = _mm_load_si128((__m128i *)(b->round));
+ __m128i round1 = _mm_load_si128((__m128i *)(b->round + 8));
+ __m128i quant_fast0 = _mm_load_si128((__m128i *)(b->quant_fast));
+ __m128i quant_fast1 = _mm_load_si128((__m128i *)(b->quant_fast + 8));
+ __m128i dequant0 = _mm_load_si128((__m128i *)(d->dequant));
+ __m128i dequant1 = _mm_load_si128((__m128i *)(d->dequant + 8));
+ __m128i inv_zig_zag0 = _mm_load_si128((const __m128i *)(vp8_default_inv_zig_zag));
+ __m128i inv_zig_zag1 = _mm_load_si128((const __m128i *)(vp8_default_inv_zig_zag + 8));
+
+ __m128i sz0, sz1, x0, x1, y0, y1, xdq0, xdq1, zeros, ones;
+
+ /* sign of z: z >> 15 */
+ sz0 = _mm_srai_epi16(z0, 15);
+ sz1 = _mm_srai_epi16(z1, 15);
+
+ /* x = abs(z): (z ^ sz) - sz */
+ x0 = _mm_xor_si128(z0, sz0);
+ x1 = _mm_xor_si128(z1, sz1);
+ x0 = _mm_sub_epi16(x0, sz0);
+ x1 = _mm_sub_epi16(x1, sz1);
+
+ /* x += round */
+ x0 = _mm_add_epi16(x0, round0);
+ x1 = _mm_add_epi16(x1, round1);
+
+ /* y = (x * quant) >> 16 */
+ y0 = _mm_mulhi_epi16(x0, quant_fast0);
+ y1 = _mm_mulhi_epi16(x1, quant_fast1);
+
+ /* x = abs(y) = (y ^ sz) - sz */
+ y0 = _mm_xor_si128(y0, sz0);
+ y1 = _mm_xor_si128(y1, sz1);
+ x0 = _mm_sub_epi16(y0, sz0);
+ x1 = _mm_sub_epi16(y1, sz1);
+
+ /* qcoeff = x */
+ _mm_store_si128((__m128i *)(d->qcoeff), x0);
+ _mm_store_si128((__m128i *)(d->qcoeff + 8), x1);
+
+ /* x * dequant */
+ xdq0 = _mm_mullo_epi16(x0, dequant0);
+ xdq1 = _mm_mullo_epi16(x1, dequant1);
+
+ /* dqcoeff = x * dequant */
+ _mm_store_si128((__m128i *)(d->dqcoeff), xdq0);
+ _mm_store_si128((__m128i *)(d->dqcoeff + 8), xdq1);
+
+ /* build a mask for the zig zag */
+ zeros = _mm_setzero_si128();
+
+ x0 = _mm_cmpeq_epi16(x0, zeros);
+ x1 = _mm_cmpeq_epi16(x1, zeros);
+
+ ones = _mm_cmpeq_epi16(zeros, zeros);
+
+ x0 = _mm_xor_si128(x0, ones);
+ x1 = _mm_xor_si128(x1, ones);
+
+ x0 = _mm_and_si128(x0, inv_zig_zag0);
+ x1 = _mm_and_si128(x1, inv_zig_zag1);
+
+ x0 = _mm_max_epi16(x0, x1);
+
+ /* now down to 8 */
+ x1 = _mm_shuffle_epi32(x0, 0xE); // 0b00001110
+
+ x0 = _mm_max_epi16(x0, x1);
+
+ /* only 4 left */
+ x1 = _mm_shufflelo_epi16(x0, 0xE); // 0b00001110
+
+ x0 = _mm_max_epi16(x0, x1);
+
+ /* okay, just 2! */
+ x1 = _mm_shufflelo_epi16(x0, 0x1); // 0b00000001
+
+ x0 = _mm_max_epi16(x0, x1);
+
+ *d->eob = 0xFF & _mm_cvtsi128_si32(x0);
+}
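
[Annotation] The tail of both intrinsic functions computes the EOB the same way the deleted assembly did: mask each nonzero quantized coefficient with its 1-based zig-zag scan position and take the horizontal maximum. The scalar equivalent, as a sketch:

    /* Scalar form of the SSE2 EOB selection above; inv_zig_zag holds
     * 1-based scan positions (vp8_default_inv_zig_zag, 16 entries). */
    static char eob_from_qcoeff(const short *qcoeff,
                                const short *inv_zig_zag) {
      short eob = 0;
      int i;
      for (i = 0; i < 16; i++)
        if (qcoeff[i] != 0 && inv_zig_zag[i] > eob)
          eob = inv_zig_zag[i];
      return (char)eob;
    }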
diff --git a/libvpx/vp8/encoder/x86/quantize_sse4.asm b/libvpx/vp8/encoder/x86/quantize_sse4.asm
index f0e5d40..dbd171b 100644
--- a/libvpx/vp8/encoder/x86/quantize_sse4.asm
+++ b/libvpx/vp8/encoder/x86/quantize_sse4.asm
@@ -9,7 +9,7 @@
%include "vpx_ports/x86_abi_support.asm"
-%include "asm_enc_offsets.asm"
+%include "vp8_asm_enc_offsets.asm"
; void vp8_regular_quantize_b_sse4 | arg
@@ -31,7 +31,7 @@ sym(vp8_regular_quantize_b_sse4):
%define stack_size 32
sub rsp, stack_size
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
SAVE_XMM 8, u
push rdi
push rsi
@@ -43,7 +43,7 @@ sym(vp8_regular_quantize_b_sse4):
mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d
%else
@@ -240,7 +240,7 @@ ZIGZAG_LOOP 15, 7, xmm3, xmm7, xmm8
pop rbp
%else
%undef xmm5
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
pop rsi
pop rdi
RESTORE_XMM
diff --git a/libvpx/vp8/encoder/x86/quantize_ssse3.asm b/libvpx/vp8/encoder/x86/quantize_ssse3.asm
index dd526f4..7b1dc11 100644
--- a/libvpx/vp8/encoder/x86/quantize_ssse3.asm
+++ b/libvpx/vp8/encoder/x86/quantize_ssse3.asm
@@ -9,7 +9,7 @@
%include "vpx_ports/x86_abi_support.asm"
-%include "asm_enc_offsets.asm"
+%include "vp8_asm_enc_offsets.asm"
; void vp8_fast_quantize_b_ssse3 | arg
@@ -27,7 +27,7 @@ sym(vp8_fast_quantize_b_ssse3):
push rdi
push rsi
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
push rdi
push rsi
%endif
@@ -38,7 +38,7 @@ sym(vp8_fast_quantize_b_ssse3):
mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d
%else
@@ -122,7 +122,7 @@ sym(vp8_fast_quantize_b_ssse3):
pop rsi
pop rdi
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
pop rsi
pop rdi
%endif
diff --git a/libvpx/vp8/encoder/x86/temporal_filter_apply_sse2.asm b/libvpx/vp8/encoder/x86/temporal_filter_apply_sse2.asm
index ce9d983..bd92b39 100644
--- a/libvpx/vp8/encoder/x86/temporal_filter_apply_sse2.asm
+++ b/libvpx/vp8/encoder/x86/temporal_filter_apply_sse2.asm
@@ -50,7 +50,7 @@ sym(vp8_temporal_filter_apply_sse2):
; 0x8000 >> (16 - strength)
mov rdx, 16
sub rdx, arg(4) ; 16 - strength
- movd xmm4, rdx ; can't use rdx w/ shift
+ movq xmm4, rdx ; can't use rdx w/ shift
movdqa xmm5, [GLOBAL(_const_top_bit)]
psrlw xmm5, xmm4
movdqa [rsp + rounding_bit], xmm5
diff --git a/libvpx/vp8/encoder/x86/vp8_enc_stubs_mmx.c b/libvpx/vp8/encoder/x86/vp8_enc_stubs_mmx.c
index da25f52..cf3d8ca 100644
--- a/libvpx/vp8/encoder/x86/vp8_enc_stubs_mmx.c
+++ b/libvpx/vp8/encoder/x86/vp8_enc_stubs_mmx.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx_ports/x86.h"
#include "vp8/encoder/block.h"
diff --git a/libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c b/libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c
index 68db815..3dfbee3 100644
--- a/libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c
+++ b/libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c
@@ -10,7 +10,7 @@
#include "vpx_config.h"
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx_ports/x86.h"
#include "vp8/encoder/block.h"
diff --git a/libvpx/vp8/vp8_common.mk b/libvpx/vp8/vp8_common.mk
index a328f46..cde2651 100644
--- a/libvpx/vp8/vp8_common.mk
+++ b/libvpx/vp8/vp8_common.mk
@@ -14,7 +14,6 @@ VP8_COMMON_SRCS-yes += common/ppflags.h
VP8_COMMON_SRCS-yes += common/onyx.h
VP8_COMMON_SRCS-yes += common/onyxd.h
VP8_COMMON_SRCS-yes += common/alloccommon.c
-VP8_COMMON_SRCS-yes += common/asm_com_offsets.c
VP8_COMMON_SRCS-yes += common/blockd.c
VP8_COMMON_SRCS-yes += common/coefupdateprobs.h
VP8_COMMON_SRCS-yes += common/debugmodes.c
@@ -67,6 +66,7 @@ VP8_COMMON_SRCS-yes += common/setupintrarecon.c
VP8_COMMON_SRCS-yes += common/swapyv12buffer.c
VP8_COMMON_SRCS-yes += common/variance_c.c
VP8_COMMON_SRCS-yes += common/variance.h
+VP8_COMMON_SRCS-yes += common/vp8_asm_com_offsets.c
VP8_COMMON_SRCS-yes += common/vp8_entropymodedata.h
@@ -191,3 +191,8 @@ VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/variance_neon$(ASM)
VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp8_subpixelvariance8x8_neon$(ASM)
VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp8_subpixelvariance16x16_neon$(ASM)
VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp8_subpixelvariance16x16s_neon$(ASM)
+
+$(eval $(call asm_offsets_template,\
+ vp8_asm_com_offsets.asm, $(VP8_PREFIX)common/vp8_asm_com_offsets.c))
+
+$(eval $(call rtcd_h_template,vp8_rtcd,vp8/common/rtcd_defs.sh))
diff --git a/libvpx/vp8/vp8_cx_iface.c b/libvpx/vp8/vp8_cx_iface.c
index eeac3a8..4531d5a 100644
--- a/libvpx/vp8/vp8_cx_iface.c
+++ b/libvpx/vp8/vp8_cx_iface.c
@@ -9,7 +9,7 @@
*/
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx/vpx_codec.h"
#include "vpx/internal/vpx_codec_internal.h"
#include "vpx_version.h"
@@ -582,7 +582,7 @@ static vpx_codec_err_t vp8e_init(vpx_codec_ctx_t *ctx,
struct VP8_COMP *optr;
- vpx_rtcd();
+ vp8_rtcd();
if (!ctx->priv)
{
@@ -684,6 +684,8 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
yv12->u_buffer = img->planes[VPX_PLANE_U];
yv12->v_buffer = img->planes[VPX_PLANE_V];
+ yv12->y_crop_width = img->d_w;
+ yv12->y_crop_height = img->d_h;
yv12->y_width = img->d_w;
yv12->y_height = img->d_h;
yv12->uv_width = (1 + yv12->y_width) / 2;
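
[Annotation] (1 + w) / 2 rounds the chroma dimensions up, so an odd luma width of, say, 5 yields a chroma width of 3. Note that the decoder-side copy of image2yuvconfig further down in this diff still truncates with y_width / 2; this change touches only the encoder path.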
@@ -1178,7 +1180,9 @@ static vpx_codec_err_t vp8e_set_scalemode(vpx_codec_alg_priv_t *ctx,
{
int res;
vpx_scaling_mode_t scalemode = *(vpx_scaling_mode_t *)data ;
- res = vp8_set_internal_size(ctx->cpi, scalemode.h_scaling_mode, scalemode.v_scaling_mode);
+ res = vp8_set_internal_size(ctx->cpi,
+ (VPX_SCALING)scalemode.h_scaling_mode,
+ (VPX_SCALING)scalemode.v_scaling_mode);
if (!res)
{
diff --git a/libvpx/vp8/vp8_dx_iface.c b/libvpx/vp8/vp8_dx_iface.c
index c13d697..c826f69 100644
--- a/libvpx/vp8/vp8_dx_iface.c
+++ b/libvpx/vp8/vp8_dx_iface.c
@@ -11,7 +11,7 @@
#include <stdlib.h>
#include <string.h>
-#include "vpx_rtcd.h"
+#include "vp8_rtcd.h"
#include "vpx/vpx_decoder.h"
#include "vpx/vp8dx.h"
#include "vpx/internal/vpx_codec_internal.h"
@@ -64,7 +64,6 @@ struct vpx_codec_alg_priv
vp8_stream_info_t si;
int defer_alloc;
int decoder_init;
- struct VP8D_COMP *pbi;
int postproc_cfg_set;
vp8_postproc_cfg_t postproc_cfg;
#if CONFIG_POSTPROC_VISUALIZER
@@ -74,9 +73,13 @@ struct vpx_codec_alg_priv
int dbg_color_b_modes_flag;
int dbg_display_mv_flag;
#endif
+ vp8_decrypt_cb *decrypt_cb;
+ void *decrypt_state;
vpx_image_t img;
int img_setup;
+ struct frame_buffers yv12_frame_buffers;
void *user_priv;
+ FRAGMENT_DATA fragments;
};
static unsigned long vp8_priv_sz(const vpx_codec_dec_cfg_t *si, vpx_codec_flags_t flags)
@@ -163,6 +166,8 @@ static void vp8_init_ctx(vpx_codec_ctx_t *ctx, const vpx_codec_mmap_t *mmap)
ctx->priv->alg_priv->mmaps[0] = *mmap;
ctx->priv->alg_priv->si.sz = sizeof(ctx->priv->alg_priv->si);
+ ctx->priv->alg_priv->decrypt_cb = NULL;
+ ctx->priv->alg_priv->decrypt_state = NULL;
ctx->priv->init_flags = ctx->init_flags;
if (ctx->config.dec)
@@ -194,7 +199,7 @@ static vpx_codec_err_t vp8_init(vpx_codec_ctx_t *ctx,
vpx_codec_err_t res = VPX_CODEC_OK;
(void) data;
- vpx_rtcd();
+ vp8_rtcd();
/* This function only allocates space for the vpx_codec_alg_priv_t
* structure. More memory may be required at the time the stream
@@ -210,14 +215,37 @@ static vpx_codec_err_t vp8_init(vpx_codec_ctx_t *ctx,
mmap.flags = vp8_mem_req_segs[0].flags;
res = vp8_mmap_alloc(&mmap);
+ if (res != VPX_CODEC_OK) return res;
- if (!res)
- {
- vp8_init_ctx(ctx, &mmap);
+ vp8_init_ctx(ctx, &mmap);
- ctx->priv->alg_priv->defer_alloc = 1;
- /*post processing level initialized to do nothing */
- }
+ /* initialize number of fragments to zero */
+ ctx->priv->alg_priv->fragments.count = 0;
+ /* is input fragments enabled? */
+ ctx->priv->alg_priv->fragments.enabled =
+ (ctx->priv->alg_priv->base.init_flags &
+ VPX_CODEC_USE_INPUT_FRAGMENTS);
+
+ ctx->priv->alg_priv->defer_alloc = 1;
+ /*post processing level initialized to do nothing */
+ }
+
+ ctx->priv->alg_priv->yv12_frame_buffers.use_frame_threads =
+ (ctx->priv->alg_priv->base.init_flags &
+ VPX_CODEC_USE_FRAME_THREADING);
+
+ /* for now, disable frame threading */
+ ctx->priv->alg_priv->yv12_frame_buffers.use_frame_threads = 0;
+
+ if(ctx->priv->alg_priv->yv12_frame_buffers.use_frame_threads &&
+ (( ctx->priv->alg_priv->base.init_flags &
+ VPX_CODEC_USE_ERROR_CONCEALMENT)
+ || ( ctx->priv->alg_priv->base.init_flags &
+ VPX_CODEC_USE_INPUT_FRAGMENTS) ) )
+ {
+ /* row-based threading, error concealment, and input fragments will
+ * not be supported when using frame-based threading */
+ res = VPX_CODEC_INVALID_PARAM;
}
return res;
@@ -227,7 +255,7 @@ static vpx_codec_err_t vp8_destroy(vpx_codec_alg_priv_t *ctx)
{
int i;
- vp8dx_remove_decompressor(ctx->pbi);
+ vp8_remove_decoder_instances(&ctx->yv12_frame_buffers);
for (i = NELEMENTS(ctx->mmaps) - 1; i >= 0; i--)
{
@@ -238,14 +266,18 @@ static vpx_codec_err_t vp8_destroy(vpx_codec_alg_priv_t *ctx)
return VPX_CODEC_OK;
}
-static vpx_codec_err_t vp8_peek_si(const uint8_t *data,
- unsigned int data_sz,
- vpx_codec_stream_info_t *si)
+static vpx_codec_err_t vp8_peek_si_internal(const uint8_t *data,
+ unsigned int data_sz,
+ vpx_codec_stream_info_t *si,
+ vp8_decrypt_cb *decrypt_cb,
+ void *decrypt_state)
{
vpx_codec_err_t res = VPX_CODEC_OK;
if(data + data_sz <= data)
+ {
res = VPX_CODEC_INVALID_PARAM;
+ }
else
{
        /* Parse uncompressed part of key frame header.
@@ -254,30 +286,44 @@ static vpx_codec_err_t vp8_peek_si(const uint8_t *data,
* 4 bytes:- including image width and height in the lowest 14 bits
* of each 2-byte value.
*/
+ uint8_t clear_buffer[10];
+ const uint8_t *clear = data;
+ if (decrypt_cb)
+ {
+ int n = data_sz > 10 ? 10 : data_sz;
+ decrypt_cb(decrypt_state, data, clear_buffer, n);
+ clear = clear_buffer;
+ }
si->is_kf = 0;
- if (data_sz >= 10 && !(data[0] & 0x01)) /* I-Frame */
+ if (data_sz >= 10 && !(clear[0] & 0x01)) /* I-Frame */
{
- const uint8_t *c = data + 3;
si->is_kf = 1;
/* vet via sync code */
- if (c[0] != 0x9d || c[1] != 0x01 || c[2] != 0x2a)
+ if (clear[3] != 0x9d || clear[4] != 0x01 || clear[5] != 0x2a)
res = VPX_CODEC_UNSUP_BITSTREAM;
- si->w = (c[3] | (c[4] << 8)) & 0x3fff;
- si->h = (c[5] | (c[6] << 8)) & 0x3fff;
+ si->w = (clear[6] | (clear[7] << 8)) & 0x3fff;
+ si->h = (clear[8] | (clear[9] << 8)) & 0x3fff;
/*printf("w=%d, h=%d\n", si->w, si->h);*/
if (!(si->h | si->w))
res = VPX_CODEC_UNSUP_BITSTREAM;
}
else
+ {
res = VPX_CODEC_UNSUP_BITSTREAM;
+ }
}
return res;
+}
+static vpx_codec_err_t vp8_peek_si(const uint8_t *data,
+ unsigned int data_sz,
+ vpx_codec_stream_info_t *si) {
+ return vp8_peek_si_internal(data, data_sz, si, NULL, NULL);
}
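
[Annotation] The ten clear bytes inspected above follow the fixed VP8 key frame layout:

    /* bytes 0-2 : frame tag; bit 0 of byte 0 == 0 marks a key frame
     * bytes 3-5 : sync code 0x9d 0x01 0x2a
     * bytes 6-7 : 16-bit LE value, low 14 bits = width
     * bytes 8-9 : 16-bit LE value, low 14 bits = height */

Routing the reads through clear[] rather than data[] is what lets the decryption callback supply a decrypted copy of just this prefix.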
static vpx_codec_err_t vp8_get_si(vpx_codec_alg_priv_t *ctx,
@@ -343,6 +389,47 @@ static void yuvconfig2image(vpx_image_t *img,
img->self_allocd = 0;
}
+static int
+update_fragments(vpx_codec_alg_priv_t *ctx,
+ const uint8_t *data,
+ unsigned int data_sz,
+ vpx_codec_err_t *res)
+{
+ *res = VPX_CODEC_OK;
+
+ if (ctx->fragments.count == 0)
+ {
+ /* New frame, reset fragment pointers and sizes */
+ vpx_memset((void*)ctx->fragments.ptrs, 0, sizeof(ctx->fragments.ptrs));
+ vpx_memset(ctx->fragments.sizes, 0, sizeof(ctx->fragments.sizes));
+ }
+ if (ctx->fragments.enabled && !(data == NULL && data_sz == 0))
+ {
+ /* Store a pointer to this fragment and return. We haven't
+ * received the complete frame yet, so we defer decoding.
+ */
+ ctx->fragments.ptrs[ctx->fragments.count] = data;
+ ctx->fragments.sizes[ctx->fragments.count] = data_sz;
+ ctx->fragments.count++;
+ if (ctx->fragments.count > (1 << EIGHT_PARTITION) + 1)
+ {
+ ctx->fragments.count = 0;
+ *res = VPX_CODEC_INVALID_PARAM;
+ return -1;
+ }
+ return 0;
+ }
+
+ if (!ctx->fragments.enabled)
+ {
+ ctx->fragments.ptrs[0] = data;
+ ctx->fragments.sizes[0] = data_sz;
+ ctx->fragments.count = 1;
+ }
+
+ return 1;
+}
+
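
[Annotation] update_fragments establishes a small calling contract for vp8_decode below: it returns -1 when too many fragments arrive (with *res set to VPX_CODEC_INVALID_PARAM), 0 when it has only buffered a fragment and the frame is still incomplete (with *res left VPX_CODEC_OK), and 1 when ctx->fragments holds a complete frame ready to decode.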
static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
const uint8_t *data,
unsigned int data_sz,
@@ -353,6 +440,11 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
unsigned int resolution_change = 0;
unsigned int w, h;
+
+ /* Update the input fragment data */
+ if(update_fragments(ctx, data, data_sz, &res) <= 0)
+ return res;
+
/* Determine the stream parameters. Note that we rely on peek_si to
* validate that we have a buffer that does not wrap around the top
* of the heap.
@@ -360,7 +452,8 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
w = ctx->si.w;
h = ctx->si.h;
- res = ctx->base.iface->dec.peek_si(data, data_sz, &ctx->si);
+ res = vp8_peek_si_internal(ctx->fragments.ptrs[0], ctx->fragments.sizes[0],
+ &ctx->si, ctx->decrypt_cb, ctx->decrypt_state);
if((res == VPX_CODEC_UNSUP_BITSTREAM) && !ctx->si.is_kf)
{
@@ -412,7 +505,6 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
if (!res)
{
VP8D_CONFIG oxcf;
- struct VP8D_COMP* optr;
oxcf.Width = ctx->si.w;
oxcf.Height = ctx->si.h;
@@ -421,10 +513,6 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
oxcf.max_threads = ctx->cfg.threads;
oxcf.error_concealment =
(ctx->base.init_flags & VPX_CODEC_USE_ERROR_CONCEALMENT);
- oxcf.input_fragments =
- (ctx->base.init_flags & VPX_CODEC_USE_INPUT_FRAGMENTS);
-
- optr = vp8dx_create_decompressor(&oxcf);
/* If postprocessing was enabled by the application and a
* configuration has not been provided, default it.
@@ -438,20 +526,19 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
ctx->postproc_cfg.noise_level = 0;
}
- if (!optr)
- res = VPX_CODEC_ERROR;
- else
- ctx->pbi = optr;
+ res = vp8_create_decoder_instances(&ctx->yv12_frame_buffers, &oxcf);
+ ctx->yv12_frame_buffers.pbi[0]->decrypt_cb = ctx->decrypt_cb;
+ ctx->yv12_frame_buffers.pbi[0]->decrypt_state = ctx->decrypt_state;
}
ctx->decoder_init = 1;
}
- if (!res && ctx->pbi)
+ if (!res)
{
+ VP8D_COMP *pbi = ctx->yv12_frame_buffers.pbi[0];
if(resolution_change)
{
- VP8D_COMP *pbi = ctx->pbi;
VP8_COMMON *const pc = & pbi->common;
MACROBLOCKD *const xd = & pbi->mb;
#if CONFIG_MULTITHREAD
@@ -541,15 +628,20 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
pbi->common.error.setjmp = 0;
/* required to get past the first get_free_fb() call */
- ctx->pbi->common.fb_idx_ref_cnt[0] = 0;
+ pbi->common.fb_idx_ref_cnt[0] = 0;
}
+ /* update the pbi fragment data */
+ pbi->fragments = ctx->fragments;
+
ctx->user_priv = user_priv;
- if (vp8dx_receive_compressed_data(ctx->pbi, data_sz, data, deadline))
+ if (vp8dx_receive_compressed_data(pbi, data_sz, data, deadline))
{
- VP8D_COMP *pbi = (VP8D_COMP *)ctx->pbi;
res = update_error_state(ctx, &pbi->common.error);
}
+
+ /* get ready for the next series of fragments */
+ ctx->fragments.count = 0;
}
return res;
@@ -563,7 +655,7 @@ static vpx_image_t *vp8_get_frame(vpx_codec_alg_priv_t *ctx,
/* iter acts as a flip flop, so an image is only returned on the first
* call to get_frame.
*/
- if (!(*iter))
+ if (!(*iter) && ctx->yv12_frame_buffers.pbi[0])
{
YV12_BUFFER_CONFIG sd;
int64_t time_stamp = 0, time_end_stamp = 0;
@@ -590,7 +682,8 @@ static vpx_image_t *vp8_get_frame(vpx_codec_alg_priv_t *ctx,
#endif
}
- if (0 == vp8dx_get_raw_frame(ctx->pbi, &sd, &time_stamp, &time_end_stamp, &flags))
+ if (0 == vp8dx_get_raw_frame(ctx->yv12_frame_buffers.pbi[0], &sd,
+ &time_stamp, &time_end_stamp, &flags))
{
yuvconfig2image(&ctx->img, &sd, ctx->user_priv);
@@ -693,6 +786,8 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
yv12->u_buffer = img->planes[VPX_PLANE_U];
yv12->v_buffer = img->planes[VPX_PLANE_V];
+ yv12->y_crop_width = img->d_w;
+ yv12->y_crop_height = img->d_h;
yv12->y_width = img->d_w;
yv12->y_height = img->d_h;
yv12->uv_width = yv12->y_width / 2;
@@ -715,14 +810,15 @@ static vpx_codec_err_t vp8_set_reference(vpx_codec_alg_priv_t *ctx,
vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
- if (data)
+ if (data && !ctx->yv12_frame_buffers.use_frame_threads)
{
vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
YV12_BUFFER_CONFIG sd;
image2yuvconfig(&frame->img, &sd);
- return vp8dx_set_reference(ctx->pbi, frame->frame_type, &sd);
+ return vp8dx_set_reference(ctx->yv12_frame_buffers.pbi[0],
+ frame->frame_type, &sd);
}
else
return VPX_CODEC_INVALID_PARAM;
@@ -736,14 +832,15 @@ static vpx_codec_err_t vp8_get_reference(vpx_codec_alg_priv_t *ctx,
vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
- if (data)
+ if (data && !ctx->yv12_frame_buffers.use_frame_threads)
{
vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
YV12_BUFFER_CONFIG sd;
image2yuvconfig(&frame->img, &sd);
- return vp8dx_get_reference(ctx->pbi, frame->frame_type, &sd);
+ return vp8dx_get_reference(ctx->yv12_frame_buffers.pbi[0],
+ frame->frame_type, &sd);
}
else
return VPX_CODEC_INVALID_PARAM;
@@ -799,10 +896,11 @@ static vpx_codec_err_t vp8_get_last_ref_updates(vpx_codec_alg_priv_t *ctx,
va_list args)
{
int *update_info = va_arg(args, int *);
- VP8D_COMP *pbi = (VP8D_COMP *)ctx->pbi;
- if (update_info)
+ if (update_info && !ctx->yv12_frame_buffers.use_frame_threads)
{
+ VP8D_COMP *pbi = (VP8D_COMP *)ctx->yv12_frame_buffers.pbi[0];
+
*update_info = pbi->common.refresh_alt_ref_frame * (int) VP8_ALTR_FRAME
+ pbi->common.refresh_golden_frame * (int) VP8_GOLD_FRAME
+ pbi->common.refresh_last_frame * (int) VP8_LAST_FRAME;
@@ -819,11 +917,11 @@ static vpx_codec_err_t vp8_get_last_ref_frame(vpx_codec_alg_priv_t *ctx,
va_list args)
{
int *ref_info = va_arg(args, int *);
- VP8D_COMP *pbi = (VP8D_COMP *)ctx->pbi;
- VP8_COMMON *oci = &pbi->common;
- if (ref_info)
+ if (ref_info && !ctx->yv12_frame_buffers.use_frame_threads)
{
+ VP8D_COMP *pbi = (VP8D_COMP *)ctx->yv12_frame_buffers.pbi[0];
+ VP8_COMMON *oci = &pbi->common;
*ref_info =
(vp8dx_references_buffer( oci, ALTREF_FRAME )?VP8_ALTR_FRAME:0) |
(vp8dx_references_buffer( oci, GOLDEN_FRAME )?VP8_GOLD_FRAME:0) |
@@ -841,10 +939,10 @@ static vpx_codec_err_t vp8_get_frame_corrupted(vpx_codec_alg_priv_t *ctx,
{
int *corrupted = va_arg(args, int *);
+ VP8D_COMP *pbi = (VP8D_COMP *)ctx->yv12_frame_buffers.pbi[0];
- if (corrupted)
+ if (corrupted && pbi)
{
- VP8D_COMP *pbi = (VP8D_COMP *)ctx->pbi;
*corrupted = pbi->common.frame_to_show->corrupted;
return VPX_CODEC_OK;
@@ -854,6 +952,25 @@ static vpx_codec_err_t vp8_get_frame_corrupted(vpx_codec_alg_priv_t *ctx,
}
+static vpx_codec_err_t vp8_set_decryptor(vpx_codec_alg_priv_t *ctx,
+ int ctrl_id,
+ va_list args)
+{
+ vp8_decrypt_init *init = va_arg(args, vp8_decrypt_init *);
+
+ if (init)
+ {
+ ctx->decrypt_cb = init->decrypt_cb;
+ ctx->decrypt_state = init->decrypt_state;
+ }
+ else
+ {
+ ctx->decrypt_cb = NULL;
+ ctx->decrypt_state = NULL;
+ }
+ return VPX_CODEC_OK;
+}
+
vpx_codec_ctrl_fn_map_t vp8_ctf_maps[] =
{
{VP8_SET_REFERENCE, vp8_set_reference},
@@ -866,6 +983,7 @@ vpx_codec_ctrl_fn_map_t vp8_ctf_maps[] =
{VP8D_GET_LAST_REF_UPDATES, vp8_get_last_ref_updates},
{VP8D_GET_FRAME_CORRUPTED, vp8_get_frame_corrupted},
{VP8D_GET_LAST_REF_USED, vp8_get_last_ref_frame},
+ {VP8D_SET_DECRYPTOR, vp8_set_decryptor},
{ -1, NULL},
};
diff --git a/libvpx/vp8/vp8cx.mk b/libvpx/vp8/vp8cx.mk
index 0ae2f10..cd091f3 100644
--- a/libvpx/vp8/vp8cx.mk
+++ b/libvpx/vp8/vp8cx.mk
@@ -9,8 +9,6 @@
##
-include $(SRC_PATH_BARE)/$(VP8_PREFIX)vp8_common.mk
-
VP8_CX_EXPORTS += exports_enc
VP8_CX_SRCS-yes += $(VP8_COMMON_SRCS-yes)
@@ -26,7 +24,6 @@ VP8_CX_SRCS-yes += vp8cx.mk
VP8_CX_SRCS-yes += vp8_cx_iface.c
-VP8_CX_SRCS-yes += encoder/asm_enc_offsets.c
VP8_CX_SRCS-yes += encoder/defaultcoefcounts.h
VP8_CX_SRCS-yes += encoder/bitstream.c
VP8_CX_SRCS-yes += encoder/boolhuff.c
@@ -80,6 +77,7 @@ VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.c
VP8_CX_SRCS-yes += encoder/temporal_filter.c
VP8_CX_SRCS-$(CONFIG_MULTI_RES_ENCODING) += encoder/mr_dissim.c
VP8_CX_SRCS-$(CONFIG_MULTI_RES_ENCODING) += encoder/mr_dissim.h
+VP8_CX_SRCS-yes += encoder/vp8_asm_enc_offsets.c
ifeq ($(CONFIG_REALTIME_ONLY),yes)
VP8_CX_SRCS_REMOVE-yes += encoder/firstpass.c
@@ -91,13 +89,10 @@ VP8_CX_SRCS-$(HAVE_MMX) += encoder/x86/subtract_mmx.asm
VP8_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp8_enc_stubs_mmx.c
VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_sse2.asm
VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/fwalsh_sse2.asm
-VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/quantize_sse2.asm
+VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/quantize_sse2.c
ifeq ($(CONFIG_TEMPORAL_DENOISING),yes)
VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/denoising_sse2.c
-ifeq ($(HAVE_SSE2),yes)
-vp8/encoder/x86/denoising_sse2.c.o: CFLAGS += -msse2
-endif
endif
VP8_CX_SRCS-$(HAVE_SSE2) += encoder/x86/subtract_sse2.asm
@@ -113,5 +108,7 @@ ifeq ($(CONFIG_REALTIME_ONLY),yes)
VP8_CX_SRCS_REMOVE-$(HAVE_SSE2) += encoder/x86/temporal_filter_apply_sse2.asm
endif
-
VP8_CX_SRCS-yes := $(filter-out $(VP8_CX_SRCS_REMOVE-yes),$(VP8_CX_SRCS-yes))
+
+$(eval $(call asm_offsets_template,\
+ vp8_asm_enc_offsets.asm, $(VP8_PREFIX)encoder/vp8_asm_enc_offsets.c))
diff --git a/libvpx/vp8/vp8dx.mk b/libvpx/vp8/vp8dx.mk
index dd39190..c26f42d 100644
--- a/libvpx/vp8/vp8dx.mk
+++ b/libvpx/vp8/vp8dx.mk
@@ -9,8 +9,6 @@
##
-include $(SRC_PATH_BARE)/$(VP8_PREFIX)vp8_common.mk
-
VP8_DX_EXPORTS += exports_dec
VP8_DX_SRCS-yes += $(VP8_COMMON_SRCS-yes)
@@ -22,31 +20,6 @@ VP8_DX_SRCS-yes += vp8dx.mk
VP8_DX_SRCS-yes += vp8_dx_iface.c
-# common
-#define ARM
-#define DISABLE_THREAD
-
-#INCLUDES += algo/vpx_common/vpx_mem/include
-#INCLUDES += common
-#INCLUDES += common
-#INCLUDES += common
-#INCLUDES += common
-#INCLUDES += decoder
-
-
-
-# decoder
-#define ARM
-#define DISABLE_THREAD
-
-#INCLUDES += algo/vpx_common/vpx_mem/include
-#INCLUDES += common
-#INCLUDES += common
-#INCLUDES += common
-#INCLUDES += common
-#INCLUDES += decoder
-
-VP8_DX_SRCS-yes += decoder/asm_dec_offsets.c
VP8_DX_SRCS-yes += decoder/dboolhuff.c
VP8_DX_SRCS-yes += decoder/decodemv.c
VP8_DX_SRCS-yes += decoder/decodframe.c
@@ -62,5 +35,9 @@ VP8_DX_SRCS-yes += decoder/onyxd_int.h
VP8_DX_SRCS-yes += decoder/treereader.h
VP8_DX_SRCS-yes += decoder/onyxd_if.c
VP8_DX_SRCS-$(CONFIG_MULTITHREAD) += decoder/threading.c
+VP8_DX_SRCS-yes += decoder/vp8_asm_dec_offsets.c
VP8_DX_SRCS-yes := $(filter-out $(VP8_DX_SRCS_REMOVE-yes),$(VP8_DX_SRCS-yes))
+
+$(eval $(call asm_offsets_template,\
+ vp8_asm_dec_offsets.asm, $(VP8_PREFIX)decoder/vp8_asm_dec_offsets.c))
diff --git a/libvpx/vp8_multi_resolution_encoder.c b/libvpx/vp8_multi_resolution_encoder.c
index eae36a4..4c29056 100644
--- a/libvpx/vp8_multi_resolution_encoder.c
+++ b/libvpx/vp8_multi_resolution_encoder.c
@@ -216,7 +216,7 @@ int main(int argc, char **argv)
* If target bitrate for highest-resolution level is set to 0,
* (i.e. target_bitrate[0]=0), we skip encoding at that level.
*/
- unsigned int target_bitrate[NUM_ENCODERS]={1400, 500, 100};
+ unsigned int target_bitrate[NUM_ENCODERS]={1000, 500, 100};
/* Enter the frame rate of the input video */
int framerate = 30;
/* Set down-sampling factor for each resolution level.
@@ -351,27 +351,26 @@ int main(int argc, char **argv)
if(vpx_codec_control(&codec[i], VP8E_SET_CPUUSED, speed))
die_codec(&codec[i], "Failed to set cpu_used");
}
- /* Set static thresh for highest-resolution encoder. Set it to 1000 for
- * better performance. */
- {
- unsigned int static_thresh = 1000;
- if(vpx_codec_control(&codec[0], VP8E_SET_STATIC_THRESHOLD, static_thresh))
- die_codec(&codec[0], "Failed to set static threshold");
- }
- /* Set static thresh = 0 for other encoders for better quality */
- for ( i=1; i<NUM_ENCODERS; i++)
+
+ /* Set static threshold. */
+ for ( i=0; i<NUM_ENCODERS; i++)
{
- unsigned int static_thresh = 0;
+ unsigned int static_thresh = 1;
if(vpx_codec_control(&codec[i], VP8E_SET_STATIC_THRESHOLD, static_thresh))
die_codec(&codec[i], "Failed to set static threshold");
}
+
/* Set NOISE_SENSITIVITY to do TEMPORAL_DENOISING */
- for ( i=0; i< NUM_ENCODERS; i++)
+ /* Enable denoising for the highest-resolution encoder. */
+ if(vpx_codec_control(&codec[0], VP8E_SET_NOISE_SENSITIVITY, 1))
+ die_codec(&codec[0], "Failed to set noise_sensitivity");
+ for ( i=1; i< NUM_ENCODERS; i++)
{
if(vpx_codec_control(&codec[i], VP8E_SET_NOISE_SENSITIVITY, 0))
die_codec(&codec[i], "Failed to set noise_sensitivity");
}
+
frame_avail = 1;
got_data = 0;
diff --git a/libvpx/vp9/common/generic/vp9_systemdependent.c b/libvpx/vp9/common/generic/vp9_systemdependent.c
new file mode 100644
index 0000000..79092cd
--- /dev/null
+++ b/libvpx/vp9/common/generic/vp9_systemdependent.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "./vpx_config.h"
+#include "vp9_rtcd.h"
+#include "vp9/common/vp9_onyxc_int.h"
+
+void vp9_machine_specific_config(VP9_COMMON *ctx) {
+ vp9_rtcd();
+}
diff --git a/libvpx/vp9/common/vp9_alloccommon.c b/libvpx/vp9/common/vp9_alloccommon.c
new file mode 100644
index 0000000..2660344
--- /dev/null
+++ b/libvpx/vp9/common/vp9_alloccommon.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "./vpx_config.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_entropymv.h"
+#include "vp9/common/vp9_findnearmv.h"
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_systemdependent.h"
+
+void vp9_update_mode_info_border(VP9_COMMON *cm, MODE_INFO *mi) {
+ const int stride = cm->mode_info_stride;
+ int i;
+
+ // Clear down top border row
+ vpx_memset(mi, 0, sizeof(MODE_INFO) * stride);
+
+ // Clear left border column
+ for (i = 1; i < cm->mi_rows + 1; i++)
+ vpx_memset(&mi[i * stride], 0, sizeof(MODE_INFO));
+}
+
+void vp9_update_mode_info_in_image(VP9_COMMON *cm, MODE_INFO *mi) {
+ int i, j;
+
+ // For each in image mode_info element set the in image flag to 1
+ for (i = 0; i < cm->mi_rows; i++) {
+ MODE_INFO *ptr = mi;
+ for (j = 0; j < cm->mi_cols; j++) {
+ ptr->mbmi.mb_in_image = 1;
+ ptr++; // Next element in the row
+ }
+
+ // Step over border element at start of next row
+ mi += cm->mode_info_stride;
+ }
+}
+
+void vp9_free_frame_buffers(VP9_COMMON *oci) {
+ int i;
+
+ for (i = 0; i < NUM_YV12_BUFFERS; i++)
+ vp9_free_frame_buffer(&oci->yv12_fb[i]);
+
+ vp9_free_frame_buffer(&oci->temp_scale_frame);
+ vp9_free_frame_buffer(&oci->post_proc_buffer);
+
+ vpx_free(oci->mip);
+ vpx_free(oci->prev_mip);
+ vpx_free(oci->above_seg_context);
+
+ vpx_free(oci->above_context[0]);
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ oci->above_context[i] = 0;
+ oci->mip = 0;
+ oci->prev_mip = 0;
+ oci->above_seg_context = 0;
+}
+
+static void set_mb_mi(VP9_COMMON *cm, int aligned_width, int aligned_height) {
+ cm->mb_cols = (aligned_width + 8) >> 4;
+ cm->mb_rows = (aligned_height + 8) >> 4;
+ cm->MBs = cm->mb_rows * cm->mb_cols;
+
+ cm->mi_cols = aligned_width >> LOG2_MI_SIZE;
+ cm->mi_rows = aligned_height >> LOG2_MI_SIZE;
+ cm->mode_info_stride = cm->mi_cols + 64 / MI_SIZE;
+}
+
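
[Annotation] A worked example of set_mb_mi, assuming MI_SIZE == 8 (LOG2_MI_SIZE == 3) as in this snapshot: for an aligned 1280x720 frame,

    mb_cols = (1280 + 8) >> 4 = 80      mb_rows = (720 + 8) >> 4 = 45
    mi_cols = 1280 >> 3       = 160     mi_rows = 720 >> 3       = 90
    mode_info_stride = 160 + 64 / 8 = 168

The extra 64 / MI_SIZE columns in the stride leave room for the border elements that setup_mi then steps past with its + stride + 1 offset.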
+static void setup_mi(VP9_COMMON *cm) {
+ cm->mi = cm->mip + cm->mode_info_stride + 1;
+ cm->prev_mi = cm->prev_mip + cm->mode_info_stride + 1;
+
+ vpx_memset(cm->mip, 0,
+ cm->mode_info_stride * (cm->mi_rows + 1) * sizeof(MODE_INFO));
+
+ vp9_update_mode_info_border(cm, cm->mip);
+ vp9_update_mode_info_in_image(cm, cm->mi);
+
+ vp9_update_mode_info_border(cm, cm->prev_mip);
+ vp9_update_mode_info_in_image(cm, cm->prev_mi);
+}
+
+int vp9_alloc_frame_buffers(VP9_COMMON *oci, int width, int height) {
+ int i, mi_cols;
+
+ // Our internal buffers are always multiples of 8
+ const int aligned_width = multiple8(width);
+ const int aligned_height = multiple8(height);
+ const int ss_x = oci->subsampling_x;
+ const int ss_y = oci->subsampling_y;
+
+ vp9_free_frame_buffers(oci);
+
+ for (i = 0; i < NUM_YV12_BUFFERS; i++) {
+ oci->fb_idx_ref_cnt[i] = 0;
+ if (vp9_alloc_frame_buffer(&oci->yv12_fb[i], width, height, ss_x, ss_y,
+ VP9BORDERINPIXELS) < 0)
+ goto fail;
+ }
+
+ oci->new_fb_idx = NUM_YV12_BUFFERS - 1;
+ oci->fb_idx_ref_cnt[oci->new_fb_idx] = 1;
+
+ for (i = 0; i < ALLOWED_REFS_PER_FRAME; i++)
+ oci->active_ref_idx[i] = i;
+
+ for (i = 0; i < NUM_REF_FRAMES; i++) {
+ oci->ref_frame_map[i] = i;
+ oci->fb_idx_ref_cnt[i] = 1;
+ }
+
+ if (vp9_alloc_frame_buffer(&oci->temp_scale_frame, width, 16, ss_x, ss_y,
+ VP9BORDERINPIXELS) < 0)
+ goto fail;
+
+ if (vp9_alloc_frame_buffer(&oci->post_proc_buffer, width, height, ss_x, ss_y,
+ VP9BORDERINPIXELS) < 0)
+ goto fail;
+
+ set_mb_mi(oci, aligned_width, aligned_height);
+
+ // Allocation
+ oci->mip = vpx_calloc(oci->mode_info_stride * (oci->mi_rows + 64 / MI_SIZE),
+ sizeof(MODE_INFO));
+ if (!oci->mip)
+ goto fail;
+
+ oci->prev_mip = vpx_calloc(oci->mode_info_stride *
+ (oci->mi_rows + 64 / MI_SIZE),
+ sizeof(MODE_INFO));
+ if (!oci->prev_mip)
+ goto fail;
+
+ setup_mi(oci);
+
+ // FIXME(jkoleszar): allocate subsampled arrays for U/V once subsampling
+ // information is exposed at this level
+ mi_cols = mi_cols_aligned_to_sb(oci);
+
+ // 2 contexts per 'mi unit', so that we have one context per 4x4 txfm
+ // block where mi unit size is 8x8.
+#if CONFIG_ALPHA
+ oci->above_context[0] = vpx_calloc(sizeof(ENTROPY_CONTEXT) * 8 * mi_cols, 1);
+#else
+ oci->above_context[0] = vpx_calloc(sizeof(ENTROPY_CONTEXT) * 6 * mi_cols, 1);
+#endif
+ if (!oci->above_context[0])
+ goto fail;
+
+ for (i = 1; i < MAX_MB_PLANE; i++)
+ oci->above_context[i] =
+ oci->above_context[0] + i * sizeof(ENTROPY_CONTEXT) * 2 * mi_cols;
+
+ oci->above_seg_context = vpx_calloc(sizeof(PARTITION_CONTEXT) * mi_cols, 1);
+ if (!oci->above_seg_context)
+ goto fail;
+
+ return 0;
+
+ fail:
+ vp9_free_frame_buffers(oci);
+ return 1;
+}
+
+void vp9_create_common(VP9_COMMON *oci) {
+ vp9_machine_specific_config(oci);
+
+ vp9_init_mbmode_probs(oci);
+
+ oci->txfm_mode = ONLY_4X4;
+ oci->comp_pred_mode = HYBRID_PREDICTION;
+ oci->clr_type = REG_YUV;
+
+ // Initialize reference frame sign bias structure to defaults
+ vpx_memset(oci->ref_frame_sign_bias, 0, sizeof(oci->ref_frame_sign_bias));
+}
+
+void vp9_remove_common(VP9_COMMON *oci) {
+ vp9_free_frame_buffers(oci);
+}
+
+void vp9_initialize_common() {
+ vp9_coef_tree_initialize();
+ vp9_entropy_mode_init();
+ vp9_entropy_mv_init();
+}
+
+void vp9_update_frame_size(VP9_COMMON *cm) {
+ const int aligned_width = multiple8(cm->width);
+ const int aligned_height = multiple8(cm->height);
+
+ set_mb_mi(cm, aligned_width, aligned_height);
+ setup_mi(cm);
+}
diff --git a/libvpx/vp9/common/vp9_alloccommon.h b/libvpx/vp9/common/vp9_alloccommon.h
new file mode 100644
index 0000000..8bf5ed1
--- /dev/null
+++ b/libvpx/vp9/common/vp9_alloccommon.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_COMMON_VP9_ALLOCCOMMON_H_
+#define VP9_COMMON_VP9_ALLOCCOMMON_H_
+
+#include "vp9/common/vp9_onyxc_int.h"
+
+void vp9_initialize_common();
+
+void vp9_update_mode_info_border(VP9_COMMON *cpi, MODE_INFO *mi);
+void vp9_update_mode_info_in_image(VP9_COMMON *cpi, MODE_INFO *mi);
+
+void vp9_create_common(VP9_COMMON *oci);
+void vp9_remove_common(VP9_COMMON *oci);
+
+int vp9_alloc_frame_buffers(VP9_COMMON *oci, int width, int height);
+void vp9_free_frame_buffers(VP9_COMMON *oci);
+
+
+void vp9_update_frame_size(VP9_COMMON *cm);
+
+#endif // VP9_COMMON_VP9_ALLOCCOMMON_H_
diff --git a/libvpx/vp9/common/vp9_asm_com_offsets.c b/libvpx/vp9/common/vp9_asm_com_offsets.c
new file mode 100644
index 0000000..94ccb6e
--- /dev/null
+++ b/libvpx/vp9/common/vp9_asm_com_offsets.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vpx_config.h"
+#include "vpx/vpx_codec.h"
+#include "vpx_ports/asm_offsets.h"
+
+BEGIN
+
+END
+
+/* add asserts for any offset that is not supported by assembly code */
+/* add asserts for any size that is not supported by assembly code */
diff --git a/libvpx/vp9/common/vp9_blockd.h b/libvpx/vp9/common/vp9_blockd.h
new file mode 100644
index 0000000..37d29af
--- /dev/null
+++ b/libvpx/vp9/common/vp9_blockd.h
@@ -0,0 +1,904 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_COMMON_VP9_BLOCKD_H_
+#define VP9_COMMON_VP9_BLOCKD_H_
+
+#include "./vpx_config.h"
+#include "vpx_scale/yv12config.h"
+#include "vp9/common/vp9_convolve.h"
+#include "vp9/common/vp9_mv.h"
+#include "vp9/common/vp9_treecoder.h"
+#include "vpx_ports/mem.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_enums.h"
+
+#define BLOCK_SIZE_GROUPS 4
+#define MAX_MB_SEGMENTS 8
+#define MB_SEG_TREE_PROBS (MAX_MB_SEGMENTS-1)
+
+#define PREDICTION_PROBS 3
+
+#define MBSKIP_CONTEXTS 3
+
+#define MAX_REF_LF_DELTAS 4
+#define MAX_MODE_LF_DELTAS 2
+
+/* Segment Feature Masks */
+#define SEGMENT_DELTADATA 0
+#define SEGMENT_ABSDATA 1
+#define MAX_MV_REF_CANDIDATES 2
+
+#define INTRA_INTER_CONTEXTS 4
+#define COMP_INTER_CONTEXTS 5
+#define REF_CONTEXTS 5
+
+typedef enum {
+ PLANE_TYPE_Y_WITH_DC,
+ PLANE_TYPE_UV,
+} PLANE_TYPE;
+
+typedef char ENTROPY_CONTEXT;
+
+typedef char PARTITION_CONTEXT;
+
+static INLINE int combine_entropy_contexts(ENTROPY_CONTEXT a,
+ ENTROPY_CONTEXT b) {
+ return (a != 0) + (b != 0);
+}
+
+typedef enum {
+ KEY_FRAME = 0,
+ INTER_FRAME = 1,
+ NUM_FRAME_TYPES,
+} FRAME_TYPE;
+
+typedef enum {
+ EIGHTTAP_SMOOTH,
+ EIGHTTAP,
+ EIGHTTAP_SHARP,
+ BILINEAR,
+ SWITCHABLE /* should be the last one */
+} INTERPOLATIONFILTERTYPE;
+
+typedef enum {
+ DC_PRED, // Average of above and left pixels
+ V_PRED, // Vertical
+ H_PRED, // Horizontal
+ D45_PRED, // Directional 45 deg = round(arctan(1/1) * 180/pi)
+ D135_PRED, // Directional 135 deg = 180 - 45
+ D117_PRED, // Directional 117 deg = 180 - 63
+ D153_PRED, // Directional 153 deg = 180 - 27
+ D27_PRED, // Directional 27 deg = round(arctan(1/2) * 180/pi)
+ D63_PRED, // Directional 63 deg = round(arctan(2/1) * 180/pi)
+ TM_PRED, // True-motion
+ NEARESTMV,
+ NEARMV,
+ ZEROMV,
+ NEWMV,
+ MB_MODE_COUNT
+} MB_PREDICTION_MODE;
+
+static INLINE int is_inter_mode(MB_PREDICTION_MODE mode) {
+ return mode >= NEARESTMV && mode <= NEWMV;
+}
+
+// Segment level features.
+typedef enum {
+ SEG_LVL_ALT_Q = 0, // Use alternate Quantizer ....
+ SEG_LVL_ALT_LF = 1, // Use alternate loop filter value...
+ SEG_LVL_REF_FRAME = 2, // Optional Segment reference frame
+ SEG_LVL_SKIP = 3, // Optional Segment (0,0) + skip mode
+ SEG_LVL_MAX = 4 // Number of MB level features supported
+} SEG_LVL_FEATURES;
+
+// Supported transform sizes.
+typedef enum {
+ TX_4X4 = 0, // 4x4 dct transform
+ TX_8X8 = 1, // 8x8 dct transform
+ TX_16X16 = 2, // 16x16 dct transform
+ TX_32X32 = 3, // 32x32 dct transform
+ TX_SIZE_MAX_SB, // Number of transforms available to SBs
+} TX_SIZE;
+
+typedef enum {
+ DCT_DCT = 0, // DCT in both horizontal and vertical
+ ADST_DCT = 1, // ADST in vertical, DCT in horizontal
+ DCT_ADST = 2, // DCT in vertical, ADST in horizontal
+ ADST_ADST = 3 // ADST in both directions
+} TX_TYPE;
+
+#define VP9_INTRA_MODES (TM_PRED + 1)
+
+#define VP9_INTER_MODES (1 + NEWMV - NEARESTMV)
+
+#define WHT_UPSCALE_FACTOR 2
+
+#define TX_SIZE_PROBS 6 // (TX_SIZE_MAX_SB * (TX_SIZE_MAX_SB - 1) / 2)
+
+#define get_tx_probs(c, b) ((b) < BLOCK_SIZE_MB16X16 ? \
+ (c)->fc.tx_probs_8x8p : \
+ (b) < BLOCK_SIZE_SB32X32 ? \
+ (c)->fc.tx_probs_16x16p : (c)->fc.tx_probs_32x32p)
+
+/* For keyframes, intra block modes are predicted by the (already decoded)
+ modes for the Y blocks to the left and above us; for interframes, there
+ is a single probability table. */
+
+union b_mode_info {
+ struct {
+ MB_PREDICTION_MODE first;
+ } as_mode;
+ int_mv as_mv[2]; // first, second inter predictor motion vectors
+};
+
+typedef enum {
+ NONE = -1,
+ INTRA_FRAME = 0,
+ LAST_FRAME = 1,
+ GOLDEN_FRAME = 2,
+ ALTREF_FRAME = 3,
+ MAX_REF_FRAMES = 4
+} MV_REFERENCE_FRAME;
+
+static INLINE int b_width_log2(BLOCK_SIZE_TYPE sb_type) {
+ switch (sb_type) {
+ case BLOCK_SIZE_SB4X8:
+ case BLOCK_SIZE_AB4X4: return 0;
+ case BLOCK_SIZE_SB8X4:
+ case BLOCK_SIZE_SB8X8:
+ case BLOCK_SIZE_SB8X16: return 1;
+ case BLOCK_SIZE_SB16X8:
+ case BLOCK_SIZE_MB16X16:
+ case BLOCK_SIZE_SB16X32: return 2;
+ case BLOCK_SIZE_SB32X16:
+ case BLOCK_SIZE_SB32X32:
+ case BLOCK_SIZE_SB32X64: return 3;
+ case BLOCK_SIZE_SB64X32:
+ case BLOCK_SIZE_SB64X64: return 4;
+ default: assert(0);
+ return -1;
+ }
+}
+
+static INLINE int b_height_log2(BLOCK_SIZE_TYPE sb_type) {
+ switch (sb_type) {
+ case BLOCK_SIZE_SB8X4:
+ case BLOCK_SIZE_AB4X4: return 0;
+ case BLOCK_SIZE_SB4X8:
+ case BLOCK_SIZE_SB8X8:
+ case BLOCK_SIZE_SB16X8: return 1;
+ case BLOCK_SIZE_SB8X16:
+ case BLOCK_SIZE_MB16X16:
+ case BLOCK_SIZE_SB32X16: return 2;
+ case BLOCK_SIZE_SB16X32:
+ case BLOCK_SIZE_SB32X32:
+ case BLOCK_SIZE_SB64X32: return 3;
+ case BLOCK_SIZE_SB32X64:
+ case BLOCK_SIZE_SB64X64: return 4;
+ default: assert(0);
+ return -1;
+ }
+}
+
+static INLINE int mi_width_log2(BLOCK_SIZE_TYPE sb_type) {
+ int a = b_width_log2(sb_type) - 1;
+ // align 4x4 block to mode_info
+ if (a < 0)
+ a = 0;
+ assert(a >= 0);
+ return a;
+}
+
+static INLINE int mi_height_log2(BLOCK_SIZE_TYPE sb_type) {
+ int a = b_height_log2(sb_type) - 1;
+ if (a < 0)
+ a = 0;
+ assert(a >= 0);
+ return a;
+}
+
+typedef struct {
+ MB_PREDICTION_MODE mode, uv_mode;
+ MV_REFERENCE_FRAME ref_frame[2];
+ TX_SIZE txfm_size;
+ int_mv mv[2]; // for each reference frame used
+ int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
+ int_mv best_mv, best_second_mv;
+
+ int mb_mode_context[MAX_REF_FRAMES];
+
+ unsigned char mb_skip_coeff; /* does this mb have coefficients at all? 1 = no coefficients, 0 = tokens must be decoded */
+ unsigned char segment_id; // Segment id for current frame
+
+ // Flags used for prediction status of various bitstream signals
+ unsigned char seg_id_predicted;
+
+ // Indicates if the mb is part of the image (1) vs border (0)
+ // This can be useful in determining whether the MB provides
+ // a valid predictor
+ unsigned char mb_in_image;
+
+ INTERPOLATIONFILTERTYPE interp_filter;
+
+ BLOCK_SIZE_TYPE sb_type;
+} MB_MODE_INFO;
+
+typedef struct {
+ MB_MODE_INFO mbmi;
+ union b_mode_info bmi[4];
+} MODE_INFO;
+
+#define VP9_REF_SCALE_SHIFT 14
+struct scale_factors {
+ int x_scale_fp; // horizontal fixed point scale factor
+ int y_scale_fp; // vertical fixed point scale factor
+ int x_offset_q4;
+ int x_step_q4;
+ int y_offset_q4;
+ int y_step_q4;
+
+ int (*scale_value_x)(int val, const struct scale_factors *scale);
+ int (*scale_value_y)(int val, const struct scale_factors *scale);
+ void (*set_scaled_offsets)(struct scale_factors *scale, int row, int col);
+ int_mv32 (*scale_mv_q3_to_q4)(const int_mv *src_mv,
+ const struct scale_factors *scale);
+ int32_t (*scale_mv_component_q4)(int mv_q4, int scale_fp, int offset_q4);
+
+ convolve_fn_t predict[2][2][2]; // horiz, vert, avg
+};
+
+#if CONFIG_ALPHA
+enum { MAX_MB_PLANE = 4 };
+#else
+enum { MAX_MB_PLANE = 3 };
+#endif
+
+struct buf_2d {
+ uint8_t *buf;
+ int stride;
+};
+
+struct macroblockd_plane {
+ DECLARE_ALIGNED(16, int16_t, qcoeff[64 * 64]);
+ DECLARE_ALIGNED(16, int16_t, dqcoeff[64 * 64]);
+ DECLARE_ALIGNED(16, uint16_t, eobs[256]);
+ PLANE_TYPE plane_type;
+ int subsampling_x;
+ int subsampling_y;
+ struct buf_2d dst;
+ struct buf_2d pre[2];
+ int16_t *dequant;
+ ENTROPY_CONTEXT *above_context;
+ ENTROPY_CONTEXT *left_context;
+};
+
+#define BLOCK_OFFSET(x, i, n) ((x) + (i) * (n))
+
+typedef struct macroblockd {
+ struct macroblockd_plane plane[MAX_MB_PLANE];
+
+ struct scale_factors scale_factor[2];
+ struct scale_factors scale_factor_uv[2];
+
+ MODE_INFO *prev_mode_info_context;
+ MODE_INFO *mode_info_context;
+ int mode_info_stride;
+
+ FRAME_TYPE frame_type;
+
+ int up_available;
+ int left_available;
+ int right_available;
+
+ // partition contexts
+ PARTITION_CONTEXT *above_seg_context;
+ PARTITION_CONTEXT *left_seg_context;
+
+ /* 0 (disable) 1 (enable) segmentation */
+ unsigned char segmentation_enabled;
+
+ /* 0 (do not update) 1 (update) the macroblock segmentation map. */
+ unsigned char update_mb_segmentation_map;
+
+ /* 0 (do not update) 1 (update) the macroblock segmentation feature data. */
+ unsigned char update_mb_segmentation_data;
+
+ /* 0 (do not update) 1 (update) the macroblock segmentation feature data. */
+ unsigned char mb_segment_abs_delta;
+
+ /* Per-frame flags that define which MB-level features (such as quantizer
+    or loop filter level) are enabled and, when enabled, the probabilities
+    used to decode the per-MB flags in MB_MODE_INFO. */
+
+ // Probability Tree used to code Segment number
+ vp9_prob mb_segment_tree_probs[MB_SEG_TREE_PROBS];
+
+ // Segment features
+ int16_t segment_feature_data[MAX_MB_SEGMENTS][SEG_LVL_MAX];
+ unsigned int segment_feature_mask[MAX_MB_SEGMENTS];
+
+ /* mode_based Loop filter adjustment */
+ unsigned char mode_ref_lf_delta_enabled;
+ unsigned char mode_ref_lf_delta_update;
+
+ /* Delta values have the range +/- MAX_LOOP_FILTER */
+ /* 0 = Intra, Last, GF, ARF */
+ signed char last_ref_lf_deltas[MAX_REF_LF_DELTAS];
+ /* 0 = Intra, Last, GF, ARF */
+ signed char ref_lf_deltas[MAX_REF_LF_DELTAS];
+ /* 0 = ZERO_MV, MV */
+ signed char last_mode_lf_deltas[MAX_MODE_LF_DELTAS];
+ /* 0 = ZERO_MV, MV */
+ signed char mode_lf_deltas[MAX_MODE_LF_DELTAS];
+
+ /* Distance of MB away from frame edges */
+ int mb_to_left_edge;
+ int mb_to_right_edge;
+ int mb_to_top_edge;
+ int mb_to_bottom_edge;
+
+ unsigned int frames_since_golden;
+ unsigned int frames_till_alt_ref_frame;
+
+ int lossless;
+ /* Inverse transform function pointers. */
+ void (*inv_txm4x4_1_add)(int16_t *input, uint8_t *dest, int stride);
+ void (*inv_txm4x4_add)(int16_t *input, uint8_t *dest, int stride);
+ void (*itxm_add)(int16_t *input, uint8_t *dest, int stride, int eob);
+
+ struct subpix_fn_table subpix;
+
+ int allow_high_precision_mv;
+
+ int corrupted;
+
+ int sb_index; // index of 32x32 block inside the 64x64 block
+ int mb_index; // index of 16x16 block inside the 32x32 block
+ int b_index; // index of 8x8 block inside the 16x16 block
+ int ab_index; // index of 4x4 block inside the 8x8 block
+ int q_index;
+
+} MACROBLOCKD;
+
+static int *get_sb_index(MACROBLOCKD *xd, BLOCK_SIZE_TYPE subsize) {
+ switch (subsize) {
+ case BLOCK_SIZE_SB64X64:
+ case BLOCK_SIZE_SB64X32:
+ case BLOCK_SIZE_SB32X64:
+ case BLOCK_SIZE_SB32X32:
+ return &xd->sb_index;
+ case BLOCK_SIZE_SB32X16:
+ case BLOCK_SIZE_SB16X32:
+ case BLOCK_SIZE_MB16X16:
+ return &xd->mb_index;
+ case BLOCK_SIZE_SB16X8:
+ case BLOCK_SIZE_SB8X16:
+ case BLOCK_SIZE_SB8X8:
+ return &xd->b_index;
+ case BLOCK_SIZE_SB8X4:
+ case BLOCK_SIZE_SB4X8:
+ case BLOCK_SIZE_AB4X4:
+ return &xd->ab_index;
+ default:
+ assert(0);
+ return NULL;
+ }
+}
+
+static INLINE void update_partition_context(MACROBLOCKD *xd,
+ BLOCK_SIZE_TYPE sb_type,
+ BLOCK_SIZE_TYPE sb_size) {
+ int bsl = b_width_log2(sb_size), bs = (1 << bsl) / 2;
+ int bwl = b_width_log2(sb_type);
+ int bhl = b_height_log2(sb_type);
+ int boffset = b_width_log2(BLOCK_SIZE_SB64X64) - bsl;
+ int i;
+
+ // Update the partition context at the end. Set the partition bits of block
+ // sizes larger than the current one to one, and the partition bits of
+ // smaller block sizes to zero.
+ if ((bwl == bsl) && (bhl == bsl)) {
+ for (i = 0; i < bs; i++)
+ xd->left_seg_context[i] = ~(0xf << boffset);
+ for (i = 0; i < bs; i++)
+ xd->above_seg_context[i] = ~(0xf << boffset);
+ } else if ((bwl == bsl) && (bhl < bsl)) {
+ for (i = 0; i < bs; i++)
+ xd->left_seg_context[i] = ~(0xe << boffset);
+ for (i = 0; i < bs; i++)
+ xd->above_seg_context[i] = ~(0xf << boffset);
+ } else if ((bwl < bsl) && (bhl == bsl)) {
+ for (i = 0; i < bs; i++)
+ xd->left_seg_context[i] = ~(0xf << boffset);
+ for (i = 0; i < bs; i++)
+ xd->above_seg_context[i] = ~(0xe << boffset);
+ } else if ((bwl < bsl) && (bhl < bsl)) {
+ for (i = 0; i < bs; i++)
+ xd->left_seg_context[i] = ~(0xe << boffset);
+ for (i = 0; i < bs; i++)
+ xd->above_seg_context[i] = ~(0xe << boffset);
+ } else {
+ assert(0);
+ }
+}
+
+static INLINE int partition_plane_context(MACROBLOCKD *xd,
+ BLOCK_SIZE_TYPE sb_type) {
+ int bsl = mi_width_log2(sb_type), bs = 1 << bsl;
+ int above = 0, left = 0, i;
+ int boffset = mi_width_log2(BLOCK_SIZE_SB64X64) - bsl;
+
+ assert(mi_width_log2(sb_type) == mi_height_log2(sb_type));
+ assert(bsl >= 0);
+ assert(boffset >= 0);
+
+ for (i = 0; i < bs; i++)
+ above |= (xd->above_seg_context[i] & (1 << boffset));
+ for (i = 0; i < bs; i++)
+ left |= (xd->left_seg_context[i] & (1 << boffset));
+
+ above = (above > 0);
+ left = (left > 0);
+
+ return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
+}
+
+static BLOCK_SIZE_TYPE get_subsize(BLOCK_SIZE_TYPE bsize,
+ PARTITION_TYPE partition) {
+ BLOCK_SIZE_TYPE subsize;
+ switch (partition) {
+ case PARTITION_NONE:
+ subsize = bsize;
+ break;
+ case PARTITION_HORZ:
+ if (bsize == BLOCK_SIZE_SB64X64)
+ subsize = BLOCK_SIZE_SB64X32;
+ else if (bsize == BLOCK_SIZE_SB32X32)
+ subsize = BLOCK_SIZE_SB32X16;
+ else if (bsize == BLOCK_SIZE_MB16X16)
+ subsize = BLOCK_SIZE_SB16X8;
+ else if (bsize == BLOCK_SIZE_SB8X8)
+ subsize = BLOCK_SIZE_SB8X4;
+ else
+ assert(0);
+ break;
+ case PARTITION_VERT:
+ if (bsize == BLOCK_SIZE_SB64X64)
+ subsize = BLOCK_SIZE_SB32X64;
+ else if (bsize == BLOCK_SIZE_SB32X32)
+ subsize = BLOCK_SIZE_SB16X32;
+ else if (bsize == BLOCK_SIZE_MB16X16)
+ subsize = BLOCK_SIZE_SB8X16;
+ else if (bsize == BLOCK_SIZE_SB8X8)
+ subsize = BLOCK_SIZE_SB4X8;
+ else
+ assert(0);
+ break;
+ case PARTITION_SPLIT:
+ if (bsize == BLOCK_SIZE_SB64X64)
+ subsize = BLOCK_SIZE_SB32X32;
+ else if (bsize == BLOCK_SIZE_SB32X32)
+ subsize = BLOCK_SIZE_MB16X16;
+ else if (bsize == BLOCK_SIZE_MB16X16)
+ subsize = BLOCK_SIZE_SB8X8;
+ else if (bsize == BLOCK_SIZE_SB8X8)
+ subsize = BLOCK_SIZE_AB4X4;
+ else
+ assert(0);
+ break;
+ default:
+ assert(0);
+ }
+ return subsize;
+}
+
+// transform mapping
+static TX_TYPE txfm_map(MB_PREDICTION_MODE bmode) {
+ switch (bmode) {
+ case TM_PRED :
+ case D135_PRED :
+ return ADST_ADST;
+
+ case V_PRED :
+ case D117_PRED :
+ case D63_PRED:
+ return ADST_DCT;
+
+ case H_PRED :
+ case D153_PRED :
+ case D27_PRED :
+ return DCT_ADST;
+
+ default:
+ return DCT_DCT;
+ }
+}
+
+static TX_TYPE get_tx_type_4x4(const MACROBLOCKD *xd, int ib) {
+ TX_TYPE tx_type;
+ MODE_INFO *mi = xd->mode_info_context;
+ MB_MODE_INFO *const mbmi = &mi->mbmi;
+ if (xd->lossless || mbmi->ref_frame[0] != INTRA_FRAME)
+ return DCT_DCT;
+ if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
+ tx_type = txfm_map(mi->bmi[ib].as_mode.first);
+ } else {
+ assert(mbmi->mode <= TM_PRED);
+ tx_type = txfm_map(mbmi->mode);
+ }
+ return tx_type;
+}
+
+static TX_TYPE get_tx_type_8x8(const MACROBLOCKD *xd, int ib) {
+ TX_TYPE tx_type = DCT_DCT;
+ if (xd->mode_info_context->mbmi.mode <= TM_PRED) {
+ tx_type = txfm_map(xd->mode_info_context->mbmi.mode);
+ }
+ return tx_type;
+}
+
+static TX_TYPE get_tx_type_16x16(const MACROBLOCKD *xd, int ib) {
+ TX_TYPE tx_type = DCT_DCT;
+ if (xd->mode_info_context->mbmi.mode <= TM_PRED) {
+ tx_type = txfm_map(xd->mode_info_context->mbmi.mode);
+ }
+ return tx_type;
+}
+
+void vp9_setup_block_dptrs(MACROBLOCKD *xd,
+ int subsampling_x, int subsampling_y);
+
+static TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi) {
+ const TX_SIZE size = mbmi->txfm_size;
+
+ switch (mbmi->sb_type) {
+ case BLOCK_SIZE_SB64X64:
+ return size;
+ case BLOCK_SIZE_SB64X32:
+ case BLOCK_SIZE_SB32X64:
+ case BLOCK_SIZE_SB32X32:
+ if (size == TX_32X32)
+ return TX_16X16;
+ else
+ return size;
+ case BLOCK_SIZE_SB32X16:
+ case BLOCK_SIZE_SB16X32:
+ case BLOCK_SIZE_MB16X16:
+ if (size == TX_16X16)
+ return TX_8X8;
+ else
+ return size;
+ default:
+ return TX_4X4;
+ }
+}
+
+struct plane_block_idx {
+ int plane;
+ int block;
+};
+
+// TODO(jkoleszar): returning a struct so it can be used in a const context,
+// expect to refactor this further later.
+static INLINE struct plane_block_idx plane_block_idx(int y_blocks,
+ int b_idx) {
+ const int v_offset = y_blocks * 5 / 4;
+ struct plane_block_idx res;
+
+ if (b_idx < y_blocks) {
+ res.plane = 0;
+ res.block = b_idx;
+ } else if (b_idx < v_offset) {
+ res.plane = 1;
+ res.block = b_idx - y_blocks;
+ } else {
+ assert(b_idx < y_blocks * 3 / 2);
+ res.plane = 2;
+ res.block = b_idx - v_offset;
+ }
+ return res;
+}
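+// For example, assuming 4:2:0 (the 5 / 4 factor above encodes U holding a
+// quarter of the Y blocks): with y_blocks = 16, v_offset = 20, so indices
+// 0..15 map to plane 0 (Y), 16..19 to plane 1 (U) and 20..23 to plane 2 (V).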
+
+static INLINE int plane_block_width(BLOCK_SIZE_TYPE bsize,
+ const struct macroblockd_plane* plane) {
+ return 4 << (b_width_log2(bsize) - plane->subsampling_x);
+}
+
+static INLINE int plane_block_height(BLOCK_SIZE_TYPE bsize,
+ const struct macroblockd_plane* plane) {
+ return 4 << (b_height_log2(bsize) - plane->subsampling_y);
+}
+
+typedef void (*foreach_transformed_block_visitor)(int plane, int block,
+ BLOCK_SIZE_TYPE bsize,
+ int ss_txfrm_size,
+ void *arg);
+
+static INLINE void foreach_transformed_block_in_plane(
+ const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize, int plane,
+ foreach_transformed_block_visitor visit, void *arg) {
+ const int bw = b_width_log2(bsize), bh = b_height_log2(bsize);
+
+ // block and transform sizes, in number of 4x4 blocks log 2 ("*_b")
+ // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
+ // transform size varies per plane, look it up in a common way.
+ const MB_MODE_INFO* mbmi = &xd->mode_info_context->mbmi;
+ const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi)
+ : mbmi->txfm_size;
+ const int block_size_b = bw + bh;
+ const int txfrm_size_b = tx_size * 2;
+
+ // subsampled size of the block
+ const int ss_sum = xd->plane[plane].subsampling_x
+ + xd->plane[plane].subsampling_y;
+ const int ss_block_size = block_size_b - ss_sum;
+
+ const int step = 1 << txfrm_size_b;
+
+ int i;
+
+ assert(txfrm_size_b <= block_size_b);
+ assert(txfrm_size_b <= ss_block_size);
+
+ // If mb_to_right_edge is < 0, the current block extends into the UMV
+ // (unrestricted motion vector) border, and we must not visit the sub
+ // blocks that lie wholly within that border.
+ if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) {
+ int r, c;
+ const int sw = bw - xd->plane[plane].subsampling_x;
+ const int sh = bh - xd->plane[plane].subsampling_y;
+ int max_blocks_wide = 1 << sw;
+ int max_blocks_high = 1 << sh;
+
+ // xd->mb_to_right_edge is in units of pixels * 8. This converts
+ // it to 4x4 block sizes.
+ if (xd->mb_to_right_edge < 0)
+ max_blocks_wide +=
+ (xd->mb_to_right_edge >> (5 + xd->plane[plane].subsampling_x));
+
+ if (xd->mb_to_bottom_edge < 0)
+ max_blocks_high +=
+ (xd->mb_to_bottom_edge >> (5 + xd->plane[plane].subsampling_y));
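+ // e.g. a block overhanging the right edge by 12 pixels has
+ // mb_to_right_edge == -96 (units of pixels * 8), and -96 >> 5 == -3,
+ // trimming three 4x4 columns on an unsubsampled plane.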
+
+ i = 0;
+ // Unlike the normal case, here we have to keep track of the row and
+ // column of the blocks we use, so that we know whether we are in the
+ // unrestricted motion border.
+ for (r = 0; r < (1 << sh); r += (1 << tx_size)) {
+ for (c = 0; c < (1 << sw); c += (1 << tx_size)) {
+ if (r < max_blocks_high && c < max_blocks_wide)
+ visit(plane, i, bsize, txfrm_size_b, arg);
+ i += step;
+ }
+ }
+ } else {
+ for (i = 0; i < (1 << ss_block_size); i += step) {
+ visit(plane, i, bsize, txfrm_size_b, arg);
+ }
+ }
+}
+
+static INLINE void foreach_transformed_block(
+ const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize,
+ foreach_transformed_block_visitor visit, void *arg) {
+ int plane;
+
+ for (plane = 0; plane < MAX_MB_PLANE; plane++) {
+ foreach_transformed_block_in_plane(xd, bsize, plane,
+ visit, arg);
+ }
+}
+
+static INLINE void foreach_transformed_block_uv(
+ const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize,
+ foreach_transformed_block_visitor visit, void *arg) {
+ int plane;
+
+ for (plane = 1; plane < MAX_MB_PLANE; plane++) {
+ foreach_transformed_block_in_plane(xd, bsize, plane,
+ visit, arg);
+ }
+}
+
+// TODO(jkoleszar): In principle, pred_w, pred_h are unnecessary, as we could
+// calculate the subsampled BLOCK_SIZE_TYPE, but that type isn't defined for
+// sizes smaller than 16x16 yet.
+typedef void (*foreach_predicted_block_visitor)(int plane, int block,
+ BLOCK_SIZE_TYPE bsize,
+ int pred_w, int pred_h,
+ void *arg);
+static INLINE void foreach_predicted_block_in_plane(
+ const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize, int plane,
+ foreach_predicted_block_visitor visit, void *arg) {
+ int i, x, y;
+
+ // block sizes in number of 4x4 blocks log 2 ("*_b")
+ // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
+ // subsampled size of the block
+ const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
+ const int bhl = b_height_log2(bsize) - xd->plane[plane].subsampling_y;
+
+ // size of the predictor to use.
+ int pred_w, pred_h;
+
+ if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
+ assert(bsize == BLOCK_SIZE_SB8X8);
+ pred_w = 0;
+ pred_h = 0;
+ } else {
+ pred_w = bwl;
+ pred_h = bhl;
+ }
+ assert(pred_w <= bwl);
+ assert(pred_h <= bhl);
+
+ // visit each subblock in raster order
+ i = 0;
+ for (y = 0; y < 1 << bhl; y += 1 << pred_h) {
+ for (x = 0; x < 1 << bwl; x += 1 << pred_w) {
+ visit(plane, i, bsize, pred_w, pred_h, arg);
+ i += 1 << pred_w;
+ }
+ i += (1 << (bwl + pred_h)) - (1 << bwl);
+ }
+}
+static INLINE void foreach_predicted_block(
+ const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize,
+ foreach_predicted_block_visitor visit, void *arg) {
+ int plane;
+
+ for (plane = 0; plane < MAX_MB_PLANE; plane++) {
+ foreach_predicted_block_in_plane(xd, bsize, plane, visit, arg);
+ }
+}
+static INLINE void foreach_predicted_block_uv(
+ const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize,
+ foreach_predicted_block_visitor visit, void *arg) {
+ int plane;
+
+ for (plane = 1; plane < MAX_MB_PLANE; plane++) {
+ foreach_predicted_block_in_plane(xd, bsize, plane, visit, arg);
+ }
+}
+static int raster_block_offset(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize,
+ int plane, int block, int stride) {
+ const int bw = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
+ const int y = 4 * (block >> bw), x = 4 * (block & ((1 << bw) - 1));
+ return y * stride + x;
+}
+static int16_t* raster_block_offset_int16(MACROBLOCKD *xd,
+ BLOCK_SIZE_TYPE bsize,
+ int plane, int block, int16_t *base) {
+ const int stride = plane_block_width(bsize, &xd->plane[plane]);
+ return base + raster_block_offset(xd, bsize, plane, block, stride);
+}
+static uint8_t* raster_block_offset_uint8(MACROBLOCKD *xd,
+ BLOCK_SIZE_TYPE bsize,
+ int plane, int block,
+ uint8_t *base, int stride) {
+ return base + raster_block_offset(xd, bsize, plane, block, stride);
+}
+
+static int txfrm_block_to_raster_block(MACROBLOCKD *xd,
+ BLOCK_SIZE_TYPE bsize,
+ int plane, int block,
+ int ss_txfrm_size) {
+ const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
+ const int txwl = ss_txfrm_size / 2;
+ const int tx_cols_lg2 = bwl - txwl;
+ const int tx_cols = 1 << tx_cols_lg2;
+ const int raster_mb = block >> ss_txfrm_size;
+ const int x = (raster_mb & (tx_cols - 1)) << (txwl);
+ const int y = raster_mb >> tx_cols_lg2 << (txwl);
+ return x + (y << bwl);
+}
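+// A small worked example: for an unsubsampled 32x32 plane (bwl == 3) with
+// 8x8 transforms (ss_txfrm_size == 2), block 12 gives raster_mb == 3, so
+// x == (3 & 3) << 1 == 6 and y == 0, returning 6: that transform covers 4x4
+// columns 6..7 of rows 0..1.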
+
+static void txfrm_block_to_raster_xy(MACROBLOCKD *xd,
+ BLOCK_SIZE_TYPE bsize,
+ int plane, int block,
+ int ss_txfrm_size,
+ int *x, int *y) {
+ const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
+ const int txwl = ss_txfrm_size / 2;
+ const int tx_cols_lg2 = bwl - txwl;
+ const int tx_cols = 1 << tx_cols_lg2;
+ const int raster_mb = block >> ss_txfrm_size;
+ *x = (raster_mb & (tx_cols - 1)) << (txwl);
+ *y = raster_mb >> tx_cols_lg2 << (txwl);
+}
+
+static void extend_for_intra(MACROBLOCKD* const xd, int plane, int block,
+ BLOCK_SIZE_TYPE bsize, int ss_txfrm_size) {
+ const int bw = plane_block_width(bsize, &xd->plane[plane]);
+ const int bh = plane_block_height(bsize, &xd->plane[plane]);
+ int x, y;
+ txfrm_block_to_raster_xy(xd, bsize, plane, block, ss_txfrm_size, &x, &y);
+ x = x * 4 - 1;
+ y = y * 4 - 1;
+ // Copy a pixel into the UMV border if the block extends into it.
+ // TODO(JBB): Should be able to do the full extend in place so we don't have
+ // to do this multiple times.
+ if (xd->mb_to_right_edge < 0) {
+ int umv_border_start = bw
+ + (xd->mb_to_right_edge >> (3 + xd->plane[plane].subsampling_x));
+
+ if (x + bw > umv_border_start)
+ vpx_memset(
+ xd->plane[plane].dst.buf + y * xd->plane[plane].dst.stride
+ + umv_border_start,
+ *(xd->plane[plane].dst.buf + y * xd->plane[plane].dst.stride
+ + umv_border_start - 1),
+ bw);
+ }
+ if (xd->mb_to_bottom_edge < 0) {
+ int umv_border_start = bh
+ + (xd->mb_to_bottom_edge >> (3 + xd->plane[plane].subsampling_y));
+ int i;
+ uint8_t c = *(xd->plane[plane].dst.buf
+ + (umv_border_start - 1) * xd->plane[plane].dst.stride + x);
+
+ uint8_t *d = xd->plane[plane].dst.buf
+ + umv_border_start * xd->plane[plane].dst.stride + x;
+
+ if (y + bh > umv_border_start)
+ for (i = 0; i < bh; i++, d += xd->plane[plane].dst.stride)
+ *d = c;
+ }
+}
+static void set_contexts_on_border(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize,
+ int plane, int ss_tx_size, int eob, int aoff,
+ int loff, ENTROPY_CONTEXT *A,
+ ENTROPY_CONTEXT *L) {
+ const int bw = b_width_log2(bsize), bh = b_height_log2(bsize);
+ const int sw = bw - xd->plane[plane].subsampling_x;
+ const int sh = bh - xd->plane[plane].subsampling_y;
+ int mi_blocks_wide = 1 << sw;
+ int mi_blocks_high = 1 << sh;
+ int tx_size_in_blocks = (1 << ss_tx_size);
+ int above_contexts = tx_size_in_blocks;
+ int left_contexts = tx_size_in_blocks;
+ int pt;
+
+ // xd->mb_to_right_edge is in units of pixels * 8. This converts
+ // it to 4x4 block sizes.
+ if (xd->mb_to_right_edge < 0) {
+ mi_blocks_wide += (xd->mb_to_right_edge
+ >> (5 + xd->plane[plane].subsampling_x));
+ }
+
+ // This code avoids writing into contexts that lie outside our border;
+ // any contexts that do fall outside are set to 0.
+ if (above_contexts + aoff > mi_blocks_wide)
+ above_contexts = mi_blocks_wide - aoff;
+
+ if (xd->mb_to_bottom_edge < 0) {
+ mi_blocks_high += (xd->mb_to_bottom_edge
+ >> (5 + xd->plane[plane].subsampling_y));
+ }
+ if (left_contexts + loff > mi_blocks_high) {
+ left_contexts = mi_blocks_high - loff;
+ }
+
+ for (pt = 0; pt < above_contexts; pt++)
+ A[pt] = eob > 0;
+ for (pt = above_contexts; pt < (1 << ss_tx_size); pt++)
+ A[pt] = 0;
+ for (pt = 0; pt < left_contexts; pt++)
+ L[pt] = eob > 0;
+ for (pt = left_contexts; pt < (1 << ss_tx_size); pt++)
+ L[pt] = 0;
+}
+
+
+#endif // VP9_COMMON_VP9_BLOCKD_H_
diff --git a/libvpx/vp9/common/vp9_common.h b/libvpx/vp9/common/vp9_common.h
new file mode 100644
index 0000000..0d7babf
--- /dev/null
+++ b/libvpx/vp9/common/vp9_common.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_COMMON_H_
+#define VP9_COMMON_VP9_COMMON_H_
+
+/* Interface header for common constant data structures and lookup tables */
+
+#include <assert.h>
+
+#include "./vpx_config.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx/vpx_integer.h"
+
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#define MAX(x, y) (((x) > (y)) ? (x) : (y))
+
+#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))
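+// e.g. ROUND_POWER_OF_TWO(6, 2) = (6 + 2) >> 2 = 2, i.e. 6 / 4 = 1.5 rounded
+// to the nearest integer.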
+
+/* If we don't want to use ROUND_POWER_OF_TWO macro
+static INLINE int16_t round_power_of_two(int16_t value, int n) {
+ return (value + (1 << (n - 1))) >> n;
+}*/
+
+// Only needed for fixed-size arrays; for structs, just assign.
+#define vp9_copy(dest, src) { \
+ assert(sizeof(dest) == sizeof(src)); \
+ vpx_memcpy(dest, src, sizeof(src)); \
+ }
+
+// Use this for variably-sized arrays.
+#define vp9_copy_array(dest, src, n) { \
+ assert(sizeof(*dest) == sizeof(*src)); \
+ vpx_memcpy(dest, src, n * sizeof(*src)); \
+ }
+
+#define vp9_zero(dest) vpx_memset(&dest, 0, sizeof(dest));
+#define vp9_zero_array(dest, n) vpx_memset(dest, 0, n * sizeof(*dest));
+
+static INLINE uint8_t clip_pixel(int val) {
+ return (val > 255) ? 255u : (val < 0) ? 0u : val;
+}
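+// e.g. clip_pixel(300) == 255 and clip_pixel(-5) == 0.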
+
+static INLINE int clamp(int value, int low, int high) {
+ return value < low ? low : (value > high ? high : value);
+}
+
+static INLINE double fclamp(double value, double low, double high) {
+ return value < low ? low : (value > high ? high : value);
+}
+
+static INLINE int multiple8(int value) {
+ return (value + 7) & ~7;
+}
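+// e.g. multiple8(65) = (65 + 7) & ~7 = 72; values already a multiple of 8
+// pass through unchanged.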
+
+#define SYNC_CODE_0 0x49
+#define SYNC_CODE_1 0x83
+#define SYNC_CODE_2 0x42
+
+
+#endif // VP9_COMMON_VP9_COMMON_H_
diff --git a/libvpx/vp9/common/vp9_convolve.c b/libvpx/vp9/common/vp9_convolve.c
new file mode 100644
index 0000000..46ae503
--- /dev/null
+++ b/libvpx/vp9/common/vp9_convolve.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "vp9/common/vp9_convolve.h"
+
+#include <assert.h>
+
+#include "./vpx_config.h"
+#include "./vp9_rtcd.h"
+#include "vp9/common/vp9_common.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_ports/mem.h"
+
+#define VP9_FILTER_WEIGHT 128
+#define VP9_FILTER_SHIFT 7
+
+/* Assume a bank of 16 filters to choose from. There are several possible
+ * implementations for filter wrapping behavior, since we want to be able to
+ * pick which filter to start with. We could:
+ *
+ * 1) make filter_ a pointer to the base of the filter array, and then add an
+ * additional offset parameter, to choose the starting filter.
+ * 2) use a pointer to 2 periods worth of filters, so that even if the original
+ * phase offset is at 15/16, we'll have valid data to read. The filter
+ * tables become [32][8], and the second half is duplicated.
+ * 3) fix the alignment of the filter tables, so that we know the 0/16 is
+ * always 256 byte aligned.
+ *
+ * Implementations 2 and 3 are likely preferable, as they avoid two extra
+ * parameters, and switching between them is trivial, with the
+ * ALIGN_FILTERS_256 macro, below.
+ */
+#define ALIGN_FILTERS_256 1
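+/* A worked sketch of option 3: 16 phases of 8-tap int16_t filters occupy
+ * 16 * 8 * sizeof(int16_t) = 256 bytes, so masking an in-table pointer with
+ * ~0xff recovers the table base, and
+ *   (filter_x - filter_x_base) / taps
+ * then yields the starting phase in [0, 15], which is how x0_q4 is derived
+ * in the loops below.
+ */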
+
+static void convolve_horiz_c(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x0, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h, int taps) {
+ int x, y, k, sum;
+ const int16_t *filter_x_base = filter_x0;
+
+#if ALIGN_FILTERS_256
+ filter_x_base = (const int16_t *)(((intptr_t)filter_x0) & ~(intptr_t)0xff);
+#endif
+
+ /* Adjust base pointer address for this source line */
+ src -= taps / 2 - 1;
+
+ for (y = 0; y < h; ++y) {
+ /* Pointer to filter to use */
+ const int16_t *filter_x = filter_x0;
+
+ /* Initial phase offset */
+ int x0_q4 = (filter_x - filter_x_base) / taps;
+ int x_q4 = x0_q4;
+
+ for (x = 0; x < w; ++x) {
+ /* Per-pixel src offset */
+ int src_x = (x_q4 - x0_q4) >> 4;
+
+ for (sum = 0, k = 0; k < taps; ++k) {
+ sum += src[src_x + k] * filter_x[k];
+ }
+ sum += (VP9_FILTER_WEIGHT >> 1);
+ dst[x] = clip_pixel(sum >> VP9_FILTER_SHIFT);
+
+ /* Adjust source and filter to use for the next pixel */
+ x_q4 += x_step_q4;
+ filter_x = filter_x_base + (x_q4 & 0xf) * taps;
+ }
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+static void convolve_avg_horiz_c(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x0, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h, int taps) {
+ int x, y, k, sum;
+ const int16_t *filter_x_base = filter_x0;
+
+#if ALIGN_FILTERS_256
+ filter_x_base = (const int16_t *)(((intptr_t)filter_x0) & ~(intptr_t)0xff);
+#endif
+
+ /* Adjust base pointer address for this source line */
+ src -= taps / 2 - 1;
+
+ for (y = 0; y < h; ++y) {
+ /* Pointer to filter to use */
+ const int16_t *filter_x = filter_x0;
+
+ /* Initial phase offset */
+ int x0_q4 = (filter_x - filter_x_base) / taps;
+ int x_q4 = x0_q4;
+
+ for (x = 0; x < w; ++x) {
+ /* Per-pixel src offset */
+ int src_x = (x_q4 - x0_q4) >> 4;
+
+ for (sum = 0, k = 0; k < taps; ++k) {
+ sum += src[src_x + k] * filter_x[k];
+ }
+ sum += (VP9_FILTER_WEIGHT >> 1);
+ dst[x] = (dst[x] + clip_pixel(sum >> VP9_FILTER_SHIFT) + 1) >> 1;
+
+ /* Adjust source and filter to use for the next pixel */
+ x_q4 += x_step_q4;
+ filter_x = filter_x_base + (x_q4 & 0xf) * taps;
+ }
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+static void convolve_vert_c(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y0, int y_step_q4,
+ int w, int h, int taps) {
+ int x, y, k, sum;
+
+ const int16_t *filter_y_base = filter_y0;
+
+#if ALIGN_FILTERS_256
+ filter_y_base = (const int16_t *)(((intptr_t)filter_y0) & ~(intptr_t)0xff);
+#endif
+
+ /* Adjust base pointer address for this source column */
+ src -= src_stride * (taps / 2 - 1);
+ for (x = 0; x < w; ++x) {
+ /* Pointer to filter to use */
+ const int16_t *filter_y = filter_y0;
+
+ /* Initial phase offset */
+ int y0_q4 = (filter_y - filter_y_base) / taps;
+ int y_q4 = y0_q4;
+
+ for (y = 0; y < h; ++y) {
+ /* Per-pixel src offset */
+ int src_y = (y_q4 - y0_q4) >> 4;
+
+ for (sum = 0, k = 0; k < taps; ++k) {
+ sum += src[(src_y + k) * src_stride] * filter_y[k];
+ }
+ sum += (VP9_FILTER_WEIGHT >> 1);
+ dst[y * dst_stride] = clip_pixel(sum >> VP9_FILTER_SHIFT);
+
+ /* Adjust source and filter to use for the next pixel */
+ y_q4 += y_step_q4;
+ filter_y = filter_y_base + (y_q4 & 0xf) * taps;
+ }
+ ++src;
+ ++dst;
+ }
+}
+
+static void convolve_avg_vert_c(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y0, int y_step_q4,
+ int w, int h, int taps) {
+ int x, y, k, sum;
+
+ const int16_t *filter_y_base = filter_y0;
+
+#if ALIGN_FILTERS_256
+ filter_y_base = (const int16_t *)(((intptr_t)filter_y0) & ~(intptr_t)0xff);
+#endif
+
+ /* Adjust base pointer address for this source column */
+ src -= src_stride * (taps / 2 - 1);
+ for (x = 0; x < w; ++x) {
+ /* Pointer to filter to use */
+ const int16_t *filter_y = filter_y0;
+
+ /* Initial phase offset */
+ int y0_q4 = (filter_y - filter_y_base) / taps;
+ int y_q4 = y0_q4;
+
+ for (y = 0; y < h; ++y) {
+ /* Per-pixel src offset */
+ int src_y = (y_q4 - y0_q4) >> 4;
+
+ for (sum = 0, k = 0; k < taps; ++k) {
+ sum += src[(src_y + k) * src_stride] * filter_y[k];
+ }
+ sum += (VP9_FILTER_WEIGHT >> 1);
+ dst[y * dst_stride] =
+ (dst[y * dst_stride] + clip_pixel(sum >> VP9_FILTER_SHIFT) + 1) >> 1;
+
+ /* Adjust source and filter to use for the next pixel */
+ y_q4 += y_step_q4;
+ filter_y = filter_y_base + (y_q4 & 0xf) * taps;
+ }
+ ++src;
+ ++dst;
+ }
+}
+
+static void convolve_c(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h, int taps) {
+ /* Fixed size intermediate buffer places limits on parameters.
+ * Maximum intermediate_height is 135, for y_step_q4 == 32,
+ * h == 64, taps == 8.
+ */
+ uint8_t temp[64 * 135];
+ int intermediate_height = ((h * y_step_q4) >> 4) + taps - 1;
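+ /* Worked bound: ((64 * 32) >> 4) + 8 - 1 = 128 + 7 = 135 rows, which is
+ exactly what the 64 * 135 temp buffer above provides. */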
+
+ assert(w <= 64);
+ assert(h <= 64);
+ assert(taps <= 8);
+ assert(y_step_q4 <= 32);
+
+ if (intermediate_height < h)
+ intermediate_height = h;
+
+ convolve_horiz_c(src - src_stride * (taps / 2 - 1), src_stride,
+ temp, 64,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, intermediate_height, taps);
+ convolve_vert_c(temp + 64 * (taps / 2 - 1), 64, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h, taps);
+}
+
+static void convolve_avg_c(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h, int taps) {
+ /* Fixed size intermediate buffer places limits on parameters.
+ * Maximum intermediate_height is 135, for y_step_q4 == 32,
+ * h == 64, taps == 8.
+ */
+ uint8_t temp[64 * 135];
+ int intermediate_height = ((h * y_step_q4) >> 4) + taps - 1;
+
+ assert(w <= 64);
+ assert(h <= 64);
+ assert(taps <= 8);
+ assert(y_step_q4 <= 32);
+
+ if (intermediate_height < h)
+ intermediate_height = h;
+
+ convolve_horiz_c(src - src_stride * (taps / 2 - 1), src_stride,
+ temp, 64,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, intermediate_height, taps);
+ convolve_avg_vert_c(temp + 64 * (taps / 2 - 1), 64, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h, taps);
+}
+
+void vp9_convolve8_horiz_c(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ convolve_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h, 8);
+}
+
+void vp9_convolve8_avg_horiz_c(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ convolve_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h, 8);
+}
+
+void vp9_convolve8_vert_c(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ convolve_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h, 8);
+}
+
+void vp9_convolve8_avg_vert_c(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ convolve_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h, 8);
+}
+
+void vp9_convolve8_c(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ convolve_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h, 8);
+}
+
+void vp9_convolve8_avg_c(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ /* Fixed size intermediate buffer places limits on parameters. */
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp, 64 * 64);
+ assert(w <= 64);
+ assert(h <= 64);
+
+ vp9_convolve8(src, src_stride,
+ temp, 64,
+ filter_x, x_step_q4,
+ filter_y, y_step_q4,
+ w, h);
+ vp9_convolve_avg(temp, 64,
+ dst, dst_stride,
+ NULL, 0, /* These unused parameters should be removed! */
+ NULL, 0, /* These unused parameters should be removed! */
+ w, h);
+}
+
+void vp9_convolve_copy(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int filter_x_stride,
+ const int16_t *filter_y, int filter_y_stride,
+ int w, int h) {
+ if (w == 16 && h == 16) {
+ vp9_copy_mem16x16(src, src_stride, dst, dst_stride);
+ } else if (w == 8 && h == 8) {
+ vp9_copy_mem8x8(src, src_stride, dst, dst_stride);
+ } else if (w == 8 && h == 4) {
+ vp9_copy_mem8x4(src, src_stride, dst, dst_stride);
+ } else {
+ int r;
+
+ for (r = h; r > 0; --r) {
+ memcpy(dst, src, w);
+ src += src_stride;
+ dst += dst_stride;
+ }
+ }
+}
+
+void vp9_convolve_avg(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int filter_x_stride,
+ const int16_t *filter_y, int filter_y_stride,
+ int w, int h) {
+ int x, y;
+
+ for (y = 0; y < h; ++y) {
+ for (x = 0; x < w; ++x) {
+ dst[x] = (dst[x] + src[x] + 1) >> 1;
+ }
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
diff --git a/libvpx/vp9/common/vp9_convolve.h b/libvpx/vp9/common/vp9_convolve.h
new file mode 100644
index 0000000..0596080
--- /dev/null
+++ b/libvpx/vp9/common/vp9_convolve.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VP9_COMMON_CONVOLVE_H_
+#define VP9_COMMON_CONVOLVE_H_
+
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+
+typedef void (*convolve_fn_t)(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h);
+
+// Not a convolution, a block copy conforming to the convolution prototype
+void vp9_convolve_copy(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h);
+
+// Not a convolution, a block average conforming to the convolution prototype
+void vp9_convolve_avg(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h);
+
+struct subpix_fn_table {
+ const int16_t (*filter_x)[8];
+ const int16_t (*filter_y)[8];
+};
+
+#endif // VP9_COMMON_CONVOLVE_H_
diff --git a/libvpx/vp9/common/vp9_debugmodes.c b/libvpx/vp9/common/vp9_debugmodes.c
new file mode 100644
index 0000000..5841f80
--- /dev/null
+++ b/libvpx/vp9/common/vp9_debugmodes.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include "vp9/common/vp9_blockd.h"
+
+void vp9_print_modes_and_motion_vectors(MODE_INFO *mi, int rows, int cols,
+ int frame, char *file) {
+ int mi_row;
+ int mi_col;
+ int mi_index = 0;
+ FILE *mvs = fopen(file, "a");
+
+ // Print out the superblock types
+ fprintf(mvs, "SB Types for Frame %d\n", frame);
+
+ for (mi_row = 0; mi_row < rows; mi_row++) {
+ for (mi_col = 0; mi_col < cols; mi_col++) {
+ fprintf(mvs, "%2d ", mi[mi_index].mbmi.sb_type);
+
+ mi_index++;
+ }
+
+ fprintf(mvs, "\n");
+ mi_index += 8;
+ }
+
+ // Print out the macroblock Y modes
+ fprintf(mvs, "Mb Modes for Frame %d\n", frame);
+ mi_index = 0;
+ for (mi_row = 0; mi_row < rows; mi_row++) {
+ for (mi_col = 0; mi_col < cols; mi_col++) {
+ fprintf(mvs, "%2d ", mi[mi_index].mbmi.mode);
+
+ mi_index++;
+ }
+
+ fprintf(mvs, "\n");
+ mi_index += 8;
+ }
+
+ fprintf(mvs, "\n");
+
+ mi_index = 0;
+ fprintf(mvs, "Mb mv ref for Frame %d\n", frame);
+
+ for (mi_row = 0; mi_row < rows; mi_row++) {
+ for (mi_col = 0; mi_col < cols; mi_col++) {
+ fprintf(mvs, "%2d ", mi[mi_index].mbmi.ref_frame[0]);
+
+ mi_index++;
+ }
+
+ fprintf(mvs, "\n");
+ mi_index += 8;
+ }
+ fprintf(mvs, "\n");
+
+ mi_index = 0;
+ fprintf(mvs, "Mb mv ref for Frame %d\n", frame);
+
+ for (mi_row = 0; mi_row < rows; mi_row++) {
+ for (mi_col = 0; mi_col < cols; mi_col++) {
+ fprintf(mvs, "%4d:%4d ", mi[mi_index].mbmi.mv[0].as_mv.row,
+ mi[mi_index].mbmi.mv[0].as_mv.col);
+
+ mi_index++;
+ }
+
+ fprintf(mvs, "\n");
+ mi_index += 8;
+ }
+
+ fprintf(mvs, "\n");
+
+ /* print out the macroblock txform sizes */
+ mi_index = 0;
+ fprintf(mvs, "TXFM size for Frame %d\n", frame);
+
+ for (mi_row = 0; mi_row < rows; mi_row++) {
+ for (mi_col = 0; mi_col < cols; mi_col++) {
+ fprintf(mvs, "%2d ", mi[mi_index].mbmi.txfm_size);
+
+ mi_index++;
+ }
+
+ mi_index += 8;
+ fprintf(mvs, "\n");
+ }
+
+ fprintf(mvs, "\n");
+
+ /* print out the macroblock UV modes */
+ mi_index = 0;
+ fprintf(mvs, "UV Modes for Frame %d\n", frame);
+
+ for (mi_row = 0; mi_row < rows; mi_row++) {
+ for (mi_col = 0; mi_col < cols; mi_col++) {
+ fprintf(mvs, "%2d ", mi[mi_index].mbmi.uv_mode);
+
+ mi_index++;
+ }
+
+ mi_index += 8;
+ fprintf(mvs, "\n");
+ }
+
+ fprintf(mvs, "\n");
+
+ /* print out the macroblock mvs */
+ mi_index = 0;
+ fprintf(mvs, "MVs for Frame %d\n", frame);
+
+ for (mi_row = 0; mi_row < rows; mi_row++) {
+ for (mi_col = 0; mi_col < cols; mi_col++) {
+ fprintf(mvs, "%5d:%-5d", mi[mi_index].mbmi.mv[0].as_mv.row / 2,
+ mi[mi_index].mbmi.mv[0].as_mv.col / 2);
+
+ mi_index++;
+ }
+
+ mi_index += 8;
+ fprintf(mvs, "\n");
+ }
+
+ fprintf(mvs, "\n");
+
+ fclose(mvs);
+}
diff --git a/libvpx/vp9/common/vp9_default_coef_probs.h b/libvpx/vp9/common/vp9_default_coef_probs.h
new file mode 100644
index 0000000..1954093
--- /dev/null
+++ b/libvpx/vp9/common/vp9_default_coef_probs.h
@@ -0,0 +1,1384 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+*/
+
+
+/* Generated file, included by vp9_entropy.c */
+
+#if CONFIG_BALANCED_COEFTREE
+static const vp9_coeff_probs_model default_coef_probs_4x4[BLOCK_TYPES] = {
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 6, 213, 178 },
+ { 26, 113, 132 },
+ { 34, 17, 68 }
+ }, { /* Coeff Band 1 */
+ { 66, 96, 178 },
+ { 63, 96, 174 },
+ { 67, 54, 154 },
+ { 62, 28, 126 },
+ { 48, 9, 84 },
+ { 20, 1, 32 }
+ }, { /* Coeff Band 2 */
+ { 64, 144, 206 },
+ { 70, 99, 191 },
+ { 69, 36, 152 },
+ { 55, 9, 106 },
+ { 35, 1, 60 },
+ { 14, 1, 22 }
+ }, { /* Coeff Band 3 */
+ { 82, 154, 222 },
+ { 83, 112, 205 },
+ { 81, 31, 164 },
+ { 62, 7, 118 },
+ { 42, 1, 74 },
+ { 18, 1, 30 }
+ }, { /* Coeff Band 4 */
+ { 52, 179, 233 },
+ { 64, 132, 214 },
+ { 73, 36, 170 },
+ { 59, 8, 116 },
+ { 38, 1, 65 },
+ { 15, 1, 26 }
+ }, { /* Coeff Band 5 */
+ { 29, 175, 238 },
+ { 26, 169, 223 },
+ { 41, 80, 182 },
+ { 39, 32, 127 },
+ { 26, 10, 69 },
+ { 11, 2, 28 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 21, 226, 234 },
+ { 52, 182, 212 },
+ { 80, 112, 177 }
+ }, { /* Coeff Band 1 */
+ { 111, 164, 243 },
+ { 88, 152, 231 },
+ { 90, 43, 186 },
+ { 70, 12, 132 },
+ { 44, 2, 76 },
+ { 19, 1, 33 }
+ }, { /* Coeff Band 2 */
+ { 96, 185, 246 },
+ { 99, 127, 231 },
+ { 88, 21, 177 },
+ { 64, 5, 122 },
+ { 38, 1, 69 },
+ { 18, 1, 30 }
+ }, { /* Coeff Band 3 */
+ { 84, 206, 249 },
+ { 94, 147, 237 },
+ { 95, 33, 187 },
+ { 71, 8, 131 },
+ { 47, 1, 83 },
+ { 26, 1, 44 }
+ }, { /* Coeff Band 4 */
+ { 38, 221, 252 },
+ { 58, 177, 241 },
+ { 78, 46, 188 },
+ { 59, 9, 122 },
+ { 34, 1, 66 },
+ { 18, 1, 34 }
+ }, { /* Coeff Band 5 */
+ { 21, 216, 253 },
+ { 21, 206, 244 },
+ { 42, 93, 200 },
+ { 43, 41, 146 },
+ { 36, 13, 93 },
+ { 31, 1, 55 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 7, 213, 219 },
+ { 23, 139, 182 },
+ { 38, 60, 125 }
+ }, { /* Coeff Band 1 */
+ { 69, 156, 220 },
+ { 52, 178, 213 },
+ { 69, 111, 190 },
+ { 69, 58, 155 },
+ { 58, 21, 104 },
+ { 39, 7, 60 }
+ }, { /* Coeff Band 2 */
+ { 68, 189, 228 },
+ { 70, 158, 221 },
+ { 83, 64, 189 },
+ { 73, 18, 141 },
+ { 48, 4, 88 },
+ { 23, 1, 41 }
+ }, { /* Coeff Band 3 */
+ { 99, 194, 236 },
+ { 91, 138, 224 },
+ { 91, 53, 189 },
+ { 74, 20, 142 },
+ { 48, 6, 90 },
+ { 22, 1, 41 }
+ }, { /* Coeff Band 4 */
+ { 52, 203, 244 },
+ { 60, 168, 231 },
+ { 75, 62, 189 },
+ { 61, 18, 132 },
+ { 38, 4, 72 },
+ { 17, 1, 39 }
+ }, { /* Coeff Band 5 */
+ { 33, 192, 247 },
+ { 31, 185, 234 },
+ { 46, 85, 185 },
+ { 39, 35, 132 },
+ { 28, 15, 80 },
+ { 13, 5, 38 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 5, 247, 246 },
+ { 28, 209, 228 },
+ { 65, 137, 203 }
+ }, { /* Coeff Band 1 */
+ { 69, 208, 250 },
+ { 54, 207, 242 },
+ { 81, 92, 204 },
+ { 70, 54, 153 },
+ { 58, 40, 108 },
+ { 58, 35, 71 }
+ }, { /* Coeff Band 2 */
+ { 65, 215, 250 },
+ { 72, 185, 239 },
+ { 92, 50, 197 },
+ { 75, 14, 147 },
+ { 49, 2, 99 },
+ { 26, 1, 53 }
+ }, { /* Coeff Band 3 */
+ { 70, 220, 251 },
+ { 76, 186, 241 },
+ { 90, 65, 198 },
+ { 75, 26, 151 },
+ { 58, 12, 112 },
+ { 34, 6, 49 }
+ }, { /* Coeff Band 4 */
+ { 34, 224, 253 },
+ { 44, 204, 245 },
+ { 69, 85, 204 },
+ { 64, 31, 150 },
+ { 44, 2, 78 },
+ { 1, 1, 128 }
+ }, { /* Coeff Band 5 */
+ { 25, 216, 253 },
+ { 21, 215, 248 },
+ { 47, 108, 214 },
+ { 47, 48, 160 },
+ { 26, 20, 90 },
+ { 64, 171, 128 }
+ }
+ }
+ }
+};
+static const vp9_coeff_probs_model default_coef_probs_8x8[BLOCK_TYPES] = {
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 9, 203, 199 },
+ { 26, 92, 128 },
+ { 28, 11, 55 }
+ }, { /* Coeff Band 1 */
+ { 99, 54, 160 },
+ { 78, 99, 155 },
+ { 80, 44, 138 },
+ { 71, 17, 115 },
+ { 51, 5, 80 },
+ { 27, 1, 40 }
+ }, { /* Coeff Band 2 */
+ { 135, 81, 190 },
+ { 113, 61, 182 },
+ { 93, 16, 153 },
+ { 70, 4, 115 },
+ { 41, 1, 68 },
+ { 16, 1, 27 }
+ }, { /* Coeff Band 3 */
+ { 155, 103, 214 },
+ { 129, 48, 199 },
+ { 95, 10, 159 },
+ { 63, 1, 110 },
+ { 32, 1, 58 },
+ { 12, 1, 21 }
+ }, { /* Coeff Band 4 */
+ { 163, 149, 231 },
+ { 137, 69, 213 },
+ { 95, 11, 164 },
+ { 62, 3, 108 },
+ { 32, 1, 57 },
+ { 13, 1, 22 }
+ }, { /* Coeff Band 5 */
+ { 136, 189, 239 },
+ { 123, 102, 223 },
+ { 97, 19, 170 },
+ { 66, 4, 111 },
+ { 38, 1, 60 },
+ { 18, 1, 26 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 24, 226, 244 },
+ { 54, 178, 211 },
+ { 80, 74, 152 }
+ }, { /* Coeff Band 1 */
+ { 145, 153, 236 },
+ { 101, 163, 223 },
+ { 108, 50, 187 },
+ { 90, 22, 145 },
+ { 66, 8, 97 },
+ { 42, 4, 50 }
+ }, { /* Coeff Band 2 */
+ { 150, 159, 238 },
+ { 128, 90, 218 },
+ { 94, 9, 163 },
+ { 64, 3, 110 },
+ { 34, 1, 61 },
+ { 13, 1, 24 }
+ }, { /* Coeff Band 3 */
+ { 151, 162, 242 },
+ { 135, 80, 222 },
+ { 93, 9, 166 },
+ { 61, 3, 111 },
+ { 31, 1, 59 },
+ { 12, 1, 22 }
+ }, { /* Coeff Band 4 */
+ { 161, 170, 245 },
+ { 140, 84, 228 },
+ { 99, 8, 174 },
+ { 64, 1, 116 },
+ { 34, 1, 63 },
+ { 14, 1, 26 }
+ }, { /* Coeff Band 5 */
+ { 138, 197, 246 },
+ { 127, 109, 233 },
+ { 100, 16, 179 },
+ { 66, 3, 119 },
+ { 37, 1, 66 },
+ { 16, 1, 30 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 6, 216, 212 },
+ { 25, 134, 171 },
+ { 43, 48, 118 }
+ }, { /* Coeff Band 1 */
+ { 93, 112, 209 },
+ { 66, 159, 206 },
+ { 82, 78, 184 },
+ { 75, 28, 148 },
+ { 46, 4, 82 },
+ { 18, 1, 28 }
+ }, { /* Coeff Band 2 */
+ { 108, 148, 220 },
+ { 90, 130, 216 },
+ { 92, 40, 186 },
+ { 73, 10, 135 },
+ { 46, 1, 79 },
+ { 20, 1, 35 }
+ }, { /* Coeff Band 3 */
+ { 125, 173, 232 },
+ { 109, 117, 223 },
+ { 97, 31, 183 },
+ { 71, 7, 127 },
+ { 44, 1, 76 },
+ { 21, 1, 36 }
+ }, { /* Coeff Band 4 */
+ { 133, 195, 236 },
+ { 112, 121, 224 },
+ { 97, 23, 178 },
+ { 69, 3, 122 },
+ { 42, 1, 72 },
+ { 19, 1, 34 }
+ }, { /* Coeff Band 5 */
+ { 132, 180, 238 },
+ { 119, 102, 225 },
+ { 101, 18, 179 },
+ { 71, 3, 124 },
+ { 42, 1, 70 },
+ { 17, 1, 28 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 5, 242, 250 },
+ { 26, 198, 226 },
+ { 58, 98, 168 }
+ }, { /* Coeff Band 1 */
+ { 82, 201, 246 },
+ { 50, 219, 237 },
+ { 94, 107, 205 },
+ { 89, 61, 167 },
+ { 77, 31, 131 },
+ { 57, 14, 91 }
+ }, { /* Coeff Band 2 */
+ { 99, 202, 247 },
+ { 96, 165, 234 },
+ { 100, 31, 190 },
+ { 72, 8, 131 },
+ { 41, 1, 72 },
+ { 14, 1, 24 }
+ }, { /* Coeff Band 3 */
+ { 108, 204, 248 },
+ { 107, 156, 235 },
+ { 103, 27, 186 },
+ { 71, 4, 124 },
+ { 39, 1, 66 },
+ { 14, 1, 19 }
+ }, { /* Coeff Band 4 */
+ { 120, 211, 248 },
+ { 118, 149, 234 },
+ { 107, 19, 182 },
+ { 72, 3, 126 },
+ { 40, 1, 69 },
+ { 16, 1, 24 }
+ }, { /* Coeff Band 5 */
+ { 127, 199, 245 },
+ { 122, 125, 232 },
+ { 112, 20, 186 },
+ { 82, 3, 136 },
+ { 55, 1, 88 },
+ { 10, 1, 38 }
+ }
+ }
+ }
+};
+static const vp9_coeff_probs_model default_coef_probs_16x16[BLOCK_TYPES] = {
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 25, 9, 101 },
+ { 25, 2, 67 },
+ { 15, 1, 28 }
+ }, { /* Coeff Band 1 */
+ { 67, 30, 118 },
+ { 61, 56, 116 },
+ { 60, 31, 105 },
+ { 52, 11, 85 },
+ { 34, 2, 54 },
+ { 14, 1, 22 }
+ }, { /* Coeff Band 2 */
+ { 107, 58, 149 },
+ { 92, 53, 147 },
+ { 78, 14, 123 },
+ { 56, 3, 87 },
+ { 35, 1, 56 },
+ { 17, 1, 27 }
+ }, { /* Coeff Band 3 */
+ { 142, 61, 171 },
+ { 111, 30, 162 },
+ { 80, 4, 128 },
+ { 53, 1, 87 },
+ { 31, 1, 52 },
+ { 14, 1, 24 }
+ }, { /* Coeff Band 4 */
+ { 171, 73, 200 },
+ { 129, 28, 184 },
+ { 86, 3, 140 },
+ { 54, 1, 90 },
+ { 28, 1, 49 },
+ { 12, 1, 21 }
+ }, { /* Coeff Band 5 */
+ { 193, 129, 227 },
+ { 148, 28, 200 },
+ { 90, 2, 144 },
+ { 53, 1, 90 },
+ { 28, 1, 50 },
+ { 13, 1, 22 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 60, 7, 234 },
+ { 64, 4, 184 },
+ { 56, 1, 104 }
+ }, { /* Coeff Band 1 */
+ { 150, 111, 210 },
+ { 87, 185, 202 },
+ { 101, 81, 177 },
+ { 90, 34, 142 },
+ { 67, 11, 95 },
+ { 38, 2, 51 }
+ }, { /* Coeff Band 2 */
+ { 153, 139, 218 },
+ { 120, 72, 195 },
+ { 90, 11, 147 },
+ { 63, 3, 101 },
+ { 39, 1, 61 },
+ { 20, 1, 33 }
+ }, { /* Coeff Band 3 */
+ { 171, 132, 223 },
+ { 131, 56, 200 },
+ { 92, 6, 147 },
+ { 58, 1, 95 },
+ { 32, 1, 52 },
+ { 14, 1, 23 }
+ }, { /* Coeff Band 4 */
+ { 183, 137, 227 },
+ { 139, 48, 204 },
+ { 91, 3, 148 },
+ { 55, 1, 91 },
+ { 28, 1, 47 },
+ { 13, 1, 21 }
+ }, { /* Coeff Band 5 */
+ { 198, 149, 234 },
+ { 153, 32, 208 },
+ { 95, 2, 148 },
+ { 55, 1, 90 },
+ { 30, 1, 51 },
+ { 16, 1, 25 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 7, 209, 217 },
+ { 31, 106, 151 },
+ { 40, 21, 86 }
+ }, { /* Coeff Band 1 */
+ { 101, 71, 184 },
+ { 74, 131, 177 },
+ { 88, 50, 158 },
+ { 78, 16, 129 },
+ { 51, 2, 82 },
+ { 18, 1, 29 }
+ }, { /* Coeff Band 2 */
+ { 116, 115, 199 },
+ { 102, 88, 191 },
+ { 94, 22, 160 },
+ { 74, 6, 122 },
+ { 47, 1, 77 },
+ { 18, 1, 30 }
+ }, { /* Coeff Band 3 */
+ { 157, 124, 210 },
+ { 130, 53, 201 },
+ { 102, 10, 165 },
+ { 73, 1, 120 },
+ { 42, 1, 69 },
+ { 16, 1, 27 }
+ }, { /* Coeff Band 4 */
+ { 174, 147, 225 },
+ { 134, 67, 212 },
+ { 100, 10, 168 },
+ { 66, 1, 111 },
+ { 36, 1, 60 },
+ { 16, 1, 27 }
+ }, { /* Coeff Band 5 */
+ { 185, 165, 232 },
+ { 147, 56, 214 },
+ { 105, 5, 165 },
+ { 66, 1, 108 },
+ { 35, 1, 59 },
+ { 16, 1, 27 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 3, 232, 245 },
+ { 18, 162, 210 },
+ { 38, 64, 131 }
+ }, { /* Coeff Band 1 */
+ { 84, 187, 239 },
+ { 35, 231, 231 },
+ { 82, 150, 209 },
+ { 87, 97, 181 },
+ { 81, 64, 151 },
+ { 67, 60, 119 }
+ }, { /* Coeff Band 2 */
+ { 107, 185, 239 },
+ { 100, 149, 224 },
+ { 107, 34, 185 },
+ { 83, 12, 141 },
+ { 49, 4, 92 },
+ { 21, 1, 40 }
+ }, { /* Coeff Band 3 */
+ { 125, 184, 243 },
+ { 121, 127, 228 },
+ { 113, 25, 185 },
+ { 82, 6, 134 },
+ { 48, 1, 82 },
+ { 26, 1, 38 }
+ }, { /* Coeff Band 4 */
+ { 143, 185, 245 },
+ { 133, 115, 231 },
+ { 114, 14, 184 },
+ { 77, 3, 126 },
+ { 43, 1, 68 },
+ { 34, 1, 40 }
+ }, { /* Coeff Band 5 */
+ { 170, 194, 241 },
+ { 151, 80, 226 },
+ { 118, 9, 180 },
+ { 81, 1, 130 },
+ { 51, 1, 78 },
+ { 18, 1, 49 }
+ }
+ }
+ }
+};
+static const vp9_coeff_probs_model default_coef_probs_32x32[BLOCK_TYPES] = {
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 29, 42, 137 },
+ { 26, 3, 60 },
+ { 13, 1, 23 }
+ }, { /* Coeff Band 1 */
+ { 69, 36, 122 },
+ { 63, 57, 123 },
+ { 60, 33, 112 },
+ { 52, 11, 90 },
+ { 32, 2, 52 },
+ { 10, 1, 15 }
+ }, { /* Coeff Band 2 */
+ { 107, 55, 143 },
+ { 86, 69, 143 },
+ { 74, 24, 116 },
+ { 52, 5, 78 },
+ { 29, 1, 44 },
+ { 12, 1, 18 }
+ }, { /* Coeff Band 3 */
+ { 137, 71, 160 },
+ { 107, 34, 152 },
+ { 73, 6, 114 },
+ { 44, 1, 69 },
+ { 25, 1, 40 },
+ { 12, 1, 18 }
+ }, { /* Coeff Band 4 */
+ { 165, 70, 174 },
+ { 118, 24, 159 },
+ { 74, 3, 117 },
+ { 45, 1, 73 },
+ { 26, 1, 43 },
+ { 12, 1, 19 }
+ }, { /* Coeff Band 5 */
+ { 220, 93, 223 },
+ { 153, 10, 187 },
+ { 86, 2, 131 },
+ { 49, 1, 79 },
+ { 26, 1, 43 },
+ { 12, 1, 20 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 30, 58, 227 },
+ { 35, 10, 172 },
+ { 24, 23, 112 }
+ }, { /* Coeff Band 1 */
+ { 117, 145, 219 },
+ { 51, 221, 216 },
+ { 75, 169, 196 },
+ { 88, 96, 165 },
+ { 77, 43, 117 },
+ { 53, 18, 60 }
+ }, { /* Coeff Band 2 */
+ { 128, 176, 225 },
+ { 108, 114, 202 },
+ { 92, 19, 152 },
+ { 65, 4, 103 },
+ { 38, 1, 61 },
+ { 19, 1, 30 }
+ }, { /* Coeff Band 3 */
+ { 146, 184, 228 },
+ { 122, 95, 205 },
+ { 92, 11, 149 },
+ { 62, 1, 98 },
+ { 35, 1, 57 },
+ { 17, 1, 26 }
+ }, { /* Coeff Band 4 */
+ { 165, 192, 230 },
+ { 132, 81, 206 },
+ { 93, 6, 147 },
+ { 58, 1, 94 },
+ { 32, 1, 52 },
+ { 15, 1, 24 }
+ }, { /* Coeff Band 5 */
+ { 204, 223, 234 },
+ { 156, 49, 204 },
+ { 97, 3, 145 },
+ { 59, 1, 92 },
+ { 33, 1, 52 },
+ { 15, 1, 24 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 7, 184, 200 },
+ { 25, 67, 113 },
+ { 30, 9, 59 }
+ }, { /* Coeff Band 1 */
+ { 92, 42, 158 },
+ { 65, 121, 159 },
+ { 77, 56, 146 },
+ { 70, 22, 120 },
+ { 47, 4, 76 },
+ { 18, 1, 26 }
+ }, { /* Coeff Band 2 */
+ { 113, 81, 177 },
+ { 96, 75, 167 },
+ { 84, 24, 136 },
+ { 63, 8, 100 },
+ { 37, 1, 58 },
+ { 13, 1, 19 }
+ }, { /* Coeff Band 3 */
+ { 147, 85, 194 },
+ { 119, 36, 178 },
+ { 88, 8, 139 },
+ { 59, 1, 93 },
+ { 31, 1, 49 },
+ { 10, 1, 18 }
+ }, { /* Coeff Band 4 */
+ { 169, 108, 210 },
+ { 131, 41, 191 },
+ { 92, 5, 144 },
+ { 56, 1, 88 },
+ { 29, 1, 47 },
+ { 14, 1, 22 }
+ }, { /* Coeff Band 5 */
+ { 210, 106, 223 },
+ { 148, 14, 192 },
+ { 89, 2, 138 },
+ { 52, 1, 84 },
+ { 29, 1, 47 },
+ { 14, 1, 23 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 3, 207, 245 },
+ { 12, 102, 213 },
+ { 18, 33, 144 }
+ }, { /* Coeff Band 1 */
+ { 85, 205, 245 },
+ { 18, 249, 242 },
+ { 59, 221, 229 },
+ { 91, 166, 213 },
+ { 88, 117, 183 },
+ { 70, 95, 149 }
+ }, { /* Coeff Band 2 */
+ { 114, 193, 241 },
+ { 104, 155, 221 },
+ { 100, 33, 181 },
+ { 78, 10, 132 },
+ { 43, 2, 75 },
+ { 15, 1, 48 }
+ }, { /* Coeff Band 3 */
+ { 118, 198, 244 },
+ { 117, 142, 224 },
+ { 111, 25, 179 },
+ { 83, 4, 134 },
+ { 57, 1, 84 },
+ { 1, 1, 1 }
+ }, { /* Coeff Band 4 */
+ { 144, 201, 248 },
+ { 136, 130, 234 },
+ { 124, 12, 188 },
+ { 83, 1, 130 },
+ { 61, 1, 66 },
+ { 64, 171, 128 }
+ }, { /* Coeff Band 5 */
+ { 174, 227, 250 },
+ { 165, 118, 242 },
+ { 132, 21, 197 },
+ { 84, 3, 134 },
+ { 70, 1, 69 },
+ { 1, 1, 1 }
+ }
+ }
+ }
+};
+#else
+static const vp9_coeff_probs_model default_coef_probs_4x4[BLOCK_TYPES] = {
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 195, 29, 183 },
+ { 84, 49, 136 },
+ { 8, 42, 71 }
+ }, { /* Coeff Band 1 */
+ { 31, 107, 169 },
+ { 35, 99, 159 },
+ { 17, 82, 140 },
+ { 8, 66, 114 },
+ { 2, 44, 76 },
+ { 1, 19, 32 }
+ }, { /* Coeff Band 2 */
+ { 40, 132, 201 },
+ { 29, 114, 187 },
+ { 13, 91, 157 },
+ { 7, 75, 127 },
+ { 3, 58, 95 },
+ { 1, 28, 47 }
+ }, { /* Coeff Band 3 */
+ { 69, 142, 221 },
+ { 42, 122, 201 },
+ { 15, 91, 159 },
+ { 6, 67, 121 },
+ { 1, 42, 77 },
+ { 1, 17, 31 }
+ }, { /* Coeff Band 4 */
+ { 102, 148, 228 },
+ { 67, 117, 204 },
+ { 17, 82, 154 },
+ { 6, 59, 114 },
+ { 2, 39, 75 },
+ { 1, 15, 29 }
+ }, { /* Coeff Band 5 */
+ { 156, 57, 233 },
+ { 119, 57, 212 },
+ { 58, 48, 163 },
+ { 29, 40, 124 },
+ { 12, 30, 81 },
+ { 3, 12, 31 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 191, 107, 226 },
+ { 124, 117, 204 },
+ { 25, 99, 155 }
+ }, { /* Coeff Band 1 */
+ { 29, 148, 210 },
+ { 37, 126, 194 },
+ { 8, 93, 157 },
+ { 2, 68, 118 },
+ { 1, 39, 69 },
+ { 1, 17, 33 }
+ }, { /* Coeff Band 2 */
+ { 41, 151, 213 },
+ { 27, 123, 193 },
+ { 3, 82, 144 },
+ { 1, 58, 105 },
+ { 1, 32, 60 },
+ { 1, 13, 26 }
+ }, { /* Coeff Band 3 */
+ { 59, 159, 220 },
+ { 23, 126, 198 },
+ { 4, 88, 151 },
+ { 1, 66, 114 },
+ { 1, 38, 71 },
+ { 1, 18, 34 }
+ }, { /* Coeff Band 4 */
+ { 114, 136, 232 },
+ { 51, 114, 207 },
+ { 11, 83, 155 },
+ { 3, 56, 105 },
+ { 1, 33, 65 },
+ { 1, 17, 34 }
+ }, { /* Coeff Band 5 */
+ { 149, 65, 234 },
+ { 121, 57, 215 },
+ { 61, 49, 166 },
+ { 28, 36, 114 },
+ { 12, 25, 76 },
+ { 3, 16, 42 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 214, 49, 220 },
+ { 132, 63, 188 },
+ { 42, 65, 137 }
+ }, { /* Coeff Band 1 */
+ { 85, 137, 221 },
+ { 104, 131, 216 },
+ { 49, 111, 192 },
+ { 21, 87, 155 },
+ { 2, 49, 87 },
+ { 1, 16, 28 }
+ }, { /* Coeff Band 2 */
+ { 89, 163, 230 },
+ { 90, 137, 220 },
+ { 29, 100, 183 },
+ { 10, 70, 135 },
+ { 2, 42, 81 },
+ { 1, 17, 33 }
+ }, { /* Coeff Band 3 */
+ { 108, 167, 237 },
+ { 55, 133, 222 },
+ { 15, 97, 179 },
+ { 4, 72, 135 },
+ { 1, 45, 85 },
+ { 1, 19, 38 }
+ }, { /* Coeff Band 4 */
+ { 124, 146, 240 },
+ { 66, 124, 224 },
+ { 17, 88, 175 },
+ { 4, 58, 122 },
+ { 1, 36, 75 },
+ { 1, 18, 37 }
+ }, { /* Coeff Band 5 */
+ { 141, 79, 241 },
+ { 126, 70, 227 },
+ { 66, 58, 182 },
+ { 30, 44, 136 },
+ { 12, 34, 96 },
+ { 2, 20, 47 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 229, 99, 249 },
+ { 143, 111, 235 },
+ { 46, 109, 192 }
+ }, { /* Coeff Band 1 */
+ { 82, 158, 236 },
+ { 94, 146, 224 },
+ { 25, 117, 191 },
+ { 9, 87, 149 },
+ { 3, 56, 99 },
+ { 1, 33, 57 }
+ }, { /* Coeff Band 2 */
+ { 83, 167, 237 },
+ { 68, 145, 222 },
+ { 10, 103, 177 },
+ { 2, 72, 131 },
+ { 1, 41, 79 },
+ { 1, 20, 39 }
+ }, { /* Coeff Band 3 */
+ { 99, 167, 239 },
+ { 47, 141, 224 },
+ { 10, 104, 178 },
+ { 2, 73, 133 },
+ { 1, 44, 85 },
+ { 1, 22, 47 }
+ }, { /* Coeff Band 4 */
+ { 127, 145, 243 },
+ { 71, 129, 228 },
+ { 17, 93, 177 },
+ { 3, 61, 124 },
+ { 1, 41, 84 },
+ { 1, 21, 52 }
+ }, { /* Coeff Band 5 */
+ { 157, 78, 244 },
+ { 140, 72, 231 },
+ { 69, 58, 184 },
+ { 31, 44, 137 },
+ { 14, 38, 105 },
+ { 8, 23, 61 }
+ }
+ }
+ }
+};
+static const vp9_coeff_probs_model default_coef_probs_8x8[BLOCK_TYPES] = {
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 125, 34, 187 },
+ { 52, 41, 133 },
+ { 6, 31, 56 }
+ }, { /* Coeff Band 1 */
+ { 37, 109, 153 },
+ { 51, 102, 147 },
+ { 23, 87, 128 },
+ { 8, 67, 101 },
+ { 1, 41, 63 },
+ { 1, 19, 29 }
+ }, { /* Coeff Band 2 */
+ { 31, 154, 185 },
+ { 17, 127, 175 },
+ { 6, 96, 145 },
+ { 2, 73, 114 },
+ { 1, 51, 82 },
+ { 1, 28, 45 }
+ }, { /* Coeff Band 3 */
+ { 23, 163, 200 },
+ { 10, 131, 185 },
+ { 2, 93, 148 },
+ { 1, 67, 111 },
+ { 1, 41, 69 },
+ { 1, 14, 24 }
+ }, { /* Coeff Band 4 */
+ { 29, 176, 217 },
+ { 12, 145, 201 },
+ { 3, 101, 156 },
+ { 1, 69, 111 },
+ { 1, 39, 63 },
+ { 1, 14, 23 }
+ }, { /* Coeff Band 5 */
+ { 57, 192, 233 },
+ { 25, 154, 215 },
+ { 6, 109, 167 },
+ { 3, 78, 118 },
+ { 1, 48, 69 },
+ { 1, 21, 29 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 202, 105, 245 },
+ { 108, 106, 216 },
+ { 18, 90, 144 }
+ }, { /* Coeff Band 1 */
+ { 33, 172, 219 },
+ { 64, 149, 206 },
+ { 14, 117, 177 },
+ { 5, 90, 141 },
+ { 2, 61, 95 },
+ { 1, 37, 57 }
+ }, { /* Coeff Band 2 */
+ { 33, 179, 220 },
+ { 11, 140, 198 },
+ { 1, 89, 148 },
+ { 1, 60, 104 },
+ { 1, 33, 57 },
+ { 1, 12, 21 }
+ }, { /* Coeff Band 3 */
+ { 30, 181, 221 },
+ { 8, 141, 198 },
+ { 1, 87, 145 },
+ { 1, 58, 100 },
+ { 1, 31, 55 },
+ { 1, 12, 20 }
+ }, { /* Coeff Band 4 */
+ { 32, 186, 224 },
+ { 7, 142, 198 },
+ { 1, 86, 143 },
+ { 1, 58, 100 },
+ { 1, 31, 55 },
+ { 1, 12, 22 }
+ }, { /* Coeff Band 5 */
+ { 57, 192, 227 },
+ { 20, 143, 204 },
+ { 3, 96, 154 },
+ { 1, 68, 112 },
+ { 1, 42, 69 },
+ { 1, 19, 32 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 212, 35, 215 },
+ { 113, 47, 169 },
+ { 29, 48, 105 }
+ }, { /* Coeff Band 1 */
+ { 74, 129, 203 },
+ { 106, 120, 203 },
+ { 49, 107, 178 },
+ { 19, 84, 144 },
+ { 4, 50, 84 },
+ { 1, 15, 25 }
+ }, { /* Coeff Band 2 */
+ { 71, 172, 217 },
+ { 44, 141, 209 },
+ { 15, 102, 173 },
+ { 6, 76, 133 },
+ { 2, 51, 89 },
+ { 1, 24, 42 }
+ }, { /* Coeff Band 3 */
+ { 64, 185, 231 },
+ { 31, 148, 216 },
+ { 8, 103, 175 },
+ { 3, 74, 131 },
+ { 1, 46, 81 },
+ { 1, 18, 30 }
+ }, { /* Coeff Band 4 */
+ { 65, 196, 235 },
+ { 25, 157, 221 },
+ { 5, 105, 174 },
+ { 1, 67, 120 },
+ { 1, 38, 69 },
+ { 1, 15, 30 }
+ }, { /* Coeff Band 5 */
+ { 65, 204, 238 },
+ { 30, 156, 224 },
+ { 7, 107, 177 },
+ { 2, 70, 124 },
+ { 1, 42, 73 },
+ { 1, 18, 34 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 225, 86, 251 },
+ { 144, 104, 235 },
+ { 42, 99, 181 }
+ }, { /* Coeff Band 1 */
+ { 85, 175, 239 },
+ { 112, 165, 229 },
+ { 29, 136, 200 },
+ { 12, 103, 162 },
+ { 6, 77, 123 },
+ { 2, 53, 84 }
+ }, { /* Coeff Band 2 */
+ { 75, 183, 239 },
+ { 30, 155, 221 },
+ { 3, 106, 171 },
+ { 1, 74, 128 },
+ { 1, 44, 76 },
+ { 1, 17, 28 }
+ }, { /* Coeff Band 3 */
+ { 73, 185, 240 },
+ { 27, 159, 222 },
+ { 2, 107, 172 },
+ { 1, 75, 127 },
+ { 1, 42, 73 },
+ { 1, 17, 29 }
+ }, { /* Coeff Band 4 */
+ { 62, 190, 238 },
+ { 21, 159, 222 },
+ { 2, 107, 172 },
+ { 1, 72, 122 },
+ { 1, 40, 71 },
+ { 1, 18, 32 }
+ }, { /* Coeff Band 5 */
+ { 61, 199, 240 },
+ { 27, 161, 226 },
+ { 4, 113, 180 },
+ { 1, 76, 129 },
+ { 1, 46, 80 },
+ { 1, 23, 41 }
+ }
+ }
+ }
+};
+static const vp9_coeff_probs_model default_coef_probs_16x16[BLOCK_TYPES] = {
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 7, 27, 153 },
+ { 5, 30, 95 },
+ { 1, 16, 30 }
+ }, { /* Coeff Band 1 */
+ { 50, 75, 127 },
+ { 57, 75, 124 },
+ { 27, 67, 108 },
+ { 10, 54, 86 },
+ { 1, 33, 52 },
+ { 1, 12, 18 }
+ }, { /* Coeff Band 2 */
+ { 43, 125, 151 },
+ { 26, 108, 148 },
+ { 7, 83, 122 },
+ { 2, 59, 89 },
+ { 1, 38, 60 },
+ { 1, 17, 27 }
+ }, { /* Coeff Band 3 */
+ { 23, 144, 163 },
+ { 13, 112, 154 },
+ { 2, 75, 117 },
+ { 1, 50, 81 },
+ { 1, 31, 51 },
+ { 1, 14, 23 }
+ }, { /* Coeff Band 4 */
+ { 18, 162, 185 },
+ { 6, 123, 171 },
+ { 1, 78, 125 },
+ { 1, 51, 86 },
+ { 1, 31, 54 },
+ { 1, 14, 23 }
+ }, { /* Coeff Band 5 */
+ { 15, 199, 227 },
+ { 3, 150, 204 },
+ { 1, 91, 146 },
+ { 1, 55, 95 },
+ { 1, 30, 53 },
+ { 1, 11, 20 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 19, 55, 240 },
+ { 19, 59, 196 },
+ { 3, 52, 105 }
+ }, { /* Coeff Band 1 */
+ { 41, 166, 207 },
+ { 104, 153, 199 },
+ { 31, 123, 181 },
+ { 14, 101, 152 },
+ { 5, 72, 106 },
+ { 1, 36, 52 }
+ }, { /* Coeff Band 2 */
+ { 35, 176, 211 },
+ { 12, 131, 190 },
+ { 2, 88, 144 },
+ { 1, 60, 101 },
+ { 1, 36, 60 },
+ { 1, 16, 28 }
+ }, { /* Coeff Band 3 */
+ { 28, 183, 213 },
+ { 8, 134, 191 },
+ { 1, 86, 142 },
+ { 1, 56, 96 },
+ { 1, 30, 53 },
+ { 1, 12, 20 }
+ }, { /* Coeff Band 4 */
+ { 20, 190, 215 },
+ { 4, 135, 192 },
+ { 1, 84, 139 },
+ { 1, 53, 91 },
+ { 1, 28, 49 },
+ { 1, 11, 20 }
+ }, { /* Coeff Band 5 */
+ { 13, 196, 216 },
+ { 2, 137, 192 },
+ { 1, 86, 143 },
+ { 1, 57, 99 },
+ { 1, 32, 56 },
+ { 1, 13, 24 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 211, 29, 217 },
+ { 96, 47, 156 },
+ { 22, 43, 87 }
+ }, { /* Coeff Band 1 */
+ { 78, 120, 193 },
+ { 111, 116, 186 },
+ { 46, 102, 164 },
+ { 15, 80, 128 },
+ { 2, 49, 76 },
+ { 1, 18, 28 }
+ }, { /* Coeff Band 2 */
+ { 71, 161, 203 },
+ { 42, 132, 192 },
+ { 10, 98, 150 },
+ { 3, 69, 109 },
+ { 1, 44, 70 },
+ { 1, 18, 29 }
+ }, { /* Coeff Band 3 */
+ { 57, 186, 211 },
+ { 30, 140, 196 },
+ { 4, 93, 146 },
+ { 1, 62, 102 },
+ { 1, 38, 65 },
+ { 1, 16, 27 }
+ }, { /* Coeff Band 4 */
+ { 47, 199, 217 },
+ { 14, 145, 196 },
+ { 1, 88, 142 },
+ { 1, 57, 98 },
+ { 1, 36, 62 },
+ { 1, 15, 26 }
+ }, { /* Coeff Band 5 */
+ { 26, 219, 229 },
+ { 5, 155, 207 },
+ { 1, 94, 151 },
+ { 1, 60, 104 },
+ { 1, 36, 62 },
+ { 1, 16, 28 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 233, 29, 248 },
+ { 146, 47, 220 },
+ { 43, 52, 140 }
+ }, { /* Coeff Band 1 */
+ { 100, 163, 232 },
+ { 179, 161, 222 },
+ { 63, 142, 204 },
+ { 37, 113, 174 },
+ { 26, 89, 137 },
+ { 18, 68, 97 }
+ }, { /* Coeff Band 2 */
+ { 85, 181, 230 },
+ { 32, 146, 209 },
+ { 7, 100, 164 },
+ { 3, 71, 121 },
+ { 1, 45, 77 },
+ { 1, 18, 30 }
+ }, { /* Coeff Band 3 */
+ { 65, 187, 230 },
+ { 20, 148, 207 },
+ { 2, 97, 159 },
+ { 1, 68, 116 },
+ { 1, 40, 70 },
+ { 1, 14, 29 }
+ }, { /* Coeff Band 4 */
+ { 40, 194, 227 },
+ { 8, 147, 204 },
+ { 1, 94, 155 },
+ { 1, 65, 112 },
+ { 1, 39, 66 },
+ { 1, 14, 26 }
+ }, { /* Coeff Band 5 */
+ { 16, 208, 228 },
+ { 3, 151, 207 },
+ { 1, 98, 160 },
+ { 1, 67, 117 },
+ { 1, 41, 74 },
+ { 1, 17, 31 }
+ }
+ }
+ }
+};
+static const vp9_coeff_probs_model default_coef_probs_32x32[BLOCK_TYPES] = {
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 17, 38, 140 },
+ { 7, 34, 80 },
+ { 1, 17, 29 }
+ }, { /* Coeff Band 1 */
+ { 37, 75, 128 },
+ { 41, 76, 128 },
+ { 26, 66, 116 },
+ { 12, 52, 94 },
+ { 2, 32, 55 },
+ { 1, 10, 16 }
+ }, { /* Coeff Band 2 */
+ { 50, 127, 154 },
+ { 37, 109, 152 },
+ { 16, 82, 121 },
+ { 5, 59, 85 },
+ { 1, 35, 54 },
+ { 1, 13, 20 }
+ }, { /* Coeff Band 3 */
+ { 40, 142, 167 },
+ { 17, 110, 157 },
+ { 2, 71, 112 },
+ { 1, 44, 72 },
+ { 1, 27, 45 },
+ { 1, 11, 17 }
+ }, { /* Coeff Band 4 */
+ { 30, 175, 188 },
+ { 9, 124, 169 },
+ { 1, 74, 116 },
+ { 1, 48, 78 },
+ { 1, 30, 49 },
+ { 1, 11, 18 }
+ }, { /* Coeff Band 5 */
+ { 10, 222, 223 },
+ { 2, 150, 194 },
+ { 1, 83, 128 },
+ { 1, 48, 79 },
+ { 1, 27, 45 },
+ { 1, 11, 17 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 36, 41, 235 },
+ { 29, 36, 193 },
+ { 10, 27, 111 }
+ }, { /* Coeff Band 1 */
+ { 85, 165, 222 },
+ { 177, 162, 215 },
+ { 110, 135, 195 },
+ { 57, 113, 168 },
+ { 23, 83, 120 },
+ { 10, 49, 61 }
+ }, { /* Coeff Band 2 */
+ { 85, 190, 223 },
+ { 36, 139, 200 },
+ { 5, 90, 146 },
+ { 1, 60, 103 },
+ { 1, 38, 65 },
+ { 1, 18, 30 }
+ }, { /* Coeff Band 3 */
+ { 72, 202, 223 },
+ { 23, 141, 199 },
+ { 2, 86, 140 },
+ { 1, 56, 97 },
+ { 1, 36, 61 },
+ { 1, 16, 27 }
+ }, { /* Coeff Band 4 */
+ { 55, 218, 225 },
+ { 13, 145, 200 },
+ { 1, 86, 141 },
+ { 1, 57, 99 },
+ { 1, 35, 61 },
+ { 1, 13, 22 }
+ }, { /* Coeff Band 5 */
+ { 15, 235, 212 },
+ { 1, 132, 184 },
+ { 1, 84, 139 },
+ { 1, 57, 97 },
+ { 1, 34, 56 },
+ { 1, 14, 23 }
+ }
+ }
+ }, { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 181, 21, 201 },
+ { 61, 37, 123 },
+ { 10, 38, 71 }
+ }, { /* Coeff Band 1 */
+ { 47, 106, 172 },
+ { 95, 104, 173 },
+ { 42, 93, 159 },
+ { 18, 77, 131 },
+ { 4, 50, 81 },
+ { 1, 17, 23 }
+ }, { /* Coeff Band 2 */
+ { 62, 147, 199 },
+ { 44, 130, 189 },
+ { 28, 102, 154 },
+ { 18, 75, 115 },
+ { 2, 44, 65 },
+ { 1, 12, 19 }
+ }, { /* Coeff Band 3 */
+ { 55, 153, 210 },
+ { 24, 130, 194 },
+ { 3, 93, 146 },
+ { 1, 61, 97 },
+ { 1, 31, 50 },
+ { 1, 10, 16 }
+ }, { /* Coeff Band 4 */
+ { 49, 186, 223 },
+ { 17, 148, 204 },
+ { 1, 96, 142 },
+ { 1, 53, 83 },
+ { 1, 26, 44 },
+ { 1, 11, 17 }
+ }, { /* Coeff Band 5 */
+ { 13, 217, 212 },
+ { 2, 136, 180 },
+ { 1, 78, 124 },
+ { 1, 50, 83 },
+ { 1, 29, 49 },
+ { 1, 14, 23 }
+ }
+ }, { /* Inter */
+ { /* Coeff Band 0 */
+ { 197, 13, 247 },
+ { 82, 17, 222 },
+ { 25, 17, 162 }
+ }, { /* Coeff Band 1 */
+ { 126, 186, 247 },
+ { 234, 191, 243 },
+ { 176, 177, 234 },
+ { 104, 158, 220 },
+ { 66, 128, 186 },
+ { 55, 90, 137 }
+ }, { /* Coeff Band 2 */
+ { 111, 197, 242 },
+ { 46, 158, 219 },
+ { 9, 104, 171 },
+ { 2, 65, 125 },
+ { 1, 44, 80 },
+ { 1, 17, 91 }
+ }, { /* Coeff Band 3 */
+ { 104, 208, 245 },
+ { 39, 168, 224 },
+ { 3, 109, 162 },
+ { 1, 79, 124 },
+ { 1, 50, 102 },
+ { 1, 43, 102 }
+ }, { /* Coeff Band 4 */
+ { 84, 220, 246 },
+ { 31, 177, 231 },
+ { 2, 115, 180 },
+ { 1, 79, 134 },
+ { 1, 55, 77 },
+ { 1, 60, 79 }
+ }, { /* Coeff Band 5 */
+ { 43, 243, 240 },
+ { 8, 180, 217 },
+ { 1, 115, 166 },
+ { 1, 84, 121 },
+ { 1, 51, 67 },
+ { 1, 16, 6 }
+ }
+ }
+ }
+};
+#endif
diff --git a/libvpx/vp9/common/vp9_entropy.c b/libvpx/vp9/common/vp9_entropy.c
new file mode 100644
index 0000000..080867e
--- /dev/null
+++ b/libvpx/vp9/common/vp9_entropy.c
@@ -0,0 +1,737 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx/vpx_integer.h"
+
+DECLARE_ALIGNED(16, const uint8_t, vp9_norm[256]) = {
+ 0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+DECLARE_ALIGNED(16, const uint8_t,
+ vp9_coefband_trans_8x8plus[MAXBAND_INDEX + 1]) = {
+ 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 5
+};
+
+DECLARE_ALIGNED(16, const uint8_t,
+ vp9_coefband_trans_4x4[MAXBAND_INDEX + 1]) = {
+ 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5
+};
+
+DECLARE_ALIGNED(16, const uint8_t, vp9_pt_energy_class[MAX_ENTROPY_TOKENS]) = {
+ 0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 5
+};
+
+DECLARE_ALIGNED(16, const int, vp9_default_scan_4x4[16]) = {
+ 0, 4, 1, 5,
+ 8, 2, 12, 9,
+ 3, 6, 13, 10,
+ 7, 14, 11, 15,
+};
+
+DECLARE_ALIGNED(16, const int, vp9_col_scan_4x4[16]) = {
+ 0, 4, 8, 1,
+ 12, 5, 9, 2,
+ 13, 6, 10, 3,
+ 7, 14, 11, 15,
+};
+
+DECLARE_ALIGNED(16, const int, vp9_row_scan_4x4[16]) = {
+ 0, 1, 4, 2,
+ 5, 3, 6, 8,
+ 9, 7, 12, 10,
+ 13, 11, 14, 15,
+};
+
+DECLARE_ALIGNED(64, const int, vp9_default_scan_8x8[64]) = {
+ 0, 8, 1, 16, 9, 2, 17, 24,
+ 10, 3, 18, 25, 32, 11, 4, 26,
+ 33, 19, 40, 12, 34, 27, 5, 41,
+ 20, 48, 13, 35, 42, 28, 21, 6,
+ 49, 56, 36, 43, 29, 7, 14, 50,
+ 57, 44, 22, 37, 15, 51, 58, 30,
+ 45, 23, 52, 59, 38, 31, 60, 53,
+ 46, 39, 61, 54, 47, 62, 55, 63,
+};
+
+DECLARE_ALIGNED(16, const int, vp9_col_scan_8x8[64]) = {
+ 0, 8, 16, 1, 24, 9, 32, 17,
+ 2, 40, 25, 10, 33, 18, 48, 3,
+ 26, 41, 11, 56, 19, 34, 4, 49,
+ 27, 42, 12, 35, 20, 57, 50, 28,
+ 5, 43, 13, 36, 58, 51, 21, 44,
+ 6, 29, 59, 37, 14, 52, 22, 7,
+ 45, 60, 30, 15, 38, 53, 23, 46,
+ 31, 61, 39, 54, 47, 62, 55, 63,
+};
+
+DECLARE_ALIGNED(16, const int, vp9_row_scan_8x8[64]) = {
+ 0, 1, 2, 8, 9, 3, 16, 10,
+ 4, 17, 11, 24, 5, 18, 25, 12,
+ 19, 26, 32, 6, 13, 20, 33, 27,
+ 7, 34, 40, 21, 28, 41, 14, 35,
+ 48, 42, 29, 36, 49, 22, 43, 15,
+ 56, 37, 50, 44, 30, 57, 23, 51,
+ 58, 45, 38, 52, 31, 59, 53, 46,
+ 60, 39, 61, 47, 54, 55, 62, 63,
+};
+
+DECLARE_ALIGNED(16, const int, vp9_default_scan_16x16[256]) = {
+ 0, 16, 1, 32, 17, 2, 48, 33, 18, 3, 64, 34, 49, 19, 65, 80,
+ 50, 4, 35, 66, 20, 81, 96, 51, 5, 36, 82, 97, 67, 112, 21, 52,
+ 98, 37, 83, 113, 6, 68, 128, 53, 22, 99, 114, 84, 7, 129, 38, 69,
+ 100, 115, 144, 130, 85, 54, 23, 8, 145, 39, 70, 116, 101, 131, 160, 146,
+ 55, 86, 24, 71, 132, 117, 161, 40, 9, 102, 147, 176, 162, 87, 56, 25,
+ 133, 118, 177, 148, 72, 103, 41, 163, 10, 192, 178, 88, 57, 134, 149, 119,
+ 26, 164, 73, 104, 193, 42, 179, 208, 11, 135, 89, 165, 120, 150, 58, 194,
+ 180, 27, 74, 209, 105, 151, 136, 43, 90, 224, 166, 195, 181, 121, 210, 59,
+ 12, 152, 106, 167, 196, 75, 137, 225, 211, 240, 182, 122, 91, 28, 197, 13,
+ 226, 168, 183, 153, 44, 212, 138, 107, 241, 60, 29, 123, 198, 184, 227, 169,
+ 242, 76, 213, 154, 45, 92, 14, 199, 139, 61, 228, 214, 170, 185, 243, 108,
+ 77, 155, 30, 15, 200, 229, 124, 215, 244, 93, 46, 186, 171, 201, 109, 140,
+ 230, 62, 216, 245, 31, 125, 78, 156, 231, 47, 187, 202, 217, 94, 246, 141,
+ 63, 232, 172, 110, 247, 157, 79, 218, 203, 126, 233, 188, 248, 95, 173, 142,
+ 219, 111, 249, 234, 158, 127, 189, 204, 250, 235, 143, 174, 220, 205, 159, 251,
+ 190, 221, 175, 236, 237, 191, 206, 252, 222, 253, 207, 238, 223, 254, 239, 255,
+};
+
+DECLARE_ALIGNED(16, const int, vp9_col_scan_16x16[256]) = {
+ 0, 16, 32, 48, 1, 64, 17, 80, 33, 96, 49, 2, 65, 112, 18, 81,
+ 34, 128, 50, 97, 3, 66, 144, 19, 113, 35, 82, 160, 98, 51, 129, 4,
+ 67, 176, 20, 114, 145, 83, 36, 99, 130, 52, 192, 5, 161, 68, 115, 21,
+ 146, 84, 208, 177, 37, 131, 100, 53, 162, 224, 69, 6, 116, 193, 147, 85,
+ 22, 240, 132, 38, 178, 101, 163, 54, 209, 117, 70, 7, 148, 194, 86, 179,
+ 225, 23, 133, 39, 164, 8, 102, 210, 241, 55, 195, 118, 149, 71, 180, 24,
+ 87, 226, 134, 165, 211, 40, 103, 56, 72, 150, 196, 242, 119, 9, 181, 227,
+ 88, 166, 25, 135, 41, 104, 212, 57, 151, 197, 120, 73, 243, 182, 136, 167,
+ 213, 89, 10, 228, 105, 152, 198, 26, 42, 121, 183, 244, 168, 58, 137, 229,
+ 74, 214, 90, 153, 199, 184, 11, 106, 245, 27, 122, 230, 169, 43, 215, 59,
+ 200, 138, 185, 246, 75, 12, 91, 154, 216, 231, 107, 28, 44, 201, 123, 170,
+ 60, 247, 232, 76, 139, 13, 92, 217, 186, 248, 155, 108, 29, 124, 45, 202,
+ 233, 171, 61, 14, 77, 140, 15, 249, 93, 30, 187, 156, 218, 46, 109, 125,
+ 62, 172, 78, 203, 31, 141, 234, 94, 47, 188, 63, 157, 110, 250, 219, 79,
+ 126, 204, 173, 142, 95, 189, 111, 235, 158, 220, 251, 127, 174, 143, 205, 236,
+ 159, 190, 221, 252, 175, 206, 237, 191, 253, 222, 238, 207, 254, 223, 239, 255,
+};
+
+DECLARE_ALIGNED(16, const int, vp9_row_scan_16x16[256]) = {
+ 0, 1, 2, 16, 3, 17, 4, 18, 32, 5, 33, 19, 6, 34, 48, 20,
+ 49, 7, 35, 21, 50, 64, 8, 36, 65, 22, 51, 37, 80, 9, 66, 52,
+ 23, 38, 81, 67, 10, 53, 24, 82, 68, 96, 39, 11, 54, 83, 97, 69,
+ 25, 98, 84, 40, 112, 55, 12, 70, 99, 113, 85, 26, 41, 56, 114, 100,
+ 13, 71, 128, 86, 27, 115, 101, 129, 42, 57, 72, 116, 14, 87, 130, 102,
+ 144, 73, 131, 117, 28, 58, 15, 88, 43, 145, 103, 132, 146, 118, 74, 160,
+ 89, 133, 104, 29, 59, 147, 119, 44, 161, 148, 90, 105, 134, 162, 120, 176,
+ 75, 135, 149, 30, 60, 163, 177, 45, 121, 91, 106, 164, 178, 150, 192, 136,
+ 165, 179, 31, 151, 193, 76, 122, 61, 137, 194, 107, 152, 180, 208, 46, 166,
+ 167, 195, 92, 181, 138, 209, 123, 153, 224, 196, 77, 168, 210, 182, 240, 108,
+ 197, 62, 154, 225, 183, 169, 211, 47, 139, 93, 184, 226, 212, 241, 198, 170,
+ 124, 155, 199, 78, 213, 185, 109, 227, 200, 63, 228, 242, 140, 214, 171, 186,
+ 156, 229, 243, 125, 94, 201, 244, 215, 216, 230, 141, 187, 202, 79, 172, 110,
+ 157, 245, 217, 231, 95, 246, 232, 126, 203, 247, 233, 173, 218, 142, 111, 158,
+ 188, 248, 127, 234, 219, 249, 189, 204, 143, 174, 159, 250, 235, 205, 220, 175,
+ 190, 251, 221, 191, 206, 236, 207, 237, 252, 222, 253, 223, 238, 239, 254, 255,
+};
+
+DECLARE_ALIGNED(16, const int, vp9_default_scan_32x32[1024]) = {
+ 0, 32, 1, 64, 33, 2, 96, 65, 34, 128, 3, 97, 66, 160, 129, 35, 98, 4, 67, 130, 161, 192, 36, 99, 224, 5, 162, 193, 68, 131, 37, 100,
+ 225, 194, 256, 163, 69, 132, 6, 226, 257, 288, 195, 101, 164, 38, 258, 7, 227, 289, 133, 320, 70, 196, 165, 290, 259, 228, 39, 321, 102, 352, 8, 197,
+ 71, 134, 322, 291, 260, 353, 384, 229, 166, 103, 40, 354, 323, 292, 135, 385, 198, 261, 72, 9, 416, 167, 386, 355, 230, 324, 104, 293, 41, 417, 199, 136,
+ 262, 387, 448, 325, 356, 10, 73, 418, 231, 168, 449, 294, 388, 105, 419, 263, 42, 200, 357, 450, 137, 480, 74, 326, 232, 11, 389, 169, 295, 420, 106, 451,
+ 481, 358, 264, 327, 201, 43, 138, 512, 482, 390, 296, 233, 170, 421, 75, 452, 359, 12, 513, 265, 483, 328, 107, 202, 514, 544, 422, 391, 453, 139, 44, 234,
+ 484, 297, 360, 171, 76, 515, 545, 266, 329, 454, 13, 423, 203, 108, 546, 485, 576, 298, 235, 140, 361, 330, 172, 547, 45, 455, 267, 577, 486, 77, 204, 362,
+ 608, 14, 299, 578, 109, 236, 487, 609, 331, 141, 579, 46, 15, 173, 610, 363, 78, 205, 16, 110, 237, 611, 142, 47, 174, 79, 206, 17, 111, 238, 48, 143,
+ 80, 175, 112, 207, 49, 18, 239, 81, 113, 19, 50, 82, 114, 51, 83, 115, 640, 516, 392, 268, 144, 20, 672, 641, 548, 517, 424, 393, 300, 269, 176, 145,
+ 52, 21, 704, 673, 642, 580, 549, 518, 456, 425, 394, 332, 301, 270, 208, 177, 146, 84, 53, 22, 736, 705, 674, 643, 612, 581, 550, 519, 488, 457, 426, 395,
+ 364, 333, 302, 271, 240, 209, 178, 147, 116, 85, 54, 23, 737, 706, 675, 613, 582, 551, 489, 458, 427, 365, 334, 303, 241, 210, 179, 117, 86, 55, 738, 707,
+ 614, 583, 490, 459, 366, 335, 242, 211, 118, 87, 739, 615, 491, 367, 243, 119, 768, 644, 520, 396, 272, 148, 24, 800, 769, 676, 645, 552, 521, 428, 397, 304,
+ 273, 180, 149, 56, 25, 832, 801, 770, 708, 677, 646, 584, 553, 522, 460, 429, 398, 336, 305, 274, 212, 181, 150, 88, 57, 26, 864, 833, 802, 771, 740, 709,
+ 678, 647, 616, 585, 554, 523, 492, 461, 430, 399, 368, 337, 306, 275, 244, 213, 182, 151, 120, 89, 58, 27, 865, 834, 803, 741, 710, 679, 617, 586, 555, 493,
+ 462, 431, 369, 338, 307, 245, 214, 183, 121, 90, 59, 866, 835, 742, 711, 618, 587, 494, 463, 370, 339, 246, 215, 122, 91, 867, 743, 619, 495, 371, 247, 123,
+ 896, 772, 648, 524, 400, 276, 152, 28, 928, 897, 804, 773, 680, 649, 556, 525, 432, 401, 308, 277, 184, 153, 60, 29, 960, 929, 898, 836, 805, 774, 712, 681,
+ 650, 588, 557, 526, 464, 433, 402, 340, 309, 278, 216, 185, 154, 92, 61, 30, 992, 961, 930, 899, 868, 837, 806, 775, 744, 713, 682, 651, 620, 589, 558, 527,
+ 496, 465, 434, 403, 372, 341, 310, 279, 248, 217, 186, 155, 124, 93, 62, 31, 993, 962, 931, 869, 838, 807, 745, 714, 683, 621, 590, 559, 497, 466, 435, 373,
+ 342, 311, 249, 218, 187, 125, 94, 63, 994, 963, 870, 839, 746, 715, 622, 591, 498, 467, 374, 343, 250, 219, 126, 95, 995, 871, 747, 623, 499, 375, 251, 127,
+ 900, 776, 652, 528, 404, 280, 156, 932, 901, 808, 777, 684, 653, 560, 529, 436, 405, 312, 281, 188, 157, 964, 933, 902, 840, 809, 778, 716, 685, 654, 592, 561,
+ 530, 468, 437, 406, 344, 313, 282, 220, 189, 158, 996, 965, 934, 903, 872, 841, 810, 779, 748, 717, 686, 655, 624, 593, 562, 531, 500, 469, 438, 407, 376, 345,
+ 314, 283, 252, 221, 190, 159, 997, 966, 935, 873, 842, 811, 749, 718, 687, 625, 594, 563, 501, 470, 439, 377, 346, 315, 253, 222, 191, 998, 967, 874, 843, 750,
+ 719, 626, 595, 502, 471, 378, 347, 254, 223, 999, 875, 751, 627, 503, 379, 255, 904, 780, 656, 532, 408, 284, 936, 905, 812, 781, 688, 657, 564, 533, 440, 409,
+ 316, 285, 968, 937, 906, 844, 813, 782, 720, 689, 658, 596, 565, 534, 472, 441, 410, 348, 317, 286, 1000, 969, 938, 907, 876, 845, 814, 783, 752, 721, 690, 659,
+ 628, 597, 566, 535, 504, 473, 442, 411, 380, 349, 318, 287, 1001, 970, 939, 877, 846, 815, 753, 722, 691, 629, 598, 567, 505, 474, 443, 381, 350, 319, 1002, 971,
+ 878, 847, 754, 723, 630, 599, 506, 475, 382, 351, 1003, 879, 755, 631, 507, 383, 908, 784, 660, 536, 412, 940, 909, 816, 785, 692, 661, 568, 537, 444, 413, 972,
+ 941, 910, 848, 817, 786, 724, 693, 662, 600, 569, 538, 476, 445, 414, 1004, 973, 942, 911, 880, 849, 818, 787, 756, 725, 694, 663, 632, 601, 570, 539, 508, 477,
+ 446, 415, 1005, 974, 943, 881, 850, 819, 757, 726, 695, 633, 602, 571, 509, 478, 447, 1006, 975, 882, 851, 758, 727, 634, 603, 510, 479, 1007, 883, 759, 635, 511,
+ 912, 788, 664, 540, 944, 913, 820, 789, 696, 665, 572, 541, 976, 945, 914, 852, 821, 790, 728, 697, 666, 604, 573, 542, 1008, 977, 946, 915, 884, 853, 822, 791,
+ 760, 729, 698, 667, 636, 605, 574, 543, 1009, 978, 947, 885, 854, 823, 761, 730, 699, 637, 606, 575, 1010, 979, 886, 855, 762, 731, 638, 607, 1011, 887, 763, 639,
+ 916, 792, 668, 948, 917, 824, 793, 700, 669, 980, 949, 918, 856, 825, 794, 732, 701, 670, 1012, 981, 950, 919, 888, 857, 826, 795, 764, 733, 702, 671, 1013, 982,
+ 951, 889, 858, 827, 765, 734, 703, 1014, 983, 890, 859, 766, 735, 1015, 891, 767, 920, 796, 952, 921, 828, 797, 984, 953, 922, 860, 829, 798, 1016, 985, 954, 923,
+ 892, 861, 830, 799, 1017, 986, 955, 893, 862, 831, 1018, 987, 894, 863, 1019, 895, 924, 956, 925, 988, 957, 926, 1020, 989, 958, 927, 1021, 990, 959, 1022, 991, 1023,
+};
+
+/* Array indices are identical to previously-existing CONTEXT_NODE indices */
+
+const vp9_tree_index vp9_coef_tree[ 22] = /* corresponding _CONTEXT_NODEs */
+{
+#if CONFIG_BALANCED_COEFTREE
+ -ZERO_TOKEN, 2, /* 0 = ZERO */
+ -DCT_EOB_TOKEN, 4, /* 1 = EOB */
+#else
+ -DCT_EOB_TOKEN, 2, /* 0 = EOB */
+ -ZERO_TOKEN, 4, /* 1 = ZERO */
+#endif
+ -ONE_TOKEN, 6, /* 2 = ONE */
+ 8, 12, /* 3 = LOW_VAL */
+ -TWO_TOKEN, 10, /* 4 = TWO */
+ -THREE_TOKEN, -FOUR_TOKEN, /* 5 = THREE */
+ 14, 16, /* 6 = HIGH_LOW */
+ -DCT_VAL_CATEGORY1, -DCT_VAL_CATEGORY2, /* 7 = CAT_ONE */
+ 18, 20, /* 8 = CAT_THREEFOUR */
+ -DCT_VAL_CATEGORY3, -DCT_VAL_CATEGORY4, /* 9 = CAT_THREE */
+ -DCT_VAL_CATEGORY5, -DCT_VAL_CATEGORY6 /* 10 = CAT_FIVE */
+};
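+
+/* A negative vp9_tree_index entry is a leaf (the negated token value); a
+   non-negative entry is the array offset of the next node pair. For example,
+   the value 8 at node 3 (LOW_VAL) continues decoding at entries 8 and 9,
+   i.e. the TWO node. */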
+
+struct vp9_token vp9_coef_encodings[MAX_ENTROPY_TOKENS];
+
+/* Trees for extra bits. Probabilities are constant and
+ do not depend on previously encoded bits */
+
+static const vp9_prob Pcat1[] = { 159};
+static const vp9_prob Pcat2[] = { 165, 145};
+static const vp9_prob Pcat3[] = { 173, 148, 140};
+static const vp9_prob Pcat4[] = { 176, 155, 140, 135};
+static const vp9_prob Pcat5[] = { 180, 157, 141, 134, 130};
+static const vp9_prob Pcat6[] = {
+ 254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129
+};
+
+const vp9_tree_index vp9_coefmodel_tree[6] = {
+#if CONFIG_BALANCED_COEFTREE
+ -ZERO_TOKEN, 2,
+ -DCT_EOB_MODEL_TOKEN, 4,
+#else
+ -DCT_EOB_MODEL_TOKEN, 2, /* 0 = EOB */
+ -ZERO_TOKEN, 4, /* 1 = ZERO */
+#endif
+ -ONE_TOKEN, -TWO_TOKEN,
+};
+
+// Model obtained from a two-sided, zero-centered distribution derived
+// from a Pareto distribution. The cdf of the distribution is:
+// cdf(x) = 0.5 + 0.5 * sgn(x) * [1 - {alpha/(alpha + |x|)} ^ beta]
+//
+// For a given beta and a given probability of the 1-node, the alpha
+// is first solved, and then the {alpha, beta} pair is used to generate
+// the probabilities for the rest of the nodes.
+
+// beta = 8
+const vp9_prob vp9_modelcoefprobs_pareto8[COEFPROB_MODELS][MODEL_NODES] = {
+ { 3, 86, 128, 6, 86, 23, 88, 29},
+ { 9, 86, 129, 17, 88, 61, 94, 76},
+ { 15, 87, 129, 28, 89, 93, 100, 110},
+ { 20, 88, 130, 38, 91, 118, 106, 136},
+ { 26, 89, 131, 48, 92, 139, 111, 156},
+ { 31, 90, 131, 58, 94, 156, 117, 171},
+ { 37, 90, 132, 66, 95, 171, 122, 184},
+ { 42, 91, 132, 75, 97, 183, 127, 194},
+ { 47, 92, 133, 83, 98, 193, 132, 202},
+ { 52, 93, 133, 90, 100, 201, 137, 208},
+ { 57, 94, 134, 98, 101, 208, 142, 214},
+ { 62, 94, 135, 105, 103, 214, 146, 218},
+ { 66, 95, 135, 111, 104, 219, 151, 222},
+ { 71, 96, 136, 117, 106, 224, 155, 225},
+ { 76, 97, 136, 123, 107, 227, 159, 228},
+ { 80, 98, 137, 129, 109, 231, 162, 231},
+ { 84, 98, 138, 134, 110, 234, 166, 233},
+ { 89, 99, 138, 140, 112, 236, 170, 235},
+ { 93, 100, 139, 145, 113, 238, 173, 236},
+ { 97, 101, 140, 149, 115, 240, 176, 238},
+ {101, 102, 140, 154, 116, 242, 179, 239},
+ {105, 103, 141, 158, 118, 243, 182, 240},
+ {109, 104, 141, 162, 119, 244, 185, 241},
+ {113, 104, 142, 166, 120, 245, 187, 242},
+ {116, 105, 143, 170, 122, 246, 190, 243},
+ {120, 106, 143, 173, 123, 247, 192, 244},
+ {123, 107, 144, 177, 125, 248, 195, 244},
+ {127, 108, 145, 180, 126, 249, 197, 245},
+ {130, 109, 145, 183, 128, 249, 199, 245},
+ {134, 110, 146, 186, 129, 250, 201, 246},
+ {137, 111, 147, 189, 131, 251, 203, 246},
+ {140, 112, 147, 192, 132, 251, 205, 247},
+ {143, 113, 148, 194, 133, 251, 207, 247},
+ {146, 114, 149, 197, 135, 252, 208, 248},
+ {149, 115, 149, 199, 136, 252, 210, 248},
+ {152, 115, 150, 201, 138, 252, 211, 248},
+ {155, 116, 151, 204, 139, 253, 213, 249},
+ {158, 117, 151, 206, 140, 253, 214, 249},
+ {161, 118, 152, 208, 142, 253, 216, 249},
+ {163, 119, 153, 210, 143, 253, 217, 249},
+ {166, 120, 153, 212, 144, 254, 218, 250},
+ {168, 121, 154, 213, 146, 254, 220, 250},
+ {171, 122, 155, 215, 147, 254, 221, 250},
+ {173, 123, 155, 217, 148, 254, 222, 250},
+ {176, 124, 156, 218, 150, 254, 223, 250},
+ {178, 125, 157, 220, 151, 254, 224, 251},
+ {180, 126, 157, 221, 152, 254, 225, 251},
+ {183, 127, 158, 222, 153, 254, 226, 251},
+ {185, 128, 159, 224, 155, 255, 227, 251},
+ {187, 129, 160, 225, 156, 255, 228, 251},
+ {189, 131, 160, 226, 157, 255, 228, 251},
+ {191, 132, 161, 227, 159, 255, 229, 251},
+ {193, 133, 162, 228, 160, 255, 230, 252},
+ {195, 134, 163, 230, 161, 255, 231, 252},
+ {197, 135, 163, 231, 162, 255, 231, 252},
+ {199, 136, 164, 232, 163, 255, 232, 252},
+ {201, 137, 165, 233, 165, 255, 233, 252},
+ {202, 138, 166, 233, 166, 255, 233, 252},
+ {204, 139, 166, 234, 167, 255, 234, 252},
+ {206, 140, 167, 235, 168, 255, 235, 252},
+ {207, 141, 168, 236, 169, 255, 235, 252},
+ {209, 142, 169, 237, 171, 255, 236, 252},
+ {210, 144, 169, 237, 172, 255, 236, 252},
+ {212, 145, 170, 238, 173, 255, 237, 252},
+ {214, 146, 171, 239, 174, 255, 237, 253},
+ {215, 147, 172, 240, 175, 255, 238, 253},
+ {216, 148, 173, 240, 176, 255, 238, 253},
+ {218, 149, 173, 241, 177, 255, 239, 253},
+ {219, 150, 174, 241, 179, 255, 239, 253},
+ {220, 152, 175, 242, 180, 255, 240, 253},
+ {222, 153, 176, 242, 181, 255, 240, 253},
+ {223, 154, 177, 243, 182, 255, 240, 253},
+ {224, 155, 178, 244, 183, 255, 241, 253},
+ {225, 156, 178, 244, 184, 255, 241, 253},
+ {226, 158, 179, 244, 185, 255, 242, 253},
+ {228, 159, 180, 245, 186, 255, 242, 253},
+ {229, 160, 181, 245, 187, 255, 242, 253},
+ {230, 161, 182, 246, 188, 255, 243, 253},
+ {231, 163, 183, 246, 189, 255, 243, 253},
+ {232, 164, 184, 247, 190, 255, 243, 253},
+ {233, 165, 185, 247, 191, 255, 244, 253},
+ {234, 166, 185, 247, 192, 255, 244, 253},
+ {235, 168, 186, 248, 193, 255, 244, 253},
+ {236, 169, 187, 248, 194, 255, 244, 253},
+ {236, 170, 188, 248, 195, 255, 245, 253},
+ {237, 171, 189, 249, 196, 255, 245, 254},
+ {238, 173, 190, 249, 197, 255, 245, 254},
+ {239, 174, 191, 249, 198, 255, 245, 254},
+ {240, 175, 192, 249, 199, 255, 246, 254},
+ {240, 177, 193, 250, 200, 255, 246, 254},
+ {241, 178, 194, 250, 201, 255, 246, 254},
+ {242, 179, 195, 250, 202, 255, 246, 254},
+ {242, 181, 196, 250, 203, 255, 247, 254},
+ {243, 182, 197, 251, 204, 255, 247, 254},
+ {244, 184, 198, 251, 205, 255, 247, 254},
+ {244, 185, 199, 251, 206, 255, 247, 254},
+ {245, 186, 200, 251, 207, 255, 247, 254},
+ {246, 188, 201, 252, 207, 255, 248, 254},
+ {246, 189, 202, 252, 208, 255, 248, 254},
+ {247, 191, 203, 252, 209, 255, 248, 254},
+ {247, 192, 204, 252, 210, 255, 248, 254},
+ {248, 194, 205, 252, 211, 255, 248, 254},
+ {248, 195, 206, 252, 212, 255, 249, 254},
+ {249, 197, 207, 253, 213, 255, 249, 254},
+ {249, 198, 208, 253, 214, 255, 249, 254},
+ {250, 200, 210, 253, 215, 255, 249, 254},
+ {250, 201, 211, 253, 215, 255, 249, 254},
+ {250, 203, 212, 253, 216, 255, 249, 254},
+ {251, 204, 213, 253, 217, 255, 250, 254},
+ {251, 206, 214, 254, 218, 255, 250, 254},
+ {252, 207, 216, 254, 219, 255, 250, 254},
+ {252, 209, 217, 254, 220, 255, 250, 254},
+ {252, 211, 218, 254, 221, 255, 250, 254},
+ {253, 213, 219, 254, 222, 255, 250, 254},
+ {253, 214, 221, 254, 223, 255, 250, 254},
+ {253, 216, 222, 254, 224, 255, 251, 254},
+ {253, 218, 224, 254, 225, 255, 251, 254},
+ {254, 220, 225, 254, 225, 255, 251, 254},
+ {254, 222, 227, 255, 226, 255, 251, 254},
+ {254, 224, 228, 255, 227, 255, 251, 254},
+ {254, 226, 230, 255, 228, 255, 251, 254},
+ {255, 228, 231, 255, 230, 255, 251, 254},
+ {255, 230, 233, 255, 231, 255, 252, 254},
+ {255, 232, 235, 255, 232, 255, 252, 254},
+ {255, 235, 237, 255, 233, 255, 252, 254},
+ {255, 238, 240, 255, 235, 255, 252, 255},
+ {255, 241, 243, 255, 236, 255, 252, 254},
+ {255, 246, 247, 255, 239, 255, 253, 255}
+};
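+
+/* Illustrative sketch (not from the original source): under the cdf above,
+   the two-sided tail probability is P(|X| > t) = {alpha/(alpha + t)}^beta
+   with beta = 8. The helper below only shows how that tail would be
+   evaluated for a hypothetical alpha; the exact per-node derivation used to
+   generate vp9_modelcoefprobs_pareto8 is not reproduced here. Kept under
+   #if 0 so it has no effect on the build. */
+#if 0
+#include <math.h>
+static double pareto8_tail(double alpha, double t) {
+  /* P(|X| > t) for the two-sided Pareto model with beta = 8 */
+  return pow(alpha / (alpha + t), 8.0);
+}
+#endif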
+
+static void extend_model_to_full_distribution(vp9_prob p,
+ vp9_prob *tree_probs) {
+ const int l = ((p - 1) / 2);
+ const vp9_prob (*model)[MODEL_NODES];
+ model = vp9_modelcoefprobs_pareto8;
+ if (p & 1) {
+ vpx_memcpy(tree_probs + UNCONSTRAINED_NODES,
+ model[l], MODEL_NODES * sizeof(vp9_prob));
+ } else {
+ // interpolate
+ int i;
+ for (i = UNCONSTRAINED_NODES; i < ENTROPY_NODES; ++i)
+ tree_probs[i] = (model[l][i - UNCONSTRAINED_NODES] +
+ model[l + 1][i - UNCONSTRAINED_NODES]) >> 1;
+ }
+}
+
+void vp9_model_to_full_probs(const vp9_prob *model, vp9_prob *full) {
+ if (full != model)
+ vpx_memcpy(full, model, sizeof(vp9_prob) * UNCONSTRAINED_NODES);
+ extend_model_to_full_distribution(model[PIVOT_NODE], full);
+}
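+
+/* Usage sketch (hypothetical values): expand a 3-probability model into the
+   full 11-node distribution.
+
+     vp9_prob model[UNCONSTRAINED_NODES] = { 96, 128, 161 };
+     vp9_prob full[ENTROPY_NODES];
+     vp9_model_to_full_probs(model, full);
+     // full[0..2] are copied from the model; full[3..10] are filled from
+     // vp9_modelcoefprobs_pareto8, indexed (and, for an even pivot,
+     // interpolated) by the ONE-node probability model[PIVOT_NODE].
+ */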
+
+void vp9_model_to_full_probs_sb(
+ vp9_prob model[COEF_BANDS][PREV_COEF_CONTEXTS][UNCONSTRAINED_NODES],
+ vp9_prob full[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES]) {
+ int c, p;
+ for (c = 0; c < COEF_BANDS; ++c)
+ for (p = 0; p < PREV_COEF_CONTEXTS; ++p) {
+ vp9_model_to_full_probs(model[c][p], full[c][p]);
+ }
+}
+
+static vp9_tree_index cat1[2], cat2[4], cat3[6], cat4[8], cat5[10], cat6[28];
+
+static void init_bit_tree(vp9_tree_index *p, int n) {
+ int i = 0;
+
+ while (++i < n) {
+ p[0] = p[1] = i << 1;
+ p += 2;
+ }
+
+ p[0] = p[1] = 0;
+}
+
+static void init_bit_trees() {
+ init_bit_tree(cat1, 1);
+ init_bit_tree(cat2, 2);
+ init_bit_tree(cat3, 3);
+ init_bit_tree(cat4, 4);
+ init_bit_tree(cat5, 5);
+ init_bit_tree(cat6, 14);
+}
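+
+/* For reference: init_bit_tree(cat2, 2) yields cat2 == { 2, 2, 0, 0 }, a
+   degenerate tree in which both branches of each node fall through to the
+   next entry pair, so each of the two extra bits is coded independently
+   with its corresponding Pcat2 probability. */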
+
+vp9_extra_bit vp9_extra_bits[12] = {
+ { 0, 0, 0, 0},
+ { 0, 0, 0, 1},
+ { 0, 0, 0, 2},
+ { 0, 0, 0, 3},
+ { 0, 0, 0, 4},
+ { cat1, Pcat1, 1, 5},
+ { cat2, Pcat2, 2, 7},
+ { cat3, Pcat3, 3, 11},
+ { cat4, Pcat4, 4, 19},
+ { cat5, Pcat5, 5, 35},
+ { cat6, Pcat6, 14, 67},
+ { 0, 0, 0, 0}
+};
+
+#include "vp9/common/vp9_default_coef_probs.h"
+
+// This function updates and then returns an AC coefficient context.
+// It is currently a placeholder function to allow experimentation
+// with various context models based on the energy of earlier tokens
+// within the current block.
+//
+// For now it just returns the previously used context.
+#define MAX_NEIGHBORS 2
+int vp9_get_coef_context(const int *scan, const int *neighbors,
+ int nb_pad, uint8_t *token_cache, int c, int l) {
+ int eob = l;
+ assert(nb_pad == MAX_NEIGHBORS);
+ if (c == eob) {
+ return 0;
+ } else {
+ int ctx;
+ assert(neighbors[MAX_NEIGHBORS * c + 0] >= 0);
+ if (neighbors[MAX_NEIGHBORS * c + 1] >= 0) {
+ ctx = (1 + token_cache[scan[neighbors[MAX_NEIGHBORS * c + 0]]] +
+ token_cache[scan[neighbors[MAX_NEIGHBORS * c + 1]]]) >> 1;
+ } else {
+ ctx = token_cache[scan[neighbors[MAX_NEIGHBORS * c + 0]]];
+ }
+ return ctx;
+ }
+}
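+
+/* Worked example (hypothetical values): if the above and left neighbors of
+   position c have cached energies 2 and 3, the context is
+   (1 + 2 + 3) >> 1 == 3, their rounded average; with only one valid
+   neighbor, that neighbor's energy is used directly. */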
+
+void vp9_default_coef_probs(VP9_COMMON *pc) {
+ vpx_memcpy(pc->fc.coef_probs[TX_4X4], default_coef_probs_4x4,
+ sizeof(pc->fc.coef_probs[TX_4X4]));
+ vpx_memcpy(pc->fc.coef_probs[TX_8X8], default_coef_probs_8x8,
+ sizeof(pc->fc.coef_probs[TX_8X8]));
+ vpx_memcpy(pc->fc.coef_probs[TX_16X16], default_coef_probs_16x16,
+ sizeof(pc->fc.coef_probs[TX_16X16]));
+ vpx_memcpy(pc->fc.coef_probs[TX_32X32], default_coef_probs_32x32,
+ sizeof(pc->fc.coef_probs[TX_32X32]));
+}
+
+// Neighborhood 2-tuples for various scans and blocksizes,
+// in {top, left} order for each position in scan order.
+// -1 indicates the neighbor does not exist.
+DECLARE_ALIGNED(16, int,
+ vp9_default_scan_4x4_neighbors[16 * MAX_NEIGHBORS]);
+DECLARE_ALIGNED(16, int,
+ vp9_col_scan_4x4_neighbors[16 * MAX_NEIGHBORS]);
+DECLARE_ALIGNED(16, int,
+ vp9_row_scan_4x4_neighbors[16 * MAX_NEIGHBORS]);
+DECLARE_ALIGNED(16, int,
+ vp9_col_scan_8x8_neighbors[64 * MAX_NEIGHBORS]);
+DECLARE_ALIGNED(16, int,
+ vp9_row_scan_8x8_neighbors[64 * MAX_NEIGHBORS]);
+DECLARE_ALIGNED(16, int,
+ vp9_default_scan_8x8_neighbors[64 * MAX_NEIGHBORS]);
+DECLARE_ALIGNED(16, int,
+ vp9_col_scan_16x16_neighbors[256 * MAX_NEIGHBORS]);
+DECLARE_ALIGNED(16, int,
+ vp9_row_scan_16x16_neighbors[256 * MAX_NEIGHBORS]);
+DECLARE_ALIGNED(16, int,
+ vp9_default_scan_16x16_neighbors[256 * MAX_NEIGHBORS]);
+DECLARE_ALIGNED(16, int,
+ vp9_default_scan_32x32_neighbors[1024 * MAX_NEIGHBORS]);
+
+static int find_in_scan(const int *scan, int l, int idx) {
+ int n, l2 = l * l;
+ for (n = 0; n < l2; n++) {
+ int rc = scan[n];
+ if (rc == idx)
+ return n;
+ }
+ assert(0);
+ return -1;
+}
+static void init_scan_neighbors(const int *scan, int l, int *neighbors,
+ int max_neighbors) {
+ int l2 = l * l;
+ int n, i, j;
+
+ for (n = 0; n < l2; n++) {
+ int rc = scan[n];
+ assert(max_neighbors == MAX_NEIGHBORS);
+ i = rc / l;
+ j = rc % l;
+ if (i > 0 && j > 0) {
+ // col/row scan is used for adst/dct, and generally means that
+ // energy decreases to zero much faster in the dimension in
+ // which ADST is used compared to the direction in which DCT
+ // is used. Likewise, we find much higher correlation between
+ // coefficients within the direction in which DCT is used.
+ // Therefore, if we use ADST/DCT, prefer the DCT neighbor coeff
+ // as a context. If ADST or DCT is used in both directions, we
+ // use the combination of the two as a context.
+ int a = find_in_scan(scan, l, (i - 1) * l + j);
+ int b = find_in_scan(scan, l, i * l + j - 1);
+ if (scan == vp9_col_scan_4x4 || scan == vp9_col_scan_8x8 ||
+ scan == vp9_col_scan_16x16) {
+ neighbors[max_neighbors * n + 0] = a;
+ neighbors[max_neighbors * n + 1] = -1;
+ } else if (scan == vp9_row_scan_4x4 || scan == vp9_row_scan_8x8 ||
+ scan == vp9_row_scan_16x16) {
+ neighbors[max_neighbors * n + 0] = b;
+ neighbors[max_neighbors * n + 1] = -1;
+ } else {
+ neighbors[max_neighbors * n + 0] = a;
+ neighbors[max_neighbors * n + 1] = b;
+ }
+ } else if (i > 0) {
+ neighbors[max_neighbors * n + 0] = find_in_scan(scan, l, (i - 1) * l + j);
+ neighbors[max_neighbors * n + 1] = -1;
+ } else if (j > 0) {
+ neighbors[max_neighbors * n + 0] =
+ find_in_scan(scan, l, i * l + j - 1);
+ neighbors[max_neighbors * n + 1] = -1;
+ } else {
+ assert(n == 0);
+ // dc predictor doesn't use previous tokens
+ neighbors[max_neighbors * n + 0] = -1;
+ }
+ assert(neighbors[max_neighbors * n + 0] < n);
+ }
+}
+
+void vp9_init_neighbors() {
+ init_scan_neighbors(vp9_default_scan_4x4, 4,
+ vp9_default_scan_4x4_neighbors, MAX_NEIGHBORS);
+ init_scan_neighbors(vp9_row_scan_4x4, 4,
+ vp9_row_scan_4x4_neighbors, MAX_NEIGHBORS);
+ init_scan_neighbors(vp9_col_scan_4x4, 4,
+ vp9_col_scan_4x4_neighbors, MAX_NEIGHBORS);
+ init_scan_neighbors(vp9_default_scan_8x8, 8,
+ vp9_default_scan_8x8_neighbors, MAX_NEIGHBORS);
+ init_scan_neighbors(vp9_row_scan_8x8, 8,
+ vp9_row_scan_8x8_neighbors, MAX_NEIGHBORS);
+ init_scan_neighbors(vp9_col_scan_8x8, 8,
+ vp9_col_scan_8x8_neighbors, MAX_NEIGHBORS);
+ init_scan_neighbors(vp9_default_scan_16x16, 16,
+ vp9_default_scan_16x16_neighbors, MAX_NEIGHBORS);
+ init_scan_neighbors(vp9_row_scan_16x16, 16,
+ vp9_row_scan_16x16_neighbors, MAX_NEIGHBORS);
+ init_scan_neighbors(vp9_col_scan_16x16, 16,
+ vp9_col_scan_16x16_neighbors, MAX_NEIGHBORS);
+ init_scan_neighbors(vp9_default_scan_32x32, 32,
+ vp9_default_scan_32x32_neighbors, MAX_NEIGHBORS);
+}
+
+const int *vp9_get_coef_neighbors_handle(const int *scan, int *pad) {
+ if (scan == vp9_default_scan_4x4) {
+ *pad = MAX_NEIGHBORS;
+ return vp9_default_scan_4x4_neighbors;
+ } else if (scan == vp9_row_scan_4x4) {
+ *pad = MAX_NEIGHBORS;
+ return vp9_row_scan_4x4_neighbors;
+ } else if (scan == vp9_col_scan_4x4) {
+ *pad = MAX_NEIGHBORS;
+ return vp9_col_scan_4x4_neighbors;
+ } else if (scan == vp9_default_scan_8x8) {
+ *pad = MAX_NEIGHBORS;
+ return vp9_default_scan_8x8_neighbors;
+ } else if (scan == vp9_row_scan_8x8) {
+    *pad = MAX_NEIGHBORS;
+ return vp9_row_scan_8x8_neighbors;
+ } else if (scan == vp9_col_scan_8x8) {
+    *pad = MAX_NEIGHBORS;
+ return vp9_col_scan_8x8_neighbors;
+ } else if (scan == vp9_default_scan_16x16) {
+ *pad = MAX_NEIGHBORS;
+ return vp9_default_scan_16x16_neighbors;
+ } else if (scan == vp9_row_scan_16x16) {
+    *pad = MAX_NEIGHBORS;
+ return vp9_row_scan_16x16_neighbors;
+ } else if (scan == vp9_col_scan_16x16) {
+    *pad = MAX_NEIGHBORS;
+ return vp9_col_scan_16x16_neighbors;
+ } else if (scan == vp9_default_scan_32x32) {
+ *pad = MAX_NEIGHBORS;
+ return vp9_default_scan_32x32_neighbors;
+ } else {
+ assert(0);
+ return NULL;
+ }
+}
+
+void vp9_coef_tree_initialize() {
+ vp9_init_neighbors();
+ init_bit_trees();
+ vp9_tokens_from_tree(vp9_coef_encodings, vp9_coef_tree);
+}
+
+// #define COEF_COUNT_TESTING
+
+#define COEF_COUNT_SAT 24
+#define COEF_MAX_UPDATE_FACTOR 112
+#define COEF_COUNT_SAT_KEY 24
+#define COEF_MAX_UPDATE_FACTOR_KEY 112
+#define COEF_COUNT_SAT_AFTER_KEY 24
+#define COEF_MAX_UPDATE_FACTOR_AFTER_KEY 128
+
+void vp9_full_to_model_count(unsigned int *model_count,
+ unsigned int *full_count) {
+ int n;
+ model_count[ZERO_TOKEN] = full_count[ZERO_TOKEN];
+ model_count[ONE_TOKEN] = full_count[ONE_TOKEN];
+ model_count[TWO_TOKEN] = full_count[TWO_TOKEN];
+ for (n = THREE_TOKEN; n < DCT_EOB_TOKEN; ++n)
+ model_count[TWO_TOKEN] += full_count[n];
+ model_count[DCT_EOB_MODEL_TOKEN] = full_count[DCT_EOB_TOKEN];
+}
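+
+/* Worked example (hypothetical counts): with full_count[TWO_TOKEN] == 5 and
+   counts {4, 3, 2, 1, 0, 0, 0, 0} for THREE_TOKEN through DCT_VAL_CATEGORY6,
+   model_count[TWO_TOKEN] becomes 5 + 4 + 3 + 2 + 1 == 15; every token of
+   magnitude two or greater is folded into the model's single TWO_TOKEN
+   bucket. */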
+
+void vp9_full_to_model_counts(
+ vp9_coeff_count_model *model_count, vp9_coeff_count *full_count) {
+ int i, j, k, l;
+ for (i = 0; i < BLOCK_TYPES; ++i)
+ for (j = 0; j < REF_TYPES; ++j)
+ for (k = 0; k < COEF_BANDS; ++k)
+ for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
+ if (l >= 3 && k == 0)
+ continue;
+ vp9_full_to_model_count(model_count[i][j][k][l],
+ full_count[i][j][k][l]);
+ }
+}
+
+static void adapt_coef_probs(VP9_COMMON *cm, TX_SIZE txfm_size,
+ int count_sat, int update_factor) {
+ vp9_coeff_probs_model *dst_coef_probs = cm->fc.coef_probs[txfm_size];
+ vp9_coeff_probs_model *pre_coef_probs = cm->fc.pre_coef_probs[txfm_size];
+ vp9_coeff_count_model *coef_counts = cm->fc.coef_counts[txfm_size];
+ unsigned int (*eob_branch_count)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] =
+ cm->fc.eob_branch_counts[txfm_size];
+ int t, i, j, k, l, count;
+ int factor;
+ unsigned int branch_ct[UNCONSTRAINED_NODES][2];
+ vp9_prob coef_probs[UNCONSTRAINED_NODES];
+ int entropy_nodes_adapt = UNCONSTRAINED_NODES;
+
+ for (i = 0; i < BLOCK_TYPES; ++i)
+ for (j = 0; j < REF_TYPES; ++j)
+ for (k = 0; k < COEF_BANDS; ++k)
+ for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
+ if (l >= 3 && k == 0)
+ continue;
+ vp9_tree_probs_from_distribution(
+ vp9_coefmodel_tree,
+ coef_probs, branch_ct,
+ coef_counts[i][j][k][l], 0);
+#if CONFIG_BALANCED_COEFTREE
+ branch_ct[1][1] = eob_branch_count[i][j][k][l] - branch_ct[1][0];
+ coef_probs[1] = get_binary_prob(branch_ct[1][0], branch_ct[1][1]);
+#else
+ branch_ct[0][1] = eob_branch_count[i][j][k][l] - branch_ct[0][0];
+ coef_probs[0] = get_binary_prob(branch_ct[0][0], branch_ct[0][1]);
+#endif
+ for (t = 0; t < entropy_nodes_adapt; ++t) {
+ count = branch_ct[t][0] + branch_ct[t][1];
+ count = count > count_sat ? count_sat : count;
+ factor = (update_factor * count / count_sat);
+ dst_coef_probs[i][j][k][l][t] =
+ weighted_prob(pre_coef_probs[i][j][k][l][t],
+ coef_probs[t], factor);
+ }
+ }
+}
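+
+/* Adaptation example (hypothetical numbers): with count_sat == 24 and
+   update_factor == 112, a branch observed 12 times gets
+   factor = 112 * 12 / 24 = 56, so the updated probability is
+   weighted_prob(pre, new, 56), roughly (pre * (256 - 56) + new * 56) / 256,
+   i.e. about a 22% pull toward the newly measured probability. */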
+
+void vp9_adapt_coef_probs(VP9_COMMON *cm) {
+ TX_SIZE t;
+ int count_sat;
+ int update_factor; /* denominator 256 */
+
+ if ((cm->frame_type == KEY_FRAME) || cm->intra_only) {
+ update_factor = COEF_MAX_UPDATE_FACTOR_KEY;
+ count_sat = COEF_COUNT_SAT_KEY;
+ } else if (cm->last_frame_type == KEY_FRAME) {
+ update_factor = COEF_MAX_UPDATE_FACTOR_AFTER_KEY; /* adapt quickly */
+ count_sat = COEF_COUNT_SAT_AFTER_KEY;
+ } else {
+ update_factor = COEF_MAX_UPDATE_FACTOR;
+ count_sat = COEF_COUNT_SAT;
+ }
+ for (t = TX_4X4; t <= TX_32X32; t++)
+ adapt_coef_probs(cm, t, count_sat, update_factor);
+}
diff --git a/libvpx/vp9/common/vp9_entropy.h b/libvpx/vp9/common/vp9_entropy.h
new file mode 100644
index 0000000..7f2bf3d
--- /dev/null
+++ b/libvpx/vp9/common/vp9_entropy.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_ENTROPY_H_
+#define VP9_COMMON_VP9_ENTROPY_H_
+
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_treecoder.h"
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_common.h"
+
+/* Coefficient token alphabet */
+
+#define ZERO_TOKEN 0 /* 0 Extra Bits 0+0 */
+#define ONE_TOKEN 1 /* 1 Extra Bits 0+1 */
+#define TWO_TOKEN 2 /* 2 Extra Bits 0+1 */
+#define THREE_TOKEN 3 /* 3 Extra Bits 0+1 */
+#define FOUR_TOKEN 4 /* 4 Extra Bits 0+1 */
+#define DCT_VAL_CATEGORY1 5 /* 5-6 Extra Bits 1+1 */
+#define DCT_VAL_CATEGORY2 6 /* 7-10 Extra Bits 2+1 */
+#define DCT_VAL_CATEGORY3 7 /* 11-18 Extra Bits 3+1 */
+#define DCT_VAL_CATEGORY4 8 /* 19-34 Extra Bits 4+1 */
+#define DCT_VAL_CATEGORY5 9 /* 35-66 Extra Bits 5+1 */
+#define DCT_VAL_CATEGORY6 10 /* 67+ Extra Bits 14+1 */
+#define DCT_EOB_TOKEN 11 /* EOB Extra Bits 0+0 */
+#define MAX_ENTROPY_TOKENS 12
+#define ENTROPY_NODES 11
+#define EOSB_TOKEN 127 /* Not signalled, encoder only */
+
+#define INTER_MODE_CONTEXTS 7
+
+extern const vp9_tree_index vp9_coef_tree[];
+
+#define DCT_EOB_MODEL_TOKEN 3 /* EOB Extra Bits 0+0 */
+extern const vp9_tree_index vp9_coefmodel_tree[];
+
+extern struct vp9_token vp9_coef_encodings[MAX_ENTROPY_TOKENS];
+
+typedef struct {
+ vp9_tree_p tree;
+ const vp9_prob *prob;
+ int len;
+ int base_val;
+} vp9_extra_bit;
+
+extern vp9_extra_bit vp9_extra_bits[12]; /* indexed by token value */
+
+#define PROB_UPDATE_BASELINE_COST 7
+
+#define MAX_PROB 255
+#define DCT_MAX_VALUE 16384
+
+/* Coefficients are predicted via a 3-dimensional probability table. */
+
+/* Outside dimension. 0 = Y with DC, 1 = UV */
+#define BLOCK_TYPES 2
+#define REF_TYPES 2 // intra=0, inter=1
+
+/* Middle dimension reflects the coefficient position within the transform. */
+#define COEF_BANDS 6
+
+/* The inside dimension is a measure of nearby complexity, reflecting the
+   energy of the nearby nonzero coefficients. For the first coefficient
+   (DC, unless block type is 0), we look at the (already encoded) blocks
+   above and to the left of the current block. The context index is then the
+   number (0, 1, or 2) of these blocks having nonzero coefficients.
+   After decoding a coefficient, the measure is determined by the size of the
+   most recently decoded coefficient.
+   Note that the intuitive meaning of this measure changes as coefficients
+   are decoded. For example, prior to the first token a zero means the
+   neighbors are empty, while after the first token, because of the use of
+   end-of-block, a zero means we just decoded a zero and hence guarantees
+   that a nonzero coefficient will appear later in this block. However, this
+   shift in meaning is perfectly OK because our context also depends on the
+   coefficient band (and zigzag positions 0, 1, and 2 are in distinct
+   bands). */
+
+/*# define DC_TOKEN_CONTEXTS 3*/ /* 00, 0!0, !0!0 */
+#define PREV_COEF_CONTEXTS 6
+
+// #define ENTROPY_STATS
+
+typedef unsigned int vp9_coeff_count[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
+ [MAX_ENTROPY_TOKENS];
+typedef unsigned int vp9_coeff_stats[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
+ [ENTROPY_NODES][2];
+typedef vp9_prob vp9_coeff_probs[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
+ [ENTROPY_NODES];
+
+#define SUBEXP_PARAM 4 /* Subexponential code parameter */
+#define MODULUS_PARAM 13 /* Modulus parameter */
+
+struct VP9Common;
+void vp9_default_coef_probs(struct VP9Common *);
+extern DECLARE_ALIGNED(16, const int, vp9_default_scan_4x4[16]);
+
+extern DECLARE_ALIGNED(16, const int, vp9_col_scan_4x4[16]);
+extern DECLARE_ALIGNED(16, const int, vp9_row_scan_4x4[16]);
+
+extern DECLARE_ALIGNED(64, const int, vp9_default_scan_8x8[64]);
+
+extern DECLARE_ALIGNED(16, const int, vp9_col_scan_8x8[64]);
+extern DECLARE_ALIGNED(16, const int, vp9_row_scan_8x8[64]);
+
+extern DECLARE_ALIGNED(16, const int, vp9_default_scan_16x16[256]);
+
+extern DECLARE_ALIGNED(16, const int, vp9_col_scan_16x16[256]);
+extern DECLARE_ALIGNED(16, const int, vp9_row_scan_16x16[256]);
+
+extern DECLARE_ALIGNED(16, const int, vp9_default_scan_32x32[1024]);
+
+void vp9_coef_tree_initialize(void);
+void vp9_adapt_coef_probs(struct VP9Common *);
+
+static INLINE void vp9_reset_sb_tokens_context(MACROBLOCKD* const xd,
+ BLOCK_SIZE_TYPE bsize) {
+ /* Clear entropy contexts */
+ const int bw = 1 << b_width_log2(bsize);
+ const int bh = 1 << b_height_log2(bsize);
+ int i;
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ vpx_memset(xd->plane[i].above_context, 0,
+ sizeof(ENTROPY_CONTEXT) * bw >> xd->plane[i].subsampling_x);
+ vpx_memset(xd->plane[i].left_context, 0,
+ sizeof(ENTROPY_CONTEXT) * bh >> xd->plane[i].subsampling_y);
+ }
+}
+
+// This is the index in the scan order beyond which all coefficients for
+// 8x8 transforms and above are in the top band.
+// For 4x4 blocks the index is smaller, but to keep things common the 4x4
+// lookup table is padded out to this index.
+#define MAXBAND_INDEX 21
+
+extern const uint8_t vp9_coefband_trans_8x8plus[MAXBAND_INDEX + 1];
+extern const uint8_t vp9_coefband_trans_4x4[MAXBAND_INDEX + 1];
+
+
+static int get_coef_band(const uint8_t * band_translate, int coef_index) {
+ return (coef_index > MAXBAND_INDEX)
+ ? (COEF_BANDS-1) : band_translate[coef_index];
+}
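+
+/* e.g. get_coef_band(vp9_coefband_trans_4x4, 7) == 3, and any coef_index
+   beyond MAXBAND_INDEX maps to the last band, COEF_BANDS - 1. */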
+
+extern int vp9_get_coef_context(const int *scan, const int *neighbors,
+ int nb_pad, uint8_t *token_cache, int c, int l);
+const int *vp9_get_coef_neighbors_handle(const int *scan, int *pad);
+
+
+// 128 lists of probabilities are stored for the following ONE-node probs:
+// 1, 3, 5, 7, ..., 253, 255.
+// In-between probabilities are interpolated linearly.
+
+#define COEFPROB_MODELS 128
+
+#define UNCONSTRAINED_NODES 3
+#define MODEL_NODES (ENTROPY_NODES - UNCONSTRAINED_NODES)
+
+#define PIVOT_NODE 2 // which node is pivot
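+
+/* Example: an odd pivot probability such as 129 selects stored row
+   (129 - 1) / 2 == 64 directly, while an even pivot such as 130 lies between
+   two stored lists, so rows 64 and 65 are averaged. */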
+
+typedef vp9_prob vp9_coeff_probs_model[REF_TYPES][COEF_BANDS]
+ [PREV_COEF_CONTEXTS]
+ [UNCONSTRAINED_NODES];
+
+typedef unsigned int vp9_coeff_count_model[REF_TYPES][COEF_BANDS]
+ [PREV_COEF_CONTEXTS]
+ [UNCONSTRAINED_NODES + 1];
+typedef unsigned int vp9_coeff_stats_model[REF_TYPES][COEF_BANDS]
+ [PREV_COEF_CONTEXTS]
+ [UNCONSTRAINED_NODES][2];
+extern void vp9_full_to_model_count(unsigned int *model_count,
+ unsigned int *full_count);
+extern void vp9_full_to_model_counts(
+ vp9_coeff_count_model *model_count, vp9_coeff_count *full_count);
+
+void vp9_model_to_full_probs(const vp9_prob *model, vp9_prob *full);
+
+void vp9_model_to_full_probs_sb(
+ vp9_prob model[COEF_BANDS][PREV_COEF_CONTEXTS][UNCONSTRAINED_NODES],
+ vp9_prob full[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES]);
+
+extern const vp9_prob vp9_modelcoefprobs[COEFPROB_MODELS][ENTROPY_NODES - 1];
+
+static INLINE const int* get_scan_4x4(TX_TYPE tx_type) {
+ switch (tx_type) {
+ case ADST_DCT:
+ return vp9_row_scan_4x4;
+ case DCT_ADST:
+ return vp9_col_scan_4x4;
+ default:
+ return vp9_default_scan_4x4;
+ }
+}
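+
+/* Note: ADST_DCT pairs with the row scan and DCT_ADST with the column scan,
+   consistent with the faster energy decay along the ADST dimension noted in
+   vp9_entropy.c. */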
+
+static INLINE const int* get_scan_8x8(TX_TYPE tx_type) {
+ switch (tx_type) {
+ case ADST_DCT:
+ return vp9_row_scan_8x8;
+ case DCT_ADST:
+ return vp9_col_scan_8x8;
+ default:
+ return vp9_default_scan_8x8;
+ }
+}
+
+static INLINE const int* get_scan_16x16(TX_TYPE tx_type) {
+ switch (tx_type) {
+ case ADST_DCT:
+ return vp9_row_scan_16x16;
+ case DCT_ADST:
+ return vp9_col_scan_16x16;
+ default:
+ return vp9_default_scan_16x16;
+ }
+}
+
+enum { VP9_COEF_UPDATE_PROB = 252 };
+
+#endif // VP9_COMMON_VP9_ENTROPY_H_
diff --git a/libvpx/vp9/common/vp9_entropymode.c b/libvpx/vp9/common/vp9_entropymode.c
new file mode 100644
index 0000000..3302814
--- /dev/null
+++ b/libvpx/vp9/common/vp9_entropymode.c
@@ -0,0 +1,535 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_modecont.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_alloccommon.h"
+#include "vpx_mem/vpx_mem.h"
+
+static const vp9_prob default_kf_uv_probs[VP9_INTRA_MODES]
+ [VP9_INTRA_MODES - 1] = {
+ { 144, 11, 54, 157, 195, 130, 46, 58, 108 } /* y = dc */,
+ { 118, 15, 123, 148, 131, 101, 44, 93, 131 } /* y = v */,
+ { 113, 12, 23, 188, 226, 142, 26, 32, 125 } /* y = h */,
+ { 120, 11, 50, 123, 163, 135, 64, 77, 103 } /* y = d45 */,
+ { 113, 9, 36, 155, 111, 157, 32, 44, 161 } /* y = d135 */,
+ { 116, 9, 55, 176, 76, 96, 37, 61, 149 } /* y = d117 */,
+ { 115, 9, 28, 141, 161, 167, 21, 25, 193 } /* y = d153 */,
+ { 120, 12, 32, 145, 195, 142, 32, 38, 86 } /* y = d27 */,
+ { 116, 12, 64, 120, 140, 125, 49, 115, 121 } /* y = d63 */,
+ { 102, 19, 66, 162, 182, 122, 35, 59, 128 } /* y = tm */
+};
+
+static const vp9_prob default_if_y_probs[BLOCK_SIZE_GROUPS]
+ [VP9_INTRA_MODES - 1] = {
+ { 65, 32, 18, 144, 162, 194, 41, 51, 98 } /* block_size < 8x8 */,
+ { 132, 68, 18, 165, 217, 196, 45, 40, 78 } /* block_size < 16x16 */,
+ { 173, 80, 19, 176, 240, 193, 64, 35, 46 } /* block_size < 32x32 */,
+ { 221, 135, 38, 194, 248, 121, 96, 85, 29 } /* block_size >= 32x32 */
+};
+
+static const vp9_prob default_if_uv_probs[VP9_INTRA_MODES]
+ [VP9_INTRA_MODES - 1] = {
+ { 120, 7, 76, 176, 208, 126, 28, 54, 103 } /* y = dc */,
+ { 48, 12, 154, 155, 139, 90, 34, 117, 119 } /* y = v */,
+ { 67, 6, 25, 204, 243, 158, 13, 21, 96 } /* y = h */,
+ { 97, 5, 44, 131, 176, 139, 48, 68, 97 } /* y = d45 */,
+ { 83, 5, 42, 156, 111, 152, 26, 49, 152 } /* y = d135 */,
+ { 80, 5, 58, 178, 74, 83, 33, 62, 145 } /* y = d117 */,
+ { 86, 5, 32, 154, 192, 168, 14, 22, 163 } /* y = d153 */,
+ { 85, 5, 32, 156, 216, 148, 19, 29, 73 } /* y = d27 */,
+ { 77, 7, 64, 116, 132, 122, 37, 126, 120 } /* y = d63 */,
+ { 101, 21, 107, 181, 192, 103, 19, 67, 125 } /* y = tm */
+};
+
+const vp9_prob vp9_partition_probs[NUM_FRAME_TYPES][NUM_PARTITION_CONTEXTS]
+ [PARTITION_TYPES - 1] = {
+ { /* frame_type = keyframe */
+ /* 8x8 -> 4x4 */
+ { 158, 97, 94 } /* a/l both not split */,
+ { 93, 24, 99 } /* a split, l not split */,
+ { 85, 119, 44 } /* l split, a not split */,
+ { 62, 59, 67 } /* a/l both split */,
+ /* 16x16 -> 8x8 */
+ { 149, 53, 53 } /* a/l both not split */,
+ { 94, 20, 48 } /* a split, l not split */,
+ { 83, 53, 24 } /* l split, a not split */,
+ { 52, 18, 18 } /* a/l both split */,
+ /* 32x32 -> 16x16 */
+ { 150, 40, 39 } /* a/l both not split */,
+ { 78, 12, 26 } /* a split, l not split */,
+ { 67, 33, 11 } /* l split, a not split */,
+ { 24, 7, 5 } /* a/l both split */,
+ /* 64x64 -> 32x32 */
+ { 174, 35, 49 } /* a/l both not split */,
+ { 68, 11, 27 } /* a split, l not split */,
+ { 57, 15, 9 } /* l split, a not split */,
+ { 12, 3, 3 } /* a/l both split */
+ }, { /* frame_type = interframe */
+ /* 8x8 -> 4x4 */
+ { 199, 122, 141 } /* a/l both not split */,
+ { 147, 63, 159 } /* a split, l not split */,
+ { 148, 133, 118 } /* l split, a not split */,
+ { 121, 104, 114 } /* a/l both split */,
+ /* 16x16 -> 8x8 */
+ { 174, 73, 87 } /* a/l both not split */,
+ { 92, 41, 83 } /* a split, l not split */,
+ { 82, 99, 50 } /* l split, a not split */,
+ { 53, 39, 39 } /* a/l both split */,
+ /* 32x32 -> 16x16 */
+ { 177, 58, 59 } /* a/l both not split */,
+ { 68, 26, 63 } /* a split, l not split */,
+ { 52, 79, 25 } /* l split, a not split */,
+ { 17, 14, 12 } /* a/l both split */,
+ /* 64x64 -> 32x32 */
+ { 222, 34, 30 } /* a/l both not split */,
+ { 72, 16, 44 } /* a split, l not split */,
+ { 58, 32, 12 } /* l split, a not split */,
+ { 10, 7, 6 } /* a/l both split */
+ }
+};
+
+/* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */
+const vp9_tree_index vp9_intra_mode_tree[VP9_INTRA_MODES * 2 - 2] = {
+ -DC_PRED, 2, /* 0 = DC_NODE */
+ -TM_PRED, 4, /* 1 = TM_NODE */
+ -V_PRED, 6, /* 2 = V_NODE */
+ 8, 12, /* 3 = COM_NODE */
+ -H_PRED, 10, /* 4 = H_NODE */
+ -D135_PRED, -D117_PRED, /* 5 = D135_NODE */
+ -D45_PRED, 14, /* 6 = D45_NODE */
+ -D63_PRED, 16, /* 7 = D63_NODE */
+ -D153_PRED, -D27_PRED /* 8 = D153_NODE */
+};
+
+const vp9_tree_index vp9_sb_mv_ref_tree[6] = {
+ -ZEROMV, 2,
+ -NEARESTMV, 4,
+ -NEARMV, -NEWMV
+};
+
+const vp9_tree_index vp9_partition_tree[6] = {
+ -PARTITION_NONE, 2,
+ -PARTITION_HORZ, 4,
+ -PARTITION_VERT, -PARTITION_SPLIT
+};
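These tables all follow the vp9_tree_index convention: a non-negative entry is the index of the next left/right pair of nodes, and a negative entry is a leaf storing the negated token. A hedged sketch of the walk; read_bool() stands in for the boolean decoder and is not a real API, and the int8_t typedef mirrors vp9_treecoder.h:

    typedef int8_t vp9_tree_index;       /* assumed, as in vp9_treecoder.h */
    extern int read_bool(vp9_prob prob); /* stand-in for the bool decoder */

    static int read_tree_symbol(const vp9_tree_index *tree,
                                const vp9_prob *probs) {
      vp9_tree_index i = 0;
      do {
        i = tree[i + read_bool(probs[i >> 1])];  /* descend left or right */
      } while (i > 0);
      return -i;  /* leaves are stored negated */
    }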
+
+struct vp9_token vp9_intra_mode_encodings[VP9_INTRA_MODES];
+
+struct vp9_token vp9_sb_mv_ref_encoding_array[VP9_INTER_MODES];
+
+struct vp9_token vp9_partition_encodings[PARTITION_TYPES];
+
+static const vp9_prob default_intra_inter_p[INTRA_INTER_CONTEXTS] = {
+ 9, 102, 187, 225
+};
+
+static const vp9_prob default_comp_inter_p[COMP_INTER_CONTEXTS] = {
+ 239, 183, 119, 96, 41
+};
+
+static const vp9_prob default_comp_ref_p[REF_CONTEXTS] = {
+ 50, 126, 123, 221, 226
+};
+
+static const vp9_prob default_single_ref_p[REF_CONTEXTS][2] = {
+ { 33, 16 },
+ { 77, 74 },
+ { 142, 142 },
+ { 172, 170 },
+ { 238, 247 }
+};
+
+const vp9_prob vp9_default_tx_probs_32x32p[TX_SIZE_CONTEXTS]
+ [TX_SIZE_MAX_SB - 1] = {
+ { 3, 136, 37, },
+ { 5, 52, 13, },
+};
+const vp9_prob vp9_default_tx_probs_16x16p[TX_SIZE_CONTEXTS]
+ [TX_SIZE_MAX_SB - 2] = {
+ { 20, 152, },
+ { 15, 101, },
+};
+const vp9_prob vp9_default_tx_probs_8x8p[TX_SIZE_CONTEXTS]
+ [TX_SIZE_MAX_SB - 3] = {
+ { 100, },
+ { 66, },
+};
+
+void tx_counts_to_branch_counts_32x32(unsigned int *tx_count_32x32p,
+ unsigned int (*ct_32x32p)[2]) {
+ ct_32x32p[0][0] = tx_count_32x32p[TX_4X4];
+ ct_32x32p[0][1] = tx_count_32x32p[TX_8X8] +
+ tx_count_32x32p[TX_16X16] +
+ tx_count_32x32p[TX_32X32];
+ ct_32x32p[1][0] = tx_count_32x32p[TX_8X8];
+ ct_32x32p[1][1] = tx_count_32x32p[TX_16X16] +
+ tx_count_32x32p[TX_32X32];
+ ct_32x32p[2][0] = tx_count_32x32p[TX_16X16];
+ ct_32x32p[2][1] = tx_count_32x32p[TX_32X32];
+}
+
+void tx_counts_to_branch_counts_16x16(unsigned int *tx_count_16x16p,
+ unsigned int (*ct_16x16p)[2]) {
+ ct_16x16p[0][0] = tx_count_16x16p[TX_4X4];
+ ct_16x16p[0][1] = tx_count_16x16p[TX_8X8] +
+ tx_count_16x16p[TX_16X16];
+ ct_16x16p[1][0] = tx_count_16x16p[TX_8X8];
+ ct_16x16p[1][1] = tx_count_16x16p[TX_16X16];
+}
+
+void tx_counts_to_branch_counts_8x8(unsigned int *tx_count_8x8p,
+ unsigned int (*ct_8x8p)[2]) {
+ ct_8x8p[0][0] = tx_count_8x8p[TX_4X4];
+ ct_8x8p[0][1] = tx_count_8x8p[TX_8X8];
+}
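The three helpers above flatten a histogram of chosen transform sizes into per-node binary counts for the tx-size tree. A quick hedged usage sketch; it assumes TX_4X4..TX_32X32 enumerate 0..3, as their use above implies:

    static void example_tx_branch_counts(void) {
      unsigned int counts[4] = { 10, 4, 3, 1 };  /* 4x4, 8x8, 16x16, 32x32 */
      unsigned int ct[3][2];
      tx_counts_to_branch_counts_32x32(counts, ct);
      /* ct == {{10, 8}, {4, 4}, {3, 1}}: node 0 decides 4x4 vs. larger,
       * node 1 decides 8x8 vs. larger, node 2 decides 16x16 vs. 32x32. */
    }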
+
+const vp9_prob vp9_default_mbskip_probs[MBSKIP_CONTEXTS] = {
+ 192, 128, 64
+};
+
+void vp9_init_mbmode_probs(VP9_COMMON *x) {
+ vpx_memcpy(x->fc.uv_mode_prob, default_if_uv_probs,
+ sizeof(default_if_uv_probs));
+ vpx_memcpy(x->kf_uv_mode_prob, default_kf_uv_probs,
+ sizeof(default_kf_uv_probs));
+ vpx_memcpy(x->fc.y_mode_prob, default_if_y_probs,
+ sizeof(default_if_y_probs));
+
+ vpx_memcpy(x->fc.switchable_interp_prob, vp9_switchable_interp_prob,
+ sizeof(vp9_switchable_interp_prob));
+
+ vpx_memcpy(x->fc.partition_prob, vp9_partition_probs,
+ sizeof(vp9_partition_probs));
+
+ vpx_memcpy(x->fc.intra_inter_prob, default_intra_inter_p,
+ sizeof(default_intra_inter_p));
+ vpx_memcpy(x->fc.comp_inter_prob, default_comp_inter_p,
+ sizeof(default_comp_inter_p));
+ vpx_memcpy(x->fc.comp_ref_prob, default_comp_ref_p,
+ sizeof(default_comp_ref_p));
+ vpx_memcpy(x->fc.single_ref_prob, default_single_ref_p,
+ sizeof(default_single_ref_p));
+ vpx_memcpy(x->fc.tx_probs_32x32p, vp9_default_tx_probs_32x32p,
+ sizeof(vp9_default_tx_probs_32x32p));
+ vpx_memcpy(x->fc.tx_probs_16x16p, vp9_default_tx_probs_16x16p,
+ sizeof(vp9_default_tx_probs_16x16p));
+ vpx_memcpy(x->fc.tx_probs_8x8p, vp9_default_tx_probs_8x8p,
+ sizeof(vp9_default_tx_probs_8x8p));
+ vpx_memcpy(x->fc.mbskip_probs, vp9_default_mbskip_probs,
+ sizeof(vp9_default_mbskip_probs));
+}
+
+const vp9_tree_index vp9_switchable_interp_tree[VP9_SWITCHABLE_FILTERS*2-2] = {
+ -0, 2,
+ -1, -2
+};
+struct vp9_token vp9_switchable_interp_encodings[VP9_SWITCHABLE_FILTERS];
+const INTERPOLATIONFILTERTYPE vp9_switchable_interp[VP9_SWITCHABLE_FILTERS] = {
+ EIGHTTAP, EIGHTTAP_SMOOTH, EIGHTTAP_SHARP};
+const int vp9_switchable_interp_map[SWITCHABLE+1] = {1, 0, 2, -1, -1};
+const vp9_prob vp9_switchable_interp_prob[VP9_SWITCHABLE_FILTERS + 1]
+                                         [VP9_SWITCHABLE_FILTERS - 1] = {
+ { 235, 162, },
+ { 36, 255, },
+ { 34, 3, },
+ { 149, 144, },
+};
+
+// Indicates if the filter is interpolating or non-interpolating
+const int vp9_is_interpolating_filter[SWITCHABLE + 1] = {1, 1, 1, 1, -1};
+
+void vp9_entropy_mode_init(void) {
+ vp9_tokens_from_tree(vp9_intra_mode_encodings, vp9_intra_mode_tree);
+ vp9_tokens_from_tree(vp9_switchable_interp_encodings,
+ vp9_switchable_interp_tree);
+ vp9_tokens_from_tree(vp9_partition_encodings, vp9_partition_tree);
+
+ vp9_tokens_from_tree_offset(vp9_sb_mv_ref_encoding_array,
+ vp9_sb_mv_ref_tree, NEARESTMV);
+}
+
+void vp9_init_mode_contexts(VP9_COMMON *pc) {
+ vpx_memset(pc->fc.inter_mode_counts, 0, sizeof(pc->fc.inter_mode_counts));
+ vpx_memcpy(pc->fc.inter_mode_probs,
+ vp9_default_inter_mode_probs,
+ sizeof(vp9_default_inter_mode_probs));
+}
+
+void vp9_accum_mv_refs(VP9_COMMON *pc,
+ MB_PREDICTION_MODE m,
+ const int context) {
+ unsigned int (*inter_mode_counts)[VP9_INTER_MODES - 1][2] =
+ pc->fc.inter_mode_counts;
+
+ if (m == ZEROMV) {
+ ++inter_mode_counts[context][0][0];
+ } else {
+ ++inter_mode_counts[context][0][1];
+ if (m == NEARESTMV) {
+ ++inter_mode_counts[context][1][0];
+ } else {
+ ++inter_mode_counts[context][1][1];
+ if (m == NEARMV) {
+ ++inter_mode_counts[context][2][0];
+ } else {
+ ++inter_mode_counts[context][2][1];
+ }
+ }
+ }
+}
+
+#define MVREF_COUNT_SAT 20
+#define MVREF_MAX_UPDATE_FACTOR 128
+void vp9_adapt_mode_context(VP9_COMMON *pc) {
+ int i, j;
+ unsigned int (*inter_mode_counts)[VP9_INTER_MODES - 1][2] =
+ pc->fc.inter_mode_counts;
+ vp9_prob (*mode_context)[VP9_INTER_MODES - 1] = pc->fc.inter_mode_probs;
+
+ for (j = 0; j < INTER_MODE_CONTEXTS; j++) {
+ for (i = 0; i < VP9_INTER_MODES - 1; i++) {
+ int count = inter_mode_counts[j][i][0] + inter_mode_counts[j][i][1];
+ int factor;
+ count = count > MVREF_COUNT_SAT ? MVREF_COUNT_SAT : count;
+ factor = (MVREF_MAX_UPDATE_FACTOR * count / MVREF_COUNT_SAT);
+ mode_context[j][i] = weighted_prob(
+ pc->fc.pre_inter_mode_probs[j][i],
+ get_binary_prob(inter_mode_counts[j][i][0],
+ inter_mode_counts[j][i][1]),
+ factor);
+ }
+ }
+}
+
+#define MODE_COUNT_SAT 20
+#define MODE_MAX_UPDATE_FACTOR 128
+static int update_mode_ct(vp9_prob pre_prob, vp9_prob prob,
+ unsigned int branch_ct[2]) {
+ int factor, count = branch_ct[0] + branch_ct[1];
+ count = count > MODE_COUNT_SAT ? MODE_COUNT_SAT : count;
+ factor = (MODE_MAX_UPDATE_FACTOR * count / MODE_COUNT_SAT);
+ return weighted_prob(pre_prob, prob, factor);
+}
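To make the saturating update concrete, here is a worked example. It assumes weighted_prob(a, b, f) blends as (a * (256 - f) + b * f + 128) >> 8 and get_binary_prob(n0, n1) as roughly 256 * n0 / (n0 + n1); neither helper is shown in this diff, so treat the exact rounding as an assumption:

    pre_prob = 128, branch_ct = {30, 10}
    count  = min(30 + 10, MODE_COUNT_SAT) = 20
    factor = 128 * 20 / 20 = 128            (full update weight)
    prob   = 256 * 30 / (30 + 10) = 192
    result = (128 * (256 - 128) + 192 * 128 + 128) >> 8 = 160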
+
+static void update_mode_probs(int n_modes,
+ const vp9_tree_index *tree, unsigned int *cnt,
+ vp9_prob *pre_probs, vp9_prob *dst_probs,
+ unsigned int tok0_offset) {
+#define MAX_PROBS 32
+ vp9_prob probs[MAX_PROBS];
+ unsigned int branch_ct[MAX_PROBS][2];
+ int t;
+
+ assert(n_modes - 1 < MAX_PROBS);
+ vp9_tree_probs_from_distribution(tree, probs, branch_ct, cnt, tok0_offset);
+ for (t = 0; t < n_modes - 1; ++t)
+ dst_probs[t] = update_mode_ct(pre_probs[t], probs[t], branch_ct[t]);
+}
+
+static int update_mode_ct2(vp9_prob pre_prob, unsigned int branch_ct[2]) {
+ return update_mode_ct(pre_prob, get_binary_prob(branch_ct[0],
+ branch_ct[1]), branch_ct);
+}
+
+// #define MODE_COUNT_TESTING
+void vp9_adapt_mode_probs(VP9_COMMON *cm) {
+ int i, j;
+ FRAME_CONTEXT *fc = &cm->fc;
+#ifdef MODE_COUNT_TESTING
+ int t;
+
+ printf("static const unsigned int\nymode_counts"
+ "[VP9_INTRA_MODES] = {\n");
+ for (t = 0; t < VP9_INTRA_MODES; ++t)
+ printf("%d, ", fc->ymode_counts[t]);
+ printf("};\n");
+ printf("static const unsigned int\nuv_mode_counts"
+ "[VP9_INTRA_MODES] [VP9_INTRA_MODES] = {\n");
+ for (i = 0; i < VP9_INTRA_MODES; ++i) {
+ printf(" {");
+ for (t = 0; t < VP9_INTRA_MODES; ++t)
+ printf("%d, ", fc->uv_mode_counts[i][t]);
+ printf("},\n");
+ }
+ printf("};\n");
+ printf("static const unsigned int\nbmode_counts"
+ "[VP9_NKF_BINTRAMODES] = {\n");
+ for (t = 0; t < VP9_NKF_BINTRAMODES; ++t)
+ printf("%d, ", fc->bmode_counts[t]);
+ printf("};\n");
+ printf("static const unsigned int\ni8x8_mode_counts"
+ "[VP9_I8X8_MODES] = {\n");
+ for (t = 0; t < VP9_I8X8_MODES; ++t)
+ printf("%d, ", fc->i8x8_mode_counts[t]);
+ printf("};\n");
+ printf("static const unsigned int\nmbsplit_counts"
+ "[VP9_NUMMBSPLITS] = {\n");
+ for (t = 0; t < VP9_NUMMBSPLITS; ++t)
+ printf("%d, ", fc->mbsplit_counts[t]);
+ printf("};\n");
+#endif
+
+ for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
+ fc->intra_inter_prob[i] = update_mode_ct2(fc->pre_intra_inter_prob[i],
+ fc->intra_inter_count[i]);
+ for (i = 0; i < COMP_INTER_CONTEXTS; i++)
+ fc->comp_inter_prob[i] = update_mode_ct2(fc->pre_comp_inter_prob[i],
+ fc->comp_inter_count[i]);
+ for (i = 0; i < REF_CONTEXTS; i++)
+ fc->comp_ref_prob[i] = update_mode_ct2(fc->pre_comp_ref_prob[i],
+ fc->comp_ref_count[i]);
+ for (i = 0; i < REF_CONTEXTS; i++)
+ for (j = 0; j < 2; j++)
+ fc->single_ref_prob[i][j] = update_mode_ct2(fc->pre_single_ref_prob[i][j],
+ fc->single_ref_count[i][j]);
+
+ for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
+ update_mode_probs(VP9_INTRA_MODES, vp9_intra_mode_tree,
+ fc->y_mode_counts[i], fc->pre_y_mode_prob[i],
+ fc->y_mode_prob[i], 0);
+
+ for (i = 0; i < VP9_INTRA_MODES; ++i)
+ update_mode_probs(VP9_INTRA_MODES, vp9_intra_mode_tree,
+ fc->uv_mode_counts[i], fc->pre_uv_mode_prob[i],
+ fc->uv_mode_prob[i], 0);
+
+ for (i = 0; i < NUM_PARTITION_CONTEXTS; i++)
+ update_mode_probs(PARTITION_TYPES, vp9_partition_tree,
+ fc->partition_counts[i], fc->pre_partition_prob[i],
+ fc->partition_prob[INTER_FRAME][i], 0);
+
+ if (cm->mcomp_filter_type == SWITCHABLE) {
+ for (i = 0; i <= VP9_SWITCHABLE_FILTERS; i++) {
+ update_mode_probs(VP9_SWITCHABLE_FILTERS, vp9_switchable_interp_tree,
+ fc->switchable_interp_count[i],
+ fc->pre_switchable_interp_prob[i],
+ fc->switchable_interp_prob[i], 0);
+ }
+ }
+ if (cm->txfm_mode == TX_MODE_SELECT) {
+ int j;
+ unsigned int branch_ct_8x8p[TX_SIZE_MAX_SB - 3][2];
+ unsigned int branch_ct_16x16p[TX_SIZE_MAX_SB - 2][2];
+ unsigned int branch_ct_32x32p[TX_SIZE_MAX_SB - 1][2];
+ for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
+ tx_counts_to_branch_counts_8x8(cm->fc.tx_count_8x8p[i],
+ branch_ct_8x8p);
+ for (j = 0; j < TX_SIZE_MAX_SB - 3; ++j) {
+ int factor;
+ int count = branch_ct_8x8p[j][0] + branch_ct_8x8p[j][1];
+ vp9_prob prob = get_binary_prob(branch_ct_8x8p[j][0],
+ branch_ct_8x8p[j][1]);
+ count = count > MODE_COUNT_SAT ? MODE_COUNT_SAT : count;
+ factor = (MODE_MAX_UPDATE_FACTOR * count / MODE_COUNT_SAT);
+ cm->fc.tx_probs_8x8p[i][j] = weighted_prob(
+ cm->fc.pre_tx_probs_8x8p[i][j], prob, factor);
+ }
+ }
+ for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
+ tx_counts_to_branch_counts_16x16(cm->fc.tx_count_16x16p[i],
+ branch_ct_16x16p);
+ for (j = 0; j < TX_SIZE_MAX_SB - 2; ++j) {
+ int factor;
+ int count = branch_ct_16x16p[j][0] + branch_ct_16x16p[j][1];
+ vp9_prob prob = get_binary_prob(branch_ct_16x16p[j][0],
+ branch_ct_16x16p[j][1]);
+ count = count > MODE_COUNT_SAT ? MODE_COUNT_SAT : count;
+ factor = (MODE_MAX_UPDATE_FACTOR * count / MODE_COUNT_SAT);
+ cm->fc.tx_probs_16x16p[i][j] = weighted_prob(
+ cm->fc.pre_tx_probs_16x16p[i][j], prob, factor);
+ }
+ }
+ for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
+ tx_counts_to_branch_counts_32x32(cm->fc.tx_count_32x32p[i],
+ branch_ct_32x32p);
+ for (j = 0; j < TX_SIZE_MAX_SB - 1; ++j) {
+ int factor;
+ int count = branch_ct_32x32p[j][0] + branch_ct_32x32p[j][1];
+ vp9_prob prob = get_binary_prob(branch_ct_32x32p[j][0],
+ branch_ct_32x32p[j][1]);
+ count = count > MODE_COUNT_SAT ? MODE_COUNT_SAT : count;
+ factor = (MODE_MAX_UPDATE_FACTOR * count / MODE_COUNT_SAT);
+ cm->fc.tx_probs_32x32p[i][j] = weighted_prob(
+ cm->fc.pre_tx_probs_32x32p[i][j], prob, factor);
+ }
+ }
+ }
+ for (i = 0; i < MBSKIP_CONTEXTS; ++i)
+ fc->mbskip_probs[i] = update_mode_ct2(fc->pre_mbskip_probs[i],
+ fc->mbskip_count[i]);
+}
+
+static void set_default_lf_deltas(MACROBLOCKD *xd) {
+ xd->mode_ref_lf_delta_enabled = 1;
+ xd->mode_ref_lf_delta_update = 1;
+
+ xd->ref_lf_deltas[INTRA_FRAME] = 1;
+ xd->ref_lf_deltas[LAST_FRAME] = 0;
+ xd->ref_lf_deltas[GOLDEN_FRAME] = -1;
+ xd->ref_lf_deltas[ALTREF_FRAME] = -1;
+
+ xd->mode_lf_deltas[0] = 0; // Zero
+ xd->mode_lf_deltas[1] = 0; // New mv
+}
+
+void vp9_setup_past_independence(VP9_COMMON *cm, MACROBLOCKD *xd) {
+ // Reset the segment feature data to the default stats:
+ // Features disabled, 0, with delta coding (Default state).
+ int i;
+ vp9_clearall_segfeatures(xd);
+ xd->mb_segment_abs_delta = SEGMENT_DELTADATA;
+ if (cm->last_frame_seg_map)
+ vpx_memset(cm->last_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
+
+ // Reset the mode ref deltas for loop filter
+ vpx_memset(xd->last_ref_lf_deltas, 0, sizeof(xd->last_ref_lf_deltas));
+ vpx_memset(xd->last_mode_lf_deltas, 0, sizeof(xd->last_mode_lf_deltas));
+ set_default_lf_deltas(xd);
+
+ vp9_default_coef_probs(cm);
+ vp9_init_mbmode_probs(cm);
+ vpx_memcpy(cm->kf_y_mode_prob, vp9_kf_default_bmode_probs,
+ sizeof(vp9_kf_default_bmode_probs));
+ vp9_init_mv_probs(cm);
+
+ // To force update of the sharpness
+ cm->last_sharpness_level = -1;
+
+ vp9_init_mode_contexts(cm);
+
+ if ((cm->frame_type == KEY_FRAME) ||
+ cm->error_resilient_mode || (cm->reset_frame_context == 3)) {
+ // Reset all frame contexts.
+ for (i = 0; i < NUM_FRAME_CONTEXTS; ++i)
+ vpx_memcpy(&cm->frame_contexts[i], &cm->fc, sizeof(cm->fc));
+ } else if (cm->reset_frame_context == 2) {
+ // Reset only the frame context specified in the frame header.
+ vpx_memcpy(&cm->frame_contexts[cm->frame_context_idx], &cm->fc,
+ sizeof(cm->fc));
+ }
+
+ vpx_memset(cm->prev_mip, 0,
+ cm->mode_info_stride * (cm->mi_rows + 1) * sizeof(MODE_INFO));
+ vpx_memset(cm->mip, 0,
+ cm->mode_info_stride * (cm->mi_rows + 1) * sizeof(MODE_INFO));
+
+ vp9_update_mode_info_border(cm, cm->mip);
+ vp9_update_mode_info_in_image(cm, cm->mi);
+
+ vp9_update_mode_info_border(cm, cm->prev_mip);
+ vp9_update_mode_info_in_image(cm, cm->prev_mi);
+
+ vpx_memset(cm->ref_frame_sign_bias, 0, sizeof(cm->ref_frame_sign_bias));
+
+ cm->frame_context_idx = 0;
+}
diff --git a/libvpx/vp9/common/vp9_entropymode.h b/libvpx/vp9/common/vp9_entropymode.h
new file mode 100644
index 0000000..aa8aec7
--- /dev/null
+++ b/libvpx/vp9/common/vp9_entropymode.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_ENTROPYMODE_H_
+#define VP9_COMMON_VP9_ENTROPYMODE_H_
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_treecoder.h"
+
+#define SUBMVREF_COUNT 5
+#define TX_SIZE_CONTEXTS 2
+
+#define VP9_MODE_UPDATE_PROB 252
+
+// #define MODE_STATS
+
+extern int vp9_mv_cont(const int_mv *l, const int_mv *a);
+
+
+extern const vp9_prob vp9_kf_default_bmode_probs[VP9_INTRA_MODES]
+ [VP9_INTRA_MODES]
+ [VP9_INTRA_MODES - 1];
+
+extern const vp9_tree_index vp9_intra_mode_tree[];
+extern const vp9_tree_index vp9_sb_mv_ref_tree[];
+
+extern struct vp9_token vp9_intra_mode_encodings[VP9_INTRA_MODES];
+
+/* Inter mode values do not start at zero */
+
+extern struct vp9_token vp9_sb_mv_ref_encoding_array[VP9_INTER_MODES];
+
+// probability models for partition information
+extern const vp9_tree_index vp9_partition_tree[];
+extern struct vp9_token vp9_partition_encodings[PARTITION_TYPES];
+extern const vp9_prob vp9_partition_probs[NUM_FRAME_TYPES]
+ [NUM_PARTITION_CONTEXTS]
+ [PARTITION_TYPES - 1];
+
+void vp9_entropy_mode_init(void);
+
+struct VP9Common;
+
+/* sets up common features to forget past dependence */
+void vp9_setup_past_independence(struct VP9Common *cm, MACROBLOCKD *xd);
+
+void vp9_init_mbmode_probs(struct VP9Common *x);
+
+extern void vp9_init_mode_contexts(struct VP9Common *pc);
+
+extern void vp9_adapt_mode_context(struct VP9Common *pc);
+
+extern void vp9_accum_mv_refs(struct VP9Common *pc,
+ MB_PREDICTION_MODE m,
+ const int context);
+
+void vp9_adapt_mode_probs(struct VP9Common *);
+
+#define VP9_SWITCHABLE_FILTERS 3 /* number of switchable filters */
+
+extern const INTERPOLATIONFILTERTYPE vp9_switchable_interp
+ [VP9_SWITCHABLE_FILTERS];
+
+extern const int vp9_switchable_interp_map[SWITCHABLE + 1];
+
+extern const int vp9_is_interpolating_filter[SWITCHABLE + 1];
+
+extern const vp9_tree_index vp9_switchable_interp_tree
+ [2 * (VP9_SWITCHABLE_FILTERS - 1)];
+
+extern struct vp9_token vp9_switchable_interp_encodings[VP9_SWITCHABLE_FILTERS];
+
+extern const vp9_prob vp9_switchable_interp_prob[VP9_SWITCHABLE_FILTERS + 1]
+ [VP9_SWITCHABLE_FILTERS - 1];
+
+extern const vp9_prob vp9_default_tx_probs_32x32p[TX_SIZE_CONTEXTS]
+ [TX_SIZE_MAX_SB - 1];
+extern const vp9_prob vp9_default_tx_probs_16x16p[TX_SIZE_CONTEXTS]
+ [TX_SIZE_MAX_SB - 2];
+extern const vp9_prob vp9_default_tx_probs_8x8p[TX_SIZE_CONTEXTS]
+ [TX_SIZE_MAX_SB - 3];
+
+extern void tx_counts_to_branch_counts_32x32(unsigned int *tx_count_32x32p,
+ unsigned int (*ct_32x32p)[2]);
+extern void tx_counts_to_branch_counts_16x16(unsigned int *tx_count_16x16p,
+ unsigned int (*ct_16x16p)[2]);
+extern void tx_counts_to_branch_counts_8x8(unsigned int *tx_count_8x8p,
+ unsigned int (*ct_8x8p)[2]);
+#endif // VP9_COMMON_VP9_ENTROPYMODE_H_
diff --git a/libvpx/vp9/common/vp9_entropymv.c b/libvpx/vp9/common/vp9_entropymv.c
new file mode 100644
index 0000000..e07e43c
--- /dev/null
+++ b/libvpx/vp9/common/vp9_entropymv.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_entropymv.h"
+
+//#define MV_COUNT_TESTING
+
+#define MV_COUNT_SAT 20
+#define MV_MAX_UPDATE_FACTOR 128
+
+/* Integer pel reference mv threshold for use of high-precision 1/8 mv */
+#define COMPANDED_MVREF_THRESH 8
+
+/* Smooth or bias the mv-counts before prob computation */
+/* #define SMOOTH_MV_COUNTS */
+
+const vp9_tree_index vp9_mv_joint_tree[2 * MV_JOINTS - 2] = {
+ -MV_JOINT_ZERO, 2,
+ -MV_JOINT_HNZVZ, 4,
+ -MV_JOINT_HZVNZ, -MV_JOINT_HNZVNZ
+};
+struct vp9_token vp9_mv_joint_encodings[MV_JOINTS];
+
+const vp9_tree_index vp9_mv_class_tree[2 * MV_CLASSES - 2] = {
+ -MV_CLASS_0, 2,
+ -MV_CLASS_1, 4,
+ 6, 8,
+ -MV_CLASS_2, -MV_CLASS_3,
+ 10, 12,
+ -MV_CLASS_4, -MV_CLASS_5,
+ -MV_CLASS_6, 14,
+ 16, 18,
+ -MV_CLASS_7, -MV_CLASS_8,
+ -MV_CLASS_9, -MV_CLASS_10,
+};
+struct vp9_token vp9_mv_class_encodings[MV_CLASSES];
+
+const vp9_tree_index vp9_mv_class0_tree[2 * CLASS0_SIZE - 2] = {
+ -0, -1,
+};
+struct vp9_token vp9_mv_class0_encodings[CLASS0_SIZE];
+
+const vp9_tree_index vp9_mv_fp_tree[2 * 4 - 2] = {
+ -0, 2,
+ -1, 4,
+ -2, -3
+};
+struct vp9_token vp9_mv_fp_encodings[4];
+
+const nmv_context vp9_default_nmv_context = {
+ {32, 64, 96},
+ {
+ { /* vert component */
+ 128, /* sign */
+ {224, 144, 192, 168, 192, 176, 192, 198, 198, 245}, /* class */
+ {216}, /* class0 */
+ {136, 140, 148, 160, 176, 192, 224, 234, 234, 240}, /* bits */
+ {{128, 128, 64}, {96, 112, 64}}, /* class0_fp */
+ {64, 96, 64}, /* fp */
+ 160, /* class0_hp bit */
+ 128, /* hp */
+ },
+ { /* hor component */
+ 128, /* sign */
+ {216, 128, 176, 160, 176, 176, 192, 198, 198, 208}, /* class */
+ {208}, /* class0 */
+ {136, 140, 148, 160, 176, 192, 224, 234, 234, 240}, /* bits */
+ {{128, 128, 64}, {96, 112, 64}}, /* class0_fp */
+ {64, 96, 64}, /* fp */
+ 160, /* class0_hp bit */
+ 128, /* hp */
+ }
+ },
+};
+
+MV_JOINT_TYPE vp9_get_mv_joint(const MV *mv) {
+ if (mv->row == 0 && mv->col == 0)
+ return MV_JOINT_ZERO;
+ else if (mv->row == 0 && mv->col != 0)
+ return MV_JOINT_HNZVZ;
+ else if (mv->row != 0 && mv->col == 0)
+ return MV_JOINT_HZVNZ;
+ else
+ return MV_JOINT_HNZVNZ;
+}
+
+#define mv_class_base(c) ((c) ? (CLASS0_SIZE << ((c) + 2)) : 0)
+
+MV_CLASS_TYPE vp9_get_mv_class(int z, int *offset) {
+ MV_CLASS_TYPE c;
+ if (z < CLASS0_SIZE * 8) c = MV_CLASS_0;
+ else if (z < CLASS0_SIZE * 16) c = MV_CLASS_1;
+ else if (z < CLASS0_SIZE * 32) c = MV_CLASS_2;
+ else if (z < CLASS0_SIZE * 64) c = MV_CLASS_3;
+ else if (z < CLASS0_SIZE * 128) c = MV_CLASS_4;
+ else if (z < CLASS0_SIZE * 256) c = MV_CLASS_5;
+ else if (z < CLASS0_SIZE * 512) c = MV_CLASS_6;
+ else if (z < CLASS0_SIZE * 1024) c = MV_CLASS_7;
+ else if (z < CLASS0_SIZE * 2048) c = MV_CLASS_8;
+ else if (z < CLASS0_SIZE * 4096) c = MV_CLASS_9;
+ else if (z < CLASS0_SIZE * 8192) c = MV_CLASS_10;
+ else assert(0);
+ if (offset)
+ *offset = z - mv_class_base(c);
+ return c;
+}
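A worked decomposition to make the class boundaries concrete (CLASS0_SIZE == 2):

    z = 100  ->  64 <= z < 128, so c = MV_CLASS_3
    mv_class_base(3) = CLASS0_SIZE << 5 = 64
    offset = 100 - 64 = 36
    vp9_get_mv_mag(MV_CLASS_3, 36) == 100 recovers the magnitude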
+
+int vp9_use_nmv_hp(const MV *ref) {
+ return (abs(ref->row) >> 3) < COMPANDED_MVREF_THRESH &&
+ (abs(ref->col) >> 3) < COMPANDED_MVREF_THRESH;
+}
+
+int vp9_get_mv_mag(MV_CLASS_TYPE c, int offset) {
+ return mv_class_base(c) + offset;
+}
+
+static void increment_nmv_component_count(int v,
+ nmv_component_counts *mvcomp,
+ int incr,
+ int usehp) {
+  assert(v != 0);  /* should not be zero */
+ mvcomp->mvcount[MV_MAX + v] += incr;
+}
+
+static void increment_nmv_component(int v,
+ nmv_component_counts *mvcomp,
+ int incr,
+ int usehp) {
+ int s, z, c, o, d, e, f;
+ if (!incr)
+ return;
+  assert(v != 0);  /* should not be zero */
+ s = v < 0;
+ mvcomp->sign[s] += incr;
+ z = (s ? -v : v) - 1; /* magnitude - 1 */
+
+ c = vp9_get_mv_class(z, &o);
+ mvcomp->classes[c] += incr;
+
+ d = (o >> 3); /* int mv data */
+ f = (o >> 1) & 3; /* fractional pel mv data */
+ e = (o & 1); /* high precision mv data */
+ if (c == MV_CLASS_0) {
+ mvcomp->class0[d] += incr;
+ } else {
+ int i;
+ int b = c + CLASS0_BITS - 1; // number of bits
+ for (i = 0; i < b; ++i)
+ mvcomp->bits[i][((d >> i) & 1)] += incr;
+ }
+
+ /* Code the fractional pel bits */
+ if (c == MV_CLASS_0) {
+ mvcomp->class0_fp[d][f] += incr;
+ } else {
+ mvcomp->fp[f] += incr;
+ }
+
+ /* Code the high precision bit */
+ if (usehp) {
+ if (c == MV_CLASS_0) {
+ mvcomp->class0_hp[e] += incr;
+ } else {
+ mvcomp->hp[e] += incr;
+ }
+ }
+}
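Tracing a sample value through the decomposition above (usehp = 1):

    v = +37  ->  sign s = 0, z = 36
    z = 36   ->  c = MV_CLASS_2 (32 <= z < 64), offset o = 4
    d = o >> 3 = 0        ->  b = c + CLASS0_BITS - 1 = 2 integer bits, both 0
    f = (o >> 1) & 3 = 2  ->  fp[2] is incremented
    e = o & 1 = 0         ->  hp[0] is incremented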
+
+#ifdef SMOOTH_MV_COUNTS
+static void smooth_counts(nmv_component_counts *mvcomp) {
+ static const int flen = 3; // (filter_length + 1) / 2
+ static const int fval[] = {8, 3, 1};
+ static const int fvalbits = 4;
+ int i;
+ unsigned int smvcount[MV_VALS];
+ vpx_memcpy(smvcount, mvcomp->mvcount, sizeof(smvcount));
+ smvcount[MV_MAX] = (smvcount[MV_MAX - 1] + smvcount[MV_MAX + 1]) >> 1;
+ for (i = flen - 1; i <= MV_VALS - flen; ++i) {
+ int j, s = smvcount[i] * fval[0];
+ for (j = 1; j < flen; ++j)
+ s += (smvcount[i - j] + smvcount[i + j]) * fval[j];
+ mvcomp->mvcount[i] = (s + (1 << (fvalbits - 1))) >> fvalbits;
+ }
+}
+#endif
+
+static void counts_to_context(nmv_component_counts *mvcomp, int usehp) {
+ int v;
+  /* Zero every field that follows mvcount in nmv_component_counts. */
+  vpx_memset(mvcomp->sign, 0,
+             sizeof(nmv_component_counts) - sizeof(mvcomp->mvcount));
+ for (v = 1; v <= MV_MAX; v++) {
+ increment_nmv_component(-v, mvcomp, mvcomp->mvcount[MV_MAX - v], usehp);
+ increment_nmv_component( v, mvcomp, mvcomp->mvcount[MV_MAX + v], usehp);
+ }
+}
+
+void vp9_increment_nmv(const MV *mv, const MV *ref, nmv_context_counts *mvctx,
+ int usehp) {
+ const MV_JOINT_TYPE j = vp9_get_mv_joint(mv);
+ mvctx->joints[j]++;
+ usehp = usehp && vp9_use_nmv_hp(ref);
+ if (mv_joint_vertical(j))
+ increment_nmv_component_count(mv->row, &mvctx->comps[0], 1, usehp);
+
+ if (mv_joint_horizontal(j))
+ increment_nmv_component_count(mv->col, &mvctx->comps[1], 1, usehp);
+}
+
+static void adapt_prob(vp9_prob *dest, vp9_prob prep, unsigned int ct[2]) {
+ const int count = MIN(ct[0] + ct[1], MV_COUNT_SAT);
+ if (count) {
+ const vp9_prob newp = get_binary_prob(ct[0], ct[1]);
+ const int factor = MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT;
+ *dest = weighted_prob(prep, newp, factor);
+ } else {
+ *dest = prep;
+ }
+}
+
+void vp9_counts_process(nmv_context_counts *nmv_count, int usehp) {
+ counts_to_context(&nmv_count->comps[0], usehp);
+ counts_to_context(&nmv_count->comps[1], usehp);
+}
+
+void vp9_counts_to_nmv_context(
+ nmv_context_counts *nmv_count,
+ nmv_context *prob,
+ int usehp,
+ unsigned int (*branch_ct_joint)[2],
+ unsigned int (*branch_ct_sign)[2],
+ unsigned int (*branch_ct_classes)[MV_CLASSES - 1][2],
+ unsigned int (*branch_ct_class0)[CLASS0_SIZE - 1][2],
+ unsigned int (*branch_ct_bits)[MV_OFFSET_BITS][2],
+ unsigned int (*branch_ct_class0_fp)[CLASS0_SIZE][4 - 1][2],
+ unsigned int (*branch_ct_fp)[4 - 1][2],
+ unsigned int (*branch_ct_class0_hp)[2],
+ unsigned int (*branch_ct_hp)[2]) {
+ int i, j, k;
+ vp9_counts_process(nmv_count, usehp);
+ vp9_tree_probs_from_distribution(vp9_mv_joint_tree,
+ prob->joints,
+ branch_ct_joint,
+ nmv_count->joints, 0);
+ for (i = 0; i < 2; ++i) {
+ const uint32_t s0 = nmv_count->comps[i].sign[0];
+ const uint32_t s1 = nmv_count->comps[i].sign[1];
+
+ prob->comps[i].sign = get_binary_prob(s0, s1);
+ branch_ct_sign[i][0] = s0;
+ branch_ct_sign[i][1] = s1;
+ vp9_tree_probs_from_distribution(vp9_mv_class_tree,
+ prob->comps[i].classes,
+ branch_ct_classes[i],
+ nmv_count->comps[i].classes, 0);
+ vp9_tree_probs_from_distribution(vp9_mv_class0_tree,
+ prob->comps[i].class0,
+ branch_ct_class0[i],
+ nmv_count->comps[i].class0, 0);
+ for (j = 0; j < MV_OFFSET_BITS; ++j) {
+ const uint32_t b0 = nmv_count->comps[i].bits[j][0];
+ const uint32_t b1 = nmv_count->comps[i].bits[j][1];
+
+ prob->comps[i].bits[j] = get_binary_prob(b0, b1);
+ branch_ct_bits[i][j][0] = b0;
+ branch_ct_bits[i][j][1] = b1;
+ }
+ }
+ for (i = 0; i < 2; ++i) {
+ for (k = 0; k < CLASS0_SIZE; ++k) {
+ vp9_tree_probs_from_distribution(vp9_mv_fp_tree,
+ prob->comps[i].class0_fp[k],
+ branch_ct_class0_fp[i][k],
+ nmv_count->comps[i].class0_fp[k], 0);
+ }
+ vp9_tree_probs_from_distribution(vp9_mv_fp_tree,
+ prob->comps[i].fp,
+ branch_ct_fp[i],
+ nmv_count->comps[i].fp, 0);
+ }
+ if (usehp) {
+ for (i = 0; i < 2; ++i) {
+ const uint32_t c0_hp0 = nmv_count->comps[i].class0_hp[0];
+ const uint32_t c0_hp1 = nmv_count->comps[i].class0_hp[1];
+ const uint32_t hp0 = nmv_count->comps[i].hp[0];
+ const uint32_t hp1 = nmv_count->comps[i].hp[1];
+
+ prob->comps[i].class0_hp = get_binary_prob(c0_hp0, c0_hp1);
+ branch_ct_class0_hp[i][0] = c0_hp0;
+ branch_ct_class0_hp[i][1] = c0_hp1;
+
+ prob->comps[i].hp = get_binary_prob(hp0, hp1);
+ branch_ct_hp[i][0] = hp0;
+ branch_ct_hp[i][1] = hp1;
+ }
+ }
+}
+
+static unsigned int adapt_probs(unsigned int i,
+ vp9_tree tree,
+ vp9_prob this_probs[],
+ const vp9_prob last_probs[],
+ const unsigned int num_events[]) {
+ vp9_prob this_prob;
+
+ const uint32_t left = tree[i] <= 0
+ ? num_events[-tree[i]]
+ : adapt_probs(tree[i], tree, this_probs, last_probs, num_events);
+
+ const uint32_t right = tree[i + 1] <= 0
+ ? num_events[-tree[i + 1]]
+ : adapt_probs(tree[i + 1], tree, this_probs, last_probs, num_events);
+
+ uint32_t weight = left + right;
+ if (weight) {
+ this_prob = get_binary_prob(left, right);
+ weight = weight > MV_COUNT_SAT ? MV_COUNT_SAT : weight;
+ this_prob = weighted_prob(last_probs[i >> 1], this_prob,
+ MV_MAX_UPDATE_FACTOR * weight / MV_COUNT_SAT);
+ } else {
+ this_prob = last_probs[i >> 1];
+ }
+ this_probs[i >> 1] = this_prob;
+ return left + right;
+}
+
+
+void vp9_adapt_nmv_probs(VP9_COMMON *cm, int usehp) {
+  int i, j;
+#ifdef MV_COUNT_TESTING
+  int k;  /* used by the class0_fp dump below */
+  printf("joints count: ");
+ for (j = 0; j < MV_JOINTS; ++j) printf("%d ", cm->fc.NMVcount.joints[j]);
+ printf("\n"); fflush(stdout);
+ printf("signs count:\n");
+ for (i = 0; i < 2; ++i)
+ printf("%d/%d ", cm->fc.NMVcount.comps[i].sign[0], cm->fc.NMVcount.comps[i].sign[1]);
+ printf("\n"); fflush(stdout);
+ printf("classes count:\n");
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < MV_CLASSES; ++j)
+ printf("%d ", cm->fc.NMVcount.comps[i].classes[j]);
+ printf("\n"); fflush(stdout);
+ }
+ printf("class0 count:\n");
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < CLASS0_SIZE; ++j)
+ printf("%d ", cm->fc.NMVcount.comps[i].class0[j]);
+ printf("\n"); fflush(stdout);
+ }
+ printf("bits count:\n");
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < MV_OFFSET_BITS; ++j)
+ printf("%d/%d ", cm->fc.NMVcount.comps[i].bits[j][0],
+ cm->fc.NMVcount.comps[i].bits[j][1]);
+ printf("\n"); fflush(stdout);
+ }
+ printf("class0_fp count:\n");
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < CLASS0_SIZE; ++j) {
+ printf("{");
+ for (k = 0; k < 4; ++k)
+ printf("%d ", cm->fc.NMVcount.comps[i].class0_fp[j][k]);
+ printf("}, ");
+ }
+ printf("\n"); fflush(stdout);
+ }
+ printf("fp count:\n");
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < 4; ++j)
+ printf("%d ", cm->fc.NMVcount.comps[i].fp[j]);
+ printf("\n"); fflush(stdout);
+ }
+ if (usehp) {
+ printf("class0_hp count:\n");
+ for (i = 0; i < 2; ++i)
+ printf("%d/%d ", cm->fc.NMVcount.comps[i].class0_hp[0],
+ cm->fc.NMVcount.comps[i].class0_hp[1]);
+ printf("\n"); fflush(stdout);
+ printf("hp count:\n");
+ for (i = 0; i < 2; ++i)
+ printf("%d/%d ", cm->fc.NMVcount.comps[i].hp[0],
+ cm->fc.NMVcount.comps[i].hp[1]);
+ printf("\n"); fflush(stdout);
+ }
+#endif
+#ifdef SMOOTH_MV_COUNTS
+ smooth_counts(&cm->fc.NMVcount.comps[0]);
+ smooth_counts(&cm->fc.NMVcount.comps[1]);
+#endif
+ vp9_counts_process(&cm->fc.NMVcount, usehp);
+
+ adapt_probs(0, vp9_mv_joint_tree,
+ cm->fc.nmvc.joints, cm->fc.pre_nmvc.joints,
+ cm->fc.NMVcount.joints);
+
+ for (i = 0; i < 2; ++i) {
+ adapt_prob(&cm->fc.nmvc.comps[i].sign,
+ cm->fc.pre_nmvc.comps[i].sign,
+ cm->fc.NMVcount.comps[i].sign);
+ adapt_probs(0, vp9_mv_class_tree,
+ cm->fc.nmvc.comps[i].classes, cm->fc.pre_nmvc.comps[i].classes,
+ cm->fc.NMVcount.comps[i].classes);
+ adapt_probs(0, vp9_mv_class0_tree,
+ cm->fc.nmvc.comps[i].class0, cm->fc.pre_nmvc.comps[i].class0,
+ cm->fc.NMVcount.comps[i].class0);
+ for (j = 0; j < MV_OFFSET_BITS; ++j) {
+ adapt_prob(&cm->fc.nmvc.comps[i].bits[j],
+ cm->fc.pre_nmvc.comps[i].bits[j],
+ cm->fc.NMVcount.comps[i].bits[j]);
+ }
+ }
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < CLASS0_SIZE; ++j) {
+ adapt_probs(0, vp9_mv_fp_tree,
+ cm->fc.nmvc.comps[i].class0_fp[j],
+ cm->fc.pre_nmvc.comps[i].class0_fp[j],
+ cm->fc.NMVcount.comps[i].class0_fp[j]);
+ }
+ adapt_probs(0, vp9_mv_fp_tree,
+ cm->fc.nmvc.comps[i].fp,
+ cm->fc.pre_nmvc.comps[i].fp,
+ cm->fc.NMVcount.comps[i].fp);
+ }
+ if (usehp) {
+ for (i = 0; i < 2; ++i) {
+ adapt_prob(&cm->fc.nmvc.comps[i].class0_hp,
+ cm->fc.pre_nmvc.comps[i].class0_hp,
+ cm->fc.NMVcount.comps[i].class0_hp);
+ adapt_prob(&cm->fc.nmvc.comps[i].hp,
+ cm->fc.pre_nmvc.comps[i].hp,
+ cm->fc.NMVcount.comps[i].hp);
+ }
+ }
+}
+
+void vp9_entropy_mv_init(void) {
+ vp9_tokens_from_tree(vp9_mv_joint_encodings, vp9_mv_joint_tree);
+ vp9_tokens_from_tree(vp9_mv_class_encodings, vp9_mv_class_tree);
+ vp9_tokens_from_tree(vp9_mv_class0_encodings, vp9_mv_class0_tree);
+ vp9_tokens_from_tree(vp9_mv_fp_encodings, vp9_mv_fp_tree);
+}
+
+void vp9_init_mv_probs(VP9_COMMON *cm) {
+ vpx_memcpy(&cm->fc.nmvc, &vp9_default_nmv_context, sizeof(nmv_context));
+}
diff --git a/libvpx/vp9/common/vp9_entropymv.h b/libvpx/vp9/common/vp9_entropymv.h
new file mode 100644
index 0000000..15994a6
--- /dev/null
+++ b/libvpx/vp9/common/vp9_entropymv.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_COMMON_VP9_ENTROPYMV_H_
+#define VP9_COMMON_VP9_ENTROPYMV_H_
+
+#include "vp9/common/vp9_treecoder.h"
+#include "vpx_config.h"
+#include "vp9/common/vp9_blockd.h"
+
+struct VP9Common;
+
+void vp9_entropy_mv_init(void);
+void vp9_init_mv_probs(struct VP9Common *cm);
+
+void vp9_adapt_nmv_probs(struct VP9Common *cm, int usehp);
+int vp9_use_nmv_hp(const MV *ref);
+
+#define VP9_NMV_UPDATE_PROB 252
+
+//#define MV_GROUP_UPDATE
+
+#define LOW_PRECISION_MV_UPDATE /* Use 7 bit forward update */
+
+/* Symbols for coding which components are zero jointly */
+#define MV_JOINTS 4
+typedef enum {
+ MV_JOINT_ZERO = 0, /* Zero vector */
+ MV_JOINT_HNZVZ = 1, /* Vert zero, hor nonzero */
+ MV_JOINT_HZVNZ = 2, /* Hor zero, vert nonzero */
+ MV_JOINT_HNZVNZ = 3, /* Both components nonzero */
+} MV_JOINT_TYPE;
+
+static INLINE int mv_joint_vertical(MV_JOINT_TYPE type) {
+ return type == MV_JOINT_HZVNZ || type == MV_JOINT_HNZVNZ;
+}
+
+static INLINE int mv_joint_horizontal(MV_JOINT_TYPE type) {
+ return type == MV_JOINT_HNZVZ || type == MV_JOINT_HNZVNZ;
+}
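For example, an MV of {row = 0, col = 5} has joint type MV_JOINT_HNZVZ: mv_joint_horizontal() is true, mv_joint_vertical() is false, and only the column component gets coded.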
+
+extern const vp9_tree_index vp9_mv_joint_tree[2 * MV_JOINTS - 2];
+extern struct vp9_token vp9_mv_joint_encodings[MV_JOINTS];
+
+/* Symbols for coding magnitude class of nonzero components */
+#define MV_CLASSES 11
+typedef enum {
+ MV_CLASS_0 = 0, /* (0, 2] integer pel */
+ MV_CLASS_1 = 1, /* (2, 4] integer pel */
+ MV_CLASS_2 = 2, /* (4, 8] integer pel */
+ MV_CLASS_3 = 3, /* (8, 16] integer pel */
+ MV_CLASS_4 = 4, /* (16, 32] integer pel */
+ MV_CLASS_5 = 5, /* (32, 64] integer pel */
+ MV_CLASS_6 = 6, /* (64, 128] integer pel */
+ MV_CLASS_7 = 7, /* (128, 256] integer pel */
+ MV_CLASS_8 = 8, /* (256, 512] integer pel */
+ MV_CLASS_9 = 9, /* (512, 1024] integer pel */
+ MV_CLASS_10 = 10, /* (1024,2048] integer pel */
+} MV_CLASS_TYPE;
+
+extern const vp9_tree_index vp9_mv_class_tree[2 * MV_CLASSES - 2];
+extern struct vp9_token vp9_mv_class_encodings[MV_CLASSES];
+
+#define CLASS0_BITS 1 /* bits at integer precision for class 0 */
+#define CLASS0_SIZE (1 << CLASS0_BITS)
+#define MV_OFFSET_BITS (MV_CLASSES + CLASS0_BITS - 2)
+
+#define MV_MAX_BITS (MV_CLASSES + CLASS0_BITS + 2)
+#define MV_MAX ((1 << MV_MAX_BITS) - 1)
+#define MV_VALS ((MV_MAX << 1) + 1)
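With MV_CLASSES == 11 and CLASS0_BITS == 1, these work out to MV_MAX_BITS == 14, MV_MAX == 16383 (1/8-pel units), and MV_VALS == 32767, i.e. every representable magnitude in both directions plus zero.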
+
+extern const vp9_tree_index vp9_mv_class0_tree[2 * CLASS0_SIZE - 2];
+extern struct vp9_token vp9_mv_class0_encodings[CLASS0_SIZE];
+
+extern const vp9_tree_index vp9_mv_fp_tree[2 * 4 - 2];
+extern struct vp9_token vp9_mv_fp_encodings[4];
+
+typedef struct {
+ vp9_prob sign;
+ vp9_prob classes[MV_CLASSES - 1];
+ vp9_prob class0[CLASS0_SIZE - 1];
+ vp9_prob bits[MV_OFFSET_BITS];
+ vp9_prob class0_fp[CLASS0_SIZE][4 - 1];
+ vp9_prob fp[4 - 1];
+ vp9_prob class0_hp;
+ vp9_prob hp;
+} nmv_component;
+
+typedef struct {
+ vp9_prob joints[MV_JOINTS - 1];
+ nmv_component comps[2];
+} nmv_context;
+
+MV_JOINT_TYPE vp9_get_mv_joint(const MV *mv);
+MV_CLASS_TYPE vp9_get_mv_class(int z, int *offset);
+int vp9_get_mv_mag(MV_CLASS_TYPE c, int offset);
+
+
+typedef struct {
+ unsigned int mvcount[MV_VALS];
+ unsigned int sign[2];
+ unsigned int classes[MV_CLASSES];
+ unsigned int class0[CLASS0_SIZE];
+ unsigned int bits[MV_OFFSET_BITS][2];
+ unsigned int class0_fp[CLASS0_SIZE][4];
+ unsigned int fp[4];
+ unsigned int class0_hp[2];
+ unsigned int hp[2];
+} nmv_component_counts;
+
+typedef struct {
+ unsigned int joints[MV_JOINTS];
+ nmv_component_counts comps[2];
+} nmv_context_counts;
+
+void vp9_increment_nmv(const MV *mv, const MV *ref, nmv_context_counts *mvctx,
+ int usehp);
+extern const nmv_context vp9_default_nmv_context;
+void vp9_counts_to_nmv_context(
+ nmv_context_counts *NMVcount,
+ nmv_context *prob,
+ int usehp,
+ unsigned int (*branch_ct_joint)[2],
+ unsigned int (*branch_ct_sign)[2],
+ unsigned int (*branch_ct_classes)[MV_CLASSES - 1][2],
+ unsigned int (*branch_ct_class0)[CLASS0_SIZE - 1][2],
+ unsigned int (*branch_ct_bits)[MV_OFFSET_BITS][2],
+ unsigned int (*branch_ct_class0_fp)[CLASS0_SIZE][4 - 1][2],
+ unsigned int (*branch_ct_fp)[4 - 1][2],
+ unsigned int (*branch_ct_class0_hp)[2],
+ unsigned int (*branch_ct_hp)[2]);
+void vp9_counts_process(nmv_context_counts *NMVcount, int usehp);
+
+#endif // VP9_COMMON_VP9_ENTROPYMV_H_
diff --git a/libvpx/vp9/common/vp9_enums.h b/libvpx/vp9/common/vp9_enums.h
new file mode 100644
index 0000000..e18d353
--- /dev/null
+++ b/libvpx/vp9/common/vp9_enums.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_ENUMS_H_
+#define VP9_COMMON_VP9_ENUMS_H_
+
+#include "./vpx_config.h"
+
+#define LOG2_MI_SIZE 3
+
+#define MI_SIZE (1 << LOG2_MI_SIZE)
+#define MI_MASK ((64 >> LOG2_MI_SIZE) - 1)
+
+typedef enum BLOCK_SIZE_TYPE {
+ BLOCK_SIZE_AB4X4,
+ BLOCK_SIZE_SB4X8,
+ BLOCK_SIZE_SB8X4,
+ BLOCK_SIZE_SB8X8,
+ BLOCK_SIZE_SB8X16,
+ BLOCK_SIZE_SB16X8,
+ BLOCK_SIZE_MB16X16,
+ BLOCK_SIZE_SB16X32,
+ BLOCK_SIZE_SB32X16,
+ BLOCK_SIZE_SB32X32,
+ BLOCK_SIZE_SB32X64,
+ BLOCK_SIZE_SB64X32,
+ BLOCK_SIZE_SB64X64,
+ BLOCK_SIZE_TYPES
+} BLOCK_SIZE_TYPE;
+
+typedef enum PARTITION_TYPE {
+ PARTITION_NONE,
+ PARTITION_HORZ,
+ PARTITION_VERT,
+ PARTITION_SPLIT,
+ PARTITION_TYPES
+} PARTITION_TYPE;
+
+#define PARTITION_PLOFFSET 4 // number of probability models per block size
+#define NUM_PARTITION_CONTEXTS (4 * PARTITION_PLOFFSET)
+
+#endif // VP9_COMMON_VP9_ENUMS_H_
diff --git a/libvpx/vp9/common/vp9_extend.c b/libvpx/vp9/common/vp9_extend.c
new file mode 100644
index 0000000..95ec590
--- /dev/null
+++ b/libvpx/vp9/common/vp9_extend.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/vp9_extend.h"
+#include "vpx_mem/vpx_mem.h"
+
+static void copy_and_extend_plane(const uint8_t *src, int src_pitch,
+ uint8_t *dst, int dst_pitch,
+ int w, int h,
+ int extend_top, int extend_left,
+ int extend_bottom, int extend_right) {
+ int i, linesize;
+
+  // Copy out the leftmost and rightmost columns.
+ const uint8_t *src_ptr1 = src;
+ const uint8_t *src_ptr2 = src + w - 1;
+ uint8_t *dst_ptr1 = dst - extend_left;
+ uint8_t *dst_ptr2 = dst + w;
+
+ for (i = 0; i < h; i++) {
+ vpx_memset(dst_ptr1, src_ptr1[0], extend_left);
+ vpx_memcpy(dst_ptr1 + extend_left, src_ptr1, w);
+ vpx_memset(dst_ptr2, src_ptr2[0], extend_right);
+ src_ptr1 += src_pitch;
+ src_ptr2 += src_pitch;
+ dst_ptr1 += dst_pitch;
+ dst_ptr2 += dst_pitch;
+ }
+
+ // Now copy the top and bottom lines into each line of the respective
+ // borders
+ src_ptr1 = dst - extend_left;
+ src_ptr2 = dst + dst_pitch * (h - 1) - extend_left;
+ dst_ptr1 = dst + dst_pitch * (-extend_top) - extend_left;
+ dst_ptr2 = dst + dst_pitch * (h) - extend_left;
+ linesize = extend_left + extend_right + w;
+
+ for (i = 0; i < extend_top; i++) {
+ vpx_memcpy(dst_ptr1, src_ptr1, linesize);
+ dst_ptr1 += dst_pitch;
+ }
+
+ for (i = 0; i < extend_bottom; i++) {
+ vpx_memcpy(dst_ptr2, src_ptr2, linesize);
+ dst_ptr2 += dst_pitch;
+ }
+}
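A toy sketch of what copy_and_extend_plane() does, written as if it sat in this file (the buffer contents are hypothetical): extend a 2x2 plane by a one-pixel border on each side.

    static void example_extend(void) {
      static const uint8_t src[4] = { 1, 2,
                                      3, 4 };
      uint8_t buf[16] = { 0 };
      /* dst points at the 2x2 interior of a 4x4 buffer (stride 4). */
      copy_and_extend_plane(src, 2, buf + 4 + 1, 4, 2, 2, 1, 1, 1, 1);
      /* buf now holds:  1 1 2 2
       *                 1 1 2 2
       *                 3 3 4 4
       *                 3 3 4 4 */
    }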
+
+void vp9_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *dst) {
+ const int et_y = dst->border;
+ const int el_y = dst->border;
+ const int eb_y = dst->border + dst->y_height - src->y_height;
+ const int er_y = dst->border + dst->y_width - src->y_width;
+
+ const int et_uv = dst->border >> (dst->uv_height != dst->y_height);
+ const int el_uv = dst->border >> (dst->uv_width != dst->y_width);
+ const int eb_uv = et_uv + dst->uv_height - src->uv_height;
+ const int er_uv = el_uv + dst->uv_width - src->uv_width;
+
+#if CONFIG_ALPHA
+ const int et_a = dst->border >> (dst->alpha_height != dst->y_height);
+ const int el_a = dst->border >> (dst->alpha_width != dst->y_width);
+ const int eb_a = et_a + dst->alpha_height - src->alpha_height;
+ const int er_a = el_a + dst->alpha_width - src->alpha_width;
+
+ copy_and_extend_plane(src->alpha_buffer, src->alpha_stride,
+ dst->alpha_buffer, dst->alpha_stride,
+ src->alpha_width, src->alpha_height,
+ et_a, el_a, eb_a, er_a);
+#endif
+
+ copy_and_extend_plane(src->y_buffer, src->y_stride,
+ dst->y_buffer, dst->y_stride,
+ src->y_width, src->y_height,
+ et_y, el_y, eb_y, er_y);
+
+ copy_and_extend_plane(src->u_buffer, src->uv_stride,
+ dst->u_buffer, dst->uv_stride,
+ src->uv_width, src->uv_height,
+ et_uv, el_uv, eb_uv, er_uv);
+
+ copy_and_extend_plane(src->v_buffer, src->uv_stride,
+ dst->v_buffer, dst->uv_stride,
+ src->uv_width, src->uv_height,
+ et_uv, el_uv, eb_uv, er_uv);
+}
+
+void vp9_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *dst,
+ int srcy, int srcx,
+ int srch, int srcw) {
+  // If the side does not touch the frame boundary, don't extend it.
+ const int et_y = srcy ? 0 : dst->border;
+ const int el_y = srcx ? 0 : dst->border;
+ const int eb_y = srcy + srch != src->y_height ? 0 :
+ dst->border + dst->y_height - src->y_height;
+ const int er_y = srcx + srcw != src->y_width ? 0 :
+ dst->border + dst->y_width - src->y_width;
+ const int src_y_offset = srcy * src->y_stride + srcx;
+ const int dst_y_offset = srcy * dst->y_stride + srcx;
+
+ const int et_uv = (et_y + 1) >> 1;
+ const int el_uv = (el_y + 1) >> 1;
+ const int eb_uv = (eb_y + 1) >> 1;
+ const int er_uv = (er_y + 1) >> 1;
+ const int src_uv_offset = ((srcy * src->uv_stride) >> 1) + (srcx >> 1);
+ const int dst_uv_offset = ((srcy * dst->uv_stride) >> 1) + (srcx >> 1);
+ const int srch_uv = (srch + 1) >> 1;
+ const int srcw_uv = (srcw + 1) >> 1;
+
+ copy_and_extend_plane(src->y_buffer + src_y_offset, src->y_stride,
+ dst->y_buffer + dst_y_offset, dst->y_stride,
+ srcw, srch,
+ et_y, el_y, eb_y, er_y);
+
+ copy_and_extend_plane(src->u_buffer + src_uv_offset, src->uv_stride,
+ dst->u_buffer + dst_uv_offset, dst->uv_stride,
+ srcw_uv, srch_uv,
+ et_uv, el_uv, eb_uv, er_uv);
+
+ copy_and_extend_plane(src->v_buffer + src_uv_offset, src->uv_stride,
+ dst->v_buffer + dst_uv_offset, dst->uv_stride,
+ srcw_uv, srch_uv,
+ et_uv, el_uv, eb_uv, er_uv);
+}
diff --git a/libvpx/vp9/common/vp9_extend.h b/libvpx/vp9/common/vp9_extend.h
new file mode 100644
index 0000000..7ff79b7
--- /dev/null
+++ b/libvpx/vp9/common/vp9_extend.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_EXTEND_H_
+#define VP9_COMMON_VP9_EXTEND_H_
+
+#include "vpx_scale/yv12config.h"
+#include "vpx/vpx_integer.h"
+
+
+void vp9_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *dst);
+
+void vp9_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *dst,
+ int srcy, int srcx,
+ int srch, int srcw);
+#endif // VP9_COMMON_VP9_EXTEND_H_
diff --git a/libvpx/vp9/common/vp9_filter.c b/libvpx/vp9/common/vp9_filter.c
new file mode 100644
index 0000000..e5503cd
--- /dev/null
+++ b/libvpx/vp9/common/vp9_filter.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <stdlib.h>
+#include "vp9/common/vp9_filter.h"
+#include "vpx_ports/mem.h"
+#include "vp9_rtcd.h"
+#include "vp9/common/vp9_common.h"
+
+DECLARE_ALIGNED(256, const int16_t, vp9_bilinear_filters[SUBPEL_SHIFTS][8]) = {
+ { 0, 0, 0, 128, 0, 0, 0, 0 },
+ { 0, 0, 0, 120, 8, 0, 0, 0 },
+ { 0, 0, 0, 112, 16, 0, 0, 0 },
+ { 0, 0, 0, 104, 24, 0, 0, 0 },
+ { 0, 0, 0, 96, 32, 0, 0, 0 },
+ { 0, 0, 0, 88, 40, 0, 0, 0 },
+ { 0, 0, 0, 80, 48, 0, 0, 0 },
+ { 0, 0, 0, 72, 56, 0, 0, 0 },
+ { 0, 0, 0, 64, 64, 0, 0, 0 },
+ { 0, 0, 0, 56, 72, 0, 0, 0 },
+ { 0, 0, 0, 48, 80, 0, 0, 0 },
+ { 0, 0, 0, 40, 88, 0, 0, 0 },
+ { 0, 0, 0, 32, 96, 0, 0, 0 },
+ { 0, 0, 0, 24, 104, 0, 0, 0 },
+ { 0, 0, 0, 16, 112, 0, 0, 0 },
+ { 0, 0, 0, 8, 120, 0, 0, 0 }
+};
+
+DECLARE_ALIGNED(256, const int16_t, vp9_sub_pel_filters_8[SUBPEL_SHIFTS][8]) = {
+ /* Lagrangian interpolation filter */
+ { 0, 0, 0, 128, 0, 0, 0, 0},
+ { 0, 1, -5, 126, 8, -3, 1, 0},
+ { -1, 3, -10, 122, 18, -6, 2, 0},
+ { -1, 4, -13, 118, 27, -9, 3, -1},
+ { -1, 4, -16, 112, 37, -11, 4, -1},
+ { -1, 5, -18, 105, 48, -14, 4, -1},
+ { -1, 5, -19, 97, 58, -16, 5, -1},
+ { -1, 6, -19, 88, 68, -18, 5, -1},
+ { -1, 6, -19, 78, 78, -19, 6, -1},
+ { -1, 5, -18, 68, 88, -19, 6, -1},
+ { -1, 5, -16, 58, 97, -19, 5, -1},
+ { -1, 4, -14, 48, 105, -18, 5, -1},
+ { -1, 4, -11, 37, 112, -16, 4, -1},
+ { -1, 3, -9, 27, 118, -13, 4, -1},
+ { 0, 2, -6, 18, 122, -10, 3, -1},
+ { 0, 1, -3, 8, 126, -5, 1, 0}
+};
+
+DECLARE_ALIGNED(256, const int16_t, vp9_sub_pel_filters_8s[SUBPEL_SHIFTS][8])
+ = {
+ /* dct based filter */
+ {0, 0, 0, 128, 0, 0, 0, 0},
+ {-1, 3, -7, 127, 8, -3, 1, 0},
+ {-2, 5, -13, 125, 17, -6, 3, -1},
+ {-3, 7, -17, 121, 27, -10, 5, -2},
+ {-4, 9, -20, 115, 37, -13, 6, -2},
+ {-4, 10, -23, 108, 48, -16, 8, -3},
+ {-4, 10, -24, 100, 59, -19, 9, -3},
+ {-4, 11, -24, 90, 70, -21, 10, -4},
+ {-4, 11, -23, 80, 80, -23, 11, -4},
+ {-4, 10, -21, 70, 90, -24, 11, -4},
+ {-3, 9, -19, 59, 100, -24, 10, -4},
+ {-3, 8, -16, 48, 108, -23, 10, -4},
+ {-2, 6, -13, 37, 115, -20, 9, -4},
+ {-2, 5, -10, 27, 121, -17, 7, -3},
+ {-1, 3, -6, 17, 125, -13, 5, -2},
+ {0, 1, -3, 8, 127, -7, 3, -1}
+};
+
+DECLARE_ALIGNED(256, const int16_t,
+ vp9_sub_pel_filters_8lp[SUBPEL_SHIFTS][8]) = {
+ /* freqmultiplier = 0.5 */
+ { 0, 0, 0, 128, 0, 0, 0, 0},
+ {-3, -1, 32, 64, 38, 1, -3, 0},
+ {-2, -2, 29, 63, 41, 2, -3, 0},
+ {-2, -2, 26, 63, 43, 4, -4, 0},
+ {-2, -3, 24, 62, 46, 5, -4, 0},
+ {-2, -3, 21, 60, 49, 7, -4, 0},
+ {-1, -4, 18, 59, 51, 9, -4, 0},
+ {-1, -4, 16, 57, 53, 12, -4, -1},
+ {-1, -4, 14, 55, 55, 14, -4, -1},
+ {-1, -4, 12, 53, 57, 16, -4, -1},
+ { 0, -4, 9, 51, 59, 18, -4, -1},
+ { 0, -4, 7, 49, 60, 21, -3, -2},
+ { 0, -4, 5, 46, 62, 24, -3, -2},
+ { 0, -4, 4, 43, 63, 26, -2, -2},
+ { 0, -3, 2, 41, 63, 29, -2, -2},
+ { 0, -3, 1, 38, 64, 32, -1, -3}
+};
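Every kernel row above sums to VP9_FILTER_WEIGHT (128), which is what makes the >> VP9_FILTER_SHIFT normalization exact. A quick hedged self-check, a standalone sketch rather than library code:

    #include <assert.h>

    static void check_filter_normalization(void) {
      int i, j;
      for (i = 0; i < SUBPEL_SHIFTS; ++i) {
        int sum = 0;
        for (j = 0; j < 8; ++j)
          sum += vp9_sub_pel_filters_8[i][j];
        assert(sum == 128);  /* VP9_FILTER_WEIGHT */
      }
    }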
diff --git a/libvpx/vp9/common/vp9_filter.h b/libvpx/vp9/common/vp9_filter.h
new file mode 100644
index 0000000..1ccfdaa
--- /dev/null
+++ b/libvpx/vp9/common/vp9_filter.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_FILTER_H_
+#define VP9_COMMON_VP9_FILTER_H_
+
+#include "vpx_config.h"
+#include "vpx_scale/yv12config.h"
+#include "vpx/vpx_integer.h"
+
+#define BLOCK_HEIGHT_WIDTH 4
+#define VP9_FILTER_WEIGHT 128
+#define VP9_FILTER_SHIFT 7
+
+#define SUBPEL_SHIFTS 16
+
+extern const int16_t vp9_bilinear_filters[SUBPEL_SHIFTS][8];
+extern const int16_t vp9_sub_pel_filters_6[SUBPEL_SHIFTS][8];
+extern const int16_t vp9_sub_pel_filters_8[SUBPEL_SHIFTS][8];
+extern const int16_t vp9_sub_pel_filters_8s[SUBPEL_SHIFTS][8];
+extern const int16_t vp9_sub_pel_filters_8lp[SUBPEL_SHIFTS][8];
+
+// The VP9_BILINEAR_FILTERS_2TAP macro returns a pointer to the bilinear
+// filter kernel as a 2 tap filter.
+#define BF_LENGTH (sizeof(vp9_bilinear_filters[0]) / \
+ sizeof(vp9_bilinear_filters[0][0]))
+#define BF_OFFSET (BF_LENGTH / 2 - 1)
+#define VP9_BILINEAR_FILTERS_2TAP(x) (vp9_bilinear_filters[x] + BF_OFFSET)
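For instance, with the 8-tap rows above BF_LENGTH is 8 and BF_OFFSET is 3, so VP9_BILINEAR_FILTERS_2TAP(8) points at the { 64, 64 } pair inside { 0, 0, 0, 64, 64, 0, 0, 0 }: the two active taps of the half-pel kernel.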
+
+#endif // VP9_COMMON_VP9_FILTER_H_
diff --git a/libvpx/vp9/common/vp9_findnearmv.c b/libvpx/vp9/common/vp9_findnearmv.c
new file mode 100644
index 0000000..a692271
--- /dev/null
+++ b/libvpx/vp9/common/vp9_findnearmv.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+
+#include "vp9/common/vp9_findnearmv.h"
+#include "vp9/common/vp9_mvref_common.h"
+#include "vp9/common/vp9_sadmxn.h"
+
+static void lower_mv_precision(int_mv *mv, int usehp) {
+ if (!usehp || !vp9_use_nmv_hp(&mv->as_mv)) {
+ if (mv->as_mv.row & 1)
+ mv->as_mv.row += (mv->as_mv.row > 0 ? -1 : 1);
+ if (mv->as_mv.col & 1)
+ mv->as_mv.col += (mv->as_mv.col > 0 ? -1 : 1);
+ }
+}
+
+
+void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
+ int_mv *mvlist,
+ int_mv *nearest,
+ int_mv *near) {
+ int i;
+ // Make sure all the candidates are properly clamped etc
+ for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) {
+ lower_mv_precision(&mvlist[i], xd->allow_high_precision_mv);
+ clamp_mv2(&mvlist[i], xd);
+ }
+ *nearest = mvlist[0];
+ *near = mvlist[1];
+}
+
+void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd,
+ int_mv *dst_nearest,
+ int_mv *dst_near,
+ int block_idx, int ref_idx) {
+ int_mv dst_list[MAX_MV_REF_CANDIDATES];
+ int_mv mv_list[MAX_MV_REF_CANDIDATES];
+ MODE_INFO *mi = xd->mode_info_context;
+ MB_MODE_INFO *const mbmi = &mi->mbmi;
+
+ assert(ref_idx == 0 || ref_idx == 1);
+ assert(MAX_MV_REF_CANDIDATES == 2); // makes code here slightly easier
+
+ vp9_find_mv_refs_idx(cm, xd, xd->mode_info_context,
+ xd->prev_mode_info_context,
+ mbmi->ref_frame[ref_idx],
+ mv_list, cm->ref_frame_sign_bias, block_idx);
+
+ dst_list[1].as_int = 0;
+ if (block_idx == 0) {
+ memcpy(dst_list, mv_list, MAX_MV_REF_CANDIDATES * sizeof(int_mv));
+ } else if (block_idx == 1 || block_idx == 2) {
+ int dst = 0, n;
+ union b_mode_info *bmi = mi->bmi;
+
+ dst_list[dst++].as_int = bmi[0].as_mv[ref_idx].as_int;
+ for (n = 0; dst < MAX_MV_REF_CANDIDATES &&
+ n < MAX_MV_REF_CANDIDATES; n++)
+ if (mv_list[n].as_int != dst_list[0].as_int)
+ dst_list[dst++].as_int = mv_list[n].as_int;
+ } else {
+ int dst = 0, n;
+ union b_mode_info *bmi = mi->bmi;
+
+ assert(block_idx == 3);
+ dst_list[dst++].as_int = bmi[2].as_mv[ref_idx].as_int;
+ if (dst_list[0].as_int != bmi[1].as_mv[ref_idx].as_int)
+ dst_list[dst++].as_int = bmi[1].as_mv[ref_idx].as_int;
+ if (dst < MAX_MV_REF_CANDIDATES &&
+ dst_list[0].as_int != bmi[0].as_mv[ref_idx].as_int)
+ dst_list[dst++].as_int = bmi[0].as_mv[ref_idx].as_int;
+ for (n = 0; dst < MAX_MV_REF_CANDIDATES &&
+ n < MAX_MV_REF_CANDIDATES; n++)
+ if (mv_list[n].as_int != dst_list[0].as_int)
+ dst_list[dst++].as_int = mv_list[n].as_int;
+ }
+
+ dst_nearest->as_int = dst_list[0].as_int;
+ dst_near->as_int = dst_list[1].as_int;
+}
diff --git a/libvpx/vp9/common/vp9_findnearmv.h b/libvpx/vp9/common/vp9_findnearmv.h
new file mode 100644
index 0000000..d4ae210
--- /dev/null
+++ b/libvpx/vp9/common/vp9_findnearmv.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_COMMON_VP9_FINDNEARMV_H_
+#define VP9_COMMON_VP9_FINDNEARMV_H_
+
+#include "vp9/common/vp9_mv.h"
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_treecoder.h"
+#include "vp9/common/vp9_onyxc_int.h"
+
+#define LEFT_TOP_MARGIN ((VP9BORDERINPIXELS - VP9_INTERP_EXTEND) << 3)
+#define RIGHT_BOTTOM_MARGIN ((VP9BORDERINPIXELS - VP9_INTERP_EXTEND) << 3)
+
+// Process the list of candidate reference motion vectors: clamp each
+// candidate and select the ones to use as the nearest and near reference
+// MVs (the first two candidates).
+void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
+ int_mv *mvlist,
+ int_mv *nearest,
+ int_mv *near);
+
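+// Flips the sign of the candidate MV when the sign bias of the reference
+// MB's frame differs from that of the target reference frame.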
+static void mv_bias(int refmb_ref_frame_sign_bias, int refframe,
+ int_mv *mvp, const int *ref_frame_sign_bias) {
+ MV xmv = mvp->as_mv;
+
+ if (refmb_ref_frame_sign_bias != ref_frame_sign_bias[refframe]) {
+ xmv.row *= -1;
+ xmv.col *= -1;
+ }
+
+ mvp->as_mv = xmv;
+}
+
+// TODO(jingning): this mv clamping function should be block size dependent.
+static void clamp_mv(int_mv *mv,
+ int mb_to_left_edge,
+ int mb_to_right_edge,
+ int mb_to_top_edge,
+ int mb_to_bottom_edge) {
+ mv->as_mv.col = clamp(mv->as_mv.col, mb_to_left_edge, mb_to_right_edge);
+ mv->as_mv.row = clamp(mv->as_mv.row, mb_to_top_edge, mb_to_bottom_edge);
+}
+
+static int clamp_mv2(int_mv *mv, const MACROBLOCKD *xd) {
+ int_mv tmp_mv;
+ tmp_mv.as_int = mv->as_int;
+ clamp_mv(mv,
+ xd->mb_to_left_edge - LEFT_TOP_MARGIN,
+ xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN,
+ xd->mb_to_top_edge - LEFT_TOP_MARGIN,
+ xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN);
+ return tmp_mv.as_int != mv->as_int;
+}
+
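+// Returns nonzero if the MV lies outside the given edge limits.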
+static int check_mv_bounds(int_mv *mv,
+ int mb_to_left_edge, int mb_to_right_edge,
+ int mb_to_top_edge, int mb_to_bottom_edge) {
+ return mv->as_mv.col < mb_to_left_edge ||
+ mv->as_mv.col > mb_to_right_edge ||
+ mv->as_mv.row < mb_to_top_edge ||
+ mv->as_mv.row > mb_to_bottom_edge;
+}
+
+void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *pc,
+ MACROBLOCKD *xd,
+ int_mv *dst_nearest,
+ int_mv *dst_near,
+ int block_idx, int ref_idx);
+
+static MB_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb, int b) {
+ // FIXME(rbultje, jingning): temporary hack because jenkins doesn't
+ // understand this condition. This will go away soon.
+ if (b == 0 || b == 2) {
+    /* On the left edge, get from the MB to the left of us */
+ --cur_mb;
+
+ if (cur_mb->mbmi.ref_frame[0] != INTRA_FRAME) {
+ return DC_PRED;
+ } else if (cur_mb->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
+ return ((cur_mb->bmi + 1 + b)->as_mode.first);
+ } else {
+ return cur_mb->mbmi.mode;
+ }
+ }
+ assert(b == 1 || b == 3);
+ return (cur_mb->bmi + b - 1)->as_mode.first;
+}
+
+static MB_PREDICTION_MODE above_block_mode(const MODE_INFO *cur_mb,
+ int b, int mi_stride) {
+ if (!(b >> 1)) {
+    /* On the top edge, get from the MB above us */
+ cur_mb -= mi_stride;
+
+ if (cur_mb->mbmi.ref_frame[0] != INTRA_FRAME) {
+ return DC_PRED;
+ } else if (cur_mb->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
+ return ((cur_mb->bmi + 2 + b)->as_mode.first);
+ } else {
+ return cur_mb->mbmi.mode;
+ }
+ }
+
+ return (cur_mb->bmi + b - 2)->as_mode.first;
+}
+
+#endif // VP9_COMMON_VP9_FINDNEARMV_H_
diff --git a/libvpx/vp9/common/vp9_idct.c b/libvpx/vp9/common/vp9_idct.c
new file mode 100644
index 0000000..dcc7f03
--- /dev/null
+++ b/libvpx/vp9/common/vp9_idct.c
@@ -0,0 +1,1310 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <math.h>
+
+#include "./vpx_config.h"
+#include "./vp9_rtcd.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_idct.h"
+
+void vp9_short_iwalsh4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride) {
+  /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
+     0.5 shifts per pixel. */
+ int i;
+ int16_t output[16];
+ int a1, b1, c1, d1, e1;
+ int16_t *ip = input;
+ int16_t *op = output;
+
+ for (i = 0; i < 4; i++) {
+ a1 = ip[0] >> WHT_UPSCALE_FACTOR;
+ c1 = ip[1] >> WHT_UPSCALE_FACTOR;
+ d1 = ip[2] >> WHT_UPSCALE_FACTOR;
+ b1 = ip[3] >> WHT_UPSCALE_FACTOR;
+ a1 += c1;
+ d1 -= b1;
+ e1 = (a1 - d1) >> 1;
+ b1 = e1 - b1;
+ c1 = e1 - c1;
+ a1 -= b1;
+ d1 += c1;
+ op[0] = a1;
+ op[1] = b1;
+ op[2] = c1;
+ op[3] = d1;
+ ip += 4;
+ op += 4;
+ }
+
+ ip = output;
+ for (i = 0; i < 4; i++) {
+ a1 = ip[4 * 0];
+ c1 = ip[4 * 1];
+ d1 = ip[4 * 2];
+ b1 = ip[4 * 3];
+ a1 += c1;
+ d1 -= b1;
+ e1 = (a1 - d1) >> 1;
+ b1 = e1 - b1;
+ c1 = e1 - c1;
+ a1 -= b1;
+ d1 += c1;
+ dest[dest_stride * 0] = clip_pixel(dest[dest_stride * 0] + a1);
+ dest[dest_stride * 1] = clip_pixel(dest[dest_stride * 1] + b1);
+ dest[dest_stride * 2] = clip_pixel(dest[dest_stride * 2] + c1);
+ dest[dest_stride * 3] = clip_pixel(dest[dest_stride * 3] + d1);
+
+ ip++;
+ dest++;
+ }
+}
+
+void vp9_short_iwalsh4x4_1_add_c(int16_t *in, uint8_t *dest, int dest_stride) {
+ int i;
+ int a1, e1;
+ int16_t tmp[4];
+ int16_t *ip = in;
+ int16_t *op = tmp;
+
+ a1 = ip[0] >> WHT_UPSCALE_FACTOR;
+ e1 = a1 >> 1;
+ a1 -= e1;
+ op[0] = a1;
+ op[1] = op[2] = op[3] = e1;
+
+ ip = tmp;
+ for (i = 0; i < 4; i++) {
+ e1 = ip[0] >> 1;
+ a1 = ip[0] - e1;
+ dest[dest_stride * 0] = clip_pixel(dest[dest_stride * 0] + a1);
+ dest[dest_stride * 1] = clip_pixel(dest[dest_stride * 1] + e1);
+ dest[dest_stride * 2] = clip_pixel(dest[dest_stride * 2] + e1);
+ dest[dest_stride * 3] = clip_pixel(dest[dest_stride * 3] + e1);
+ ip++;
+ dest++;
+ }
+}
+
+void vp9_idct4_1d_c(int16_t *input, int16_t *output) {
+ int16_t step[4];
+ int temp1, temp2;
+ // stage 1
+ temp1 = (input[0] + input[2]) * cospi_16_64;
+ temp2 = (input[0] - input[2]) * cospi_16_64;
+ step[0] = dct_const_round_shift(temp1);
+ step[1] = dct_const_round_shift(temp2);
+ temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64;
+ temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64;
+ step[2] = dct_const_round_shift(temp1);
+ step[3] = dct_const_round_shift(temp2);
+
+ // stage 2
+ output[0] = step[0] + step[3];
+ output[1] = step[1] + step[2];
+ output[2] = step[1] - step[2];
+ output[3] = step[0] - step[3];
+}
+
+void vp9_short_idct4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride) {
+ int16_t out[4 * 4];
+ int16_t *outptr = out;
+ int i, j;
+ int16_t temp_in[4], temp_out[4];
+
+ // Rows
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 4; ++j)
+ temp_in[j] = input[j];
+ vp9_idct4_1d(temp_in, outptr);
+ input += 4;
+ outptr += 4;
+ }
+
+ // Columns
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 4; ++j)
+ temp_in[j] = out[j * 4 + i];
+ vp9_idct4_1d(temp_in, temp_out);
+ for (j = 0; j < 4; ++j)
+ dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 4)
+ + dest[j * dest_stride + i]);
+ }
+}
+
+void vp9_short_idct4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride) {
+ int i;
+ int a1;
+ int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
+ out = dct_const_round_shift(out * cospi_16_64);
+ a1 = ROUND_POWER_OF_TWO(out, 4);
+
+ for (i = 0; i < 4; i++) {
+ dest[0] = clip_pixel(dest[0] + a1);
+ dest[1] = clip_pixel(dest[1] + a1);
+ dest[2] = clip_pixel(dest[2] + a1);
+ dest[3] = clip_pixel(dest[3] + a1);
+ dest += dest_stride;
+ }
+}
+
+void vp9_dc_only_idct_add_c(int input_dc, uint8_t *pred_ptr,
+ uint8_t *dst_ptr, int pitch, int stride) {
+ int a1;
+ int r, c;
+ int16_t out = dct_const_round_shift(input_dc * cospi_16_64);
+ out = dct_const_round_shift(out * cospi_16_64);
+ a1 = ROUND_POWER_OF_TWO(out, 4);
+
+ for (r = 0; r < 4; r++) {
+ for (c = 0; c < 4; c++)
+ dst_ptr[c] = clip_pixel(a1 + pred_ptr[c]);
+
+ dst_ptr += stride;
+ pred_ptr += pitch;
+ }
+}
+
+static void idct8_1d(int16_t *input, int16_t *output) {
+ int16_t step1[8], step2[8];
+ int temp1, temp2;
+ // stage 1
+ step1[0] = input[0];
+ step1[2] = input[4];
+ step1[1] = input[2];
+ step1[3] = input[6];
+ temp1 = input[1] * cospi_28_64 - input[7] * cospi_4_64;
+ temp2 = input[1] * cospi_4_64 + input[7] * cospi_28_64;
+ step1[4] = dct_const_round_shift(temp1);
+ step1[7] = dct_const_round_shift(temp2);
+ temp1 = input[5] * cospi_12_64 - input[3] * cospi_20_64;
+ temp2 = input[5] * cospi_20_64 + input[3] * cospi_12_64;
+ step1[5] = dct_const_round_shift(temp1);
+ step1[6] = dct_const_round_shift(temp2);
+
+ // stage 2 & stage 3 - even half
+ vp9_idct4_1d(step1, step1);
+
+ // stage 2 - odd half
+ step2[4] = step1[4] + step1[5];
+ step2[5] = step1[4] - step1[5];
+ step2[6] = -step1[6] + step1[7];
+ step2[7] = step1[6] + step1[7];
+
+  // stage 3 - odd half
+ step1[4] = step2[4];
+ temp1 = (step2[6] - step2[5]) * cospi_16_64;
+ temp2 = (step2[5] + step2[6]) * cospi_16_64;
+ step1[5] = dct_const_round_shift(temp1);
+ step1[6] = dct_const_round_shift(temp2);
+ step1[7] = step2[7];
+
+ // stage 4
+ output[0] = step1[0] + step1[7];
+ output[1] = step1[1] + step1[6];
+ output[2] = step1[2] + step1[5];
+ output[3] = step1[3] + step1[4];
+ output[4] = step1[3] - step1[4];
+ output[5] = step1[2] - step1[5];
+ output[6] = step1[1] - step1[6];
+ output[7] = step1[0] - step1[7];
+}
+
+void vp9_short_idct8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride) {
+ int16_t out[8 * 8];
+ int16_t *outptr = out;
+ int i, j;
+ int16_t temp_in[8], temp_out[8];
+
+ // First transform rows
+ for (i = 0; i < 8; ++i) {
+ idct8_1d(input, outptr);
+ input += 8;
+ outptr += 8;
+ }
+
+ // Then transform columns
+ for (i = 0; i < 8; ++i) {
+ for (j = 0; j < 8; ++j)
+ temp_in[j] = out[j * 8 + i];
+ idct8_1d(temp_in, temp_out);
+ for (j = 0; j < 8; ++j)
+ dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 5)
+ + dest[j * dest_stride + i]);
+ }
+}
+
+static void iadst4_1d(int16_t *input, int16_t *output) {
+ int s0, s1, s2, s3, s4, s5, s6, s7;
+
+ int x0 = input[0];
+ int x1 = input[1];
+ int x2 = input[2];
+ int x3 = input[3];
+
+ if (!(x0 | x1 | x2 | x3)) {
+ output[0] = output[1] = output[2] = output[3] = 0;
+ return;
+ }
+
+ s0 = sinpi_1_9 * x0;
+ s1 = sinpi_2_9 * x0;
+ s2 = sinpi_3_9 * x1;
+ s3 = sinpi_4_9 * x2;
+ s4 = sinpi_1_9 * x2;
+ s5 = sinpi_2_9 * x3;
+ s6 = sinpi_4_9 * x3;
+ s7 = x0 - x2 + x3;
+
+ x0 = s0 + s3 + s5;
+ x1 = s1 - s4 - s6;
+ x2 = sinpi_3_9 * s7;
+ x3 = s2;
+
+ s0 = x0 + x3;
+ s1 = x1 + x3;
+ s2 = x2;
+ s3 = x0 + x1 - x3;
+
+ // 1-D transform scaling factor is sqrt(2).
+ // The overall dynamic range is 14b (input) + 14b (multiplication scaling)
+ // + 1b (addition) = 29b.
+ // Hence the output bit depth is 15b.
+ output[0] = dct_const_round_shift(s0);
+ output[1] = dct_const_round_shift(s1);
+ output[2] = dct_const_round_shift(s2);
+ output[3] = dct_const_round_shift(s3);
+}
+
+void vp9_short_iht4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride,
+ int tx_type) {
+ const transform_2d IHT_4[] = {
+ { vp9_idct4_1d, vp9_idct4_1d }, // DCT_DCT = 0
+ { iadst4_1d, vp9_idct4_1d }, // ADST_DCT = 1
+ { vp9_idct4_1d, iadst4_1d }, // DCT_ADST = 2
+ { iadst4_1d, iadst4_1d } // ADST_ADST = 3
+ };
+
+ int i, j;
+ int16_t out[4 * 4];
+ int16_t *outptr = out;
+ int16_t temp_in[4], temp_out[4];
+
+ // inverse transform row vectors
+ for (i = 0; i < 4; ++i) {
+ IHT_4[tx_type].rows(input, outptr);
+ input += 4;
+ outptr += 4;
+ }
+
+ // inverse transform column vectors
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 4; ++j)
+ temp_in[j] = out[j * 4 + i];
+ IHT_4[tx_type].cols(temp_in, temp_out);
+ for (j = 0; j < 4; ++j)
+ dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 4)
+ + dest[j * dest_stride + i]);
+ }
+}
+
+static void iadst8_1d(int16_t *input, int16_t *output) {
+ int s0, s1, s2, s3, s4, s5, s6, s7;
+
+ int x0 = input[7];
+ int x1 = input[0];
+ int x2 = input[5];
+ int x3 = input[2];
+ int x4 = input[3];
+ int x5 = input[4];
+ int x6 = input[1];
+ int x7 = input[6];
+
+ if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) {
+ output[0] = output[1] = output[2] = output[3] = output[4]
+ = output[5] = output[6] = output[7] = 0;
+ return;
+ }
+
+ // stage 1
+ s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
+ s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
+ s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
+ s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
+ s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
+ s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
+ s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
+ s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
+
+ x0 = dct_const_round_shift(s0 + s4);
+ x1 = dct_const_round_shift(s1 + s5);
+ x2 = dct_const_round_shift(s2 + s6);
+ x3 = dct_const_round_shift(s3 + s7);
+ x4 = dct_const_round_shift(s0 - s4);
+ x5 = dct_const_round_shift(s1 - s5);
+ x6 = dct_const_round_shift(s2 - s6);
+ x7 = dct_const_round_shift(s3 - s7);
+
+ // stage 2
+ s0 = x0;
+ s1 = x1;
+ s2 = x2;
+ s3 = x3;
+ s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
+ s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
+ s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
+ s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
+
+ x0 = s0 + s2;
+ x1 = s1 + s3;
+ x2 = s0 - s2;
+ x3 = s1 - s3;
+ x4 = dct_const_round_shift(s4 + s6);
+ x5 = dct_const_round_shift(s5 + s7);
+ x6 = dct_const_round_shift(s4 - s6);
+ x7 = dct_const_round_shift(s5 - s7);
+
+ // stage 3
+ s2 = cospi_16_64 * (x2 + x3);
+ s3 = cospi_16_64 * (x2 - x3);
+ s6 = cospi_16_64 * (x6 + x7);
+ s7 = cospi_16_64 * (x6 - x7);
+
+ x2 = dct_const_round_shift(s2);
+ x3 = dct_const_round_shift(s3);
+ x6 = dct_const_round_shift(s6);
+ x7 = dct_const_round_shift(s7);
+
+ output[0] = x0;
+ output[1] = -x4;
+ output[2] = x6;
+ output[3] = -x2;
+ output[4] = x3;
+ output[5] = -x7;
+ output[6] = x5;
+ output[7] = -x1;
+}
+
+static const transform_2d IHT_8[] = {
+ { idct8_1d, idct8_1d }, // DCT_DCT = 0
+ { iadst8_1d, idct8_1d }, // ADST_DCT = 1
+ { idct8_1d, iadst8_1d }, // DCT_ADST = 2
+ { iadst8_1d, iadst8_1d } // ADST_ADST = 3
+};
+
+void vp9_short_iht8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride,
+ int tx_type) {
+ int i, j;
+ int16_t out[8 * 8];
+ int16_t *outptr = out;
+ int16_t temp_in[8], temp_out[8];
+ const transform_2d ht = IHT_8[tx_type];
+
+ // inverse transform row vectors
+ for (i = 0; i < 8; ++i) {
+ ht.rows(input, outptr);
+ input += 8;
+ outptr += 8;
+ }
+
+ // inverse transform column vectors
+ for (i = 0; i < 8; ++i) {
+ for (j = 0; j < 8; ++j)
+ temp_in[j] = out[j * 8 + i];
+ ht.cols(temp_in, temp_out);
+ for (j = 0; j < 8; ++j)
+      dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 5)
+                                             + dest[j * dest_stride + i]);
+  }
+}
+
+void vp9_short_idct10_8x8_add_c(int16_t *input, uint8_t *dest,
+ int dest_stride) {
+ int16_t out[8 * 8];
+ int16_t *outptr = out;
+ int i, j;
+ int16_t temp_in[8], temp_out[8];
+
+ vpx_memset(out, 0, sizeof(out));
+ // First transform rows
+  // only the first 4 rows have non-zero coefficients
+ for (i = 0; i < 4; ++i) {
+ idct8_1d(input, outptr);
+ input += 8;
+ outptr += 8;
+ }
+
+ // Then transform columns
+ for (i = 0; i < 8; ++i) {
+ for (j = 0; j < 8; ++j)
+ temp_in[j] = out[j * 8 + i];
+ idct8_1d(temp_in, temp_out);
+ for (j = 0; j < 8; ++j)
+ dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 5)
+ + dest[j * dest_stride + i]);
+ }
+}
+
+void vp9_short_idct1_8x8_c(int16_t *input, int16_t *output) {
+ int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
+ out = dct_const_round_shift(out * cospi_16_64);
+ output[0] = ROUND_POWER_OF_TWO(out, 5);
+}
+
+static void idct16_1d(int16_t *input, int16_t *output) {
+ int16_t step1[16], step2[16];
+ int temp1, temp2;
+
+ // stage 1
+ step1[0] = input[0/2];
+ step1[1] = input[16/2];
+ step1[2] = input[8/2];
+ step1[3] = input[24/2];
+ step1[4] = input[4/2];
+ step1[5] = input[20/2];
+ step1[6] = input[12/2];
+ step1[7] = input[28/2];
+ step1[8] = input[2/2];
+ step1[9] = input[18/2];
+ step1[10] = input[10/2];
+ step1[11] = input[26/2];
+ step1[12] = input[6/2];
+ step1[13] = input[22/2];
+ step1[14] = input[14/2];
+ step1[15] = input[30/2];
+
+ // stage 2
+ step2[0] = step1[0];
+ step2[1] = step1[1];
+ step2[2] = step1[2];
+ step2[3] = step1[3];
+ step2[4] = step1[4];
+ step2[5] = step1[5];
+ step2[6] = step1[6];
+ step2[7] = step1[7];
+
+ temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64;
+ temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64;
+ step2[8] = dct_const_round_shift(temp1);
+ step2[15] = dct_const_round_shift(temp2);
+
+ temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64;
+ temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64;
+ step2[9] = dct_const_round_shift(temp1);
+ step2[14] = dct_const_round_shift(temp2);
+
+ temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64;
+ temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64;
+ step2[10] = dct_const_round_shift(temp1);
+ step2[13] = dct_const_round_shift(temp2);
+
+ temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64;
+ temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64;
+ step2[11] = dct_const_round_shift(temp1);
+ step2[12] = dct_const_round_shift(temp2);
+
+ // stage 3
+ step1[0] = step2[0];
+ step1[1] = step2[1];
+ step1[2] = step2[2];
+ step1[3] = step2[3];
+
+ temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64;
+ temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64;
+ step1[4] = dct_const_round_shift(temp1);
+ step1[7] = dct_const_round_shift(temp2);
+ temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64;
+ temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64;
+ step1[5] = dct_const_round_shift(temp1);
+ step1[6] = dct_const_round_shift(temp2);
+
+ step1[8] = step2[8] + step2[9];
+ step1[9] = step2[8] - step2[9];
+ step1[10] = -step2[10] + step2[11];
+ step1[11] = step2[10] + step2[11];
+ step1[12] = step2[12] + step2[13];
+ step1[13] = step2[12] - step2[13];
+ step1[14] = -step2[14] + step2[15];
+ step1[15] = step2[14] + step2[15];
+
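+  // stage 4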
+ temp1 = (step1[0] + step1[1]) * cospi_16_64;
+ temp2 = (step1[0] - step1[1]) * cospi_16_64;
+ step2[0] = dct_const_round_shift(temp1);
+ step2[1] = dct_const_round_shift(temp2);
+ temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64;
+ temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64;
+ step2[2] = dct_const_round_shift(temp1);
+ step2[3] = dct_const_round_shift(temp2);
+ step2[4] = step1[4] + step1[5];
+ step2[5] = step1[4] - step1[5];
+ step2[6] = -step1[6] + step1[7];
+ step2[7] = step1[6] + step1[7];
+
+ step2[8] = step1[8];
+ step2[15] = step1[15];
+ temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64;
+ temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64;
+ step2[9] = dct_const_round_shift(temp1);
+ step2[14] = dct_const_round_shift(temp2);
+ temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64;
+ temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64;
+ step2[10] = dct_const_round_shift(temp1);
+ step2[13] = dct_const_round_shift(temp2);
+ step2[11] = step1[11];
+ step2[12] = step1[12];
+
+ // stage 5
+ step1[0] = step2[0] + step2[3];
+ step1[1] = step2[1] + step2[2];
+ step1[2] = step2[1] - step2[2];
+ step1[3] = step2[0] - step2[3];
+ step1[4] = step2[4];
+ temp1 = (step2[6] - step2[5]) * cospi_16_64;
+ temp2 = (step2[5] + step2[6]) * cospi_16_64;
+ step1[5] = dct_const_round_shift(temp1);
+ step1[6] = dct_const_round_shift(temp2);
+ step1[7] = step2[7];
+
+ step1[8] = step2[8] + step2[11];
+ step1[9] = step2[9] + step2[10];
+ step1[10] = step2[9] - step2[10];
+ step1[11] = step2[8] - step2[11];
+ step1[12] = -step2[12] + step2[15];
+ step1[13] = -step2[13] + step2[14];
+ step1[14] = step2[13] + step2[14];
+ step1[15] = step2[12] + step2[15];
+
+ // stage 6
+ step2[0] = step1[0] + step1[7];
+ step2[1] = step1[1] + step1[6];
+ step2[2] = step1[2] + step1[5];
+ step2[3] = step1[3] + step1[4];
+ step2[4] = step1[3] - step1[4];
+ step2[5] = step1[2] - step1[5];
+ step2[6] = step1[1] - step1[6];
+ step2[7] = step1[0] - step1[7];
+ step2[8] = step1[8];
+ step2[9] = step1[9];
+ temp1 = (-step1[10] + step1[13]) * cospi_16_64;
+ temp2 = (step1[10] + step1[13]) * cospi_16_64;
+ step2[10] = dct_const_round_shift(temp1);
+ step2[13] = dct_const_round_shift(temp2);
+ temp1 = (-step1[11] + step1[12]) * cospi_16_64;
+ temp2 = (step1[11] + step1[12]) * cospi_16_64;
+ step2[11] = dct_const_round_shift(temp1);
+ step2[12] = dct_const_round_shift(temp2);
+ step2[14] = step1[14];
+ step2[15] = step1[15];
+
+ // stage 7
+ output[0] = step2[0] + step2[15];
+ output[1] = step2[1] + step2[14];
+ output[2] = step2[2] + step2[13];
+ output[3] = step2[3] + step2[12];
+ output[4] = step2[4] + step2[11];
+ output[5] = step2[5] + step2[10];
+ output[6] = step2[6] + step2[9];
+ output[7] = step2[7] + step2[8];
+ output[8] = step2[7] - step2[8];
+ output[9] = step2[6] - step2[9];
+ output[10] = step2[5] - step2[10];
+ output[11] = step2[4] - step2[11];
+ output[12] = step2[3] - step2[12];
+ output[13] = step2[2] - step2[13];
+ output[14] = step2[1] - step2[14];
+ output[15] = step2[0] - step2[15];
+}
+
+void vp9_short_idct16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride) {
+ int16_t out[16 * 16];
+ int16_t *outptr = out;
+ int i, j;
+ int16_t temp_in[16], temp_out[16];
+
+ // First transform rows
+ for (i = 0; i < 16; ++i) {
+ idct16_1d(input, outptr);
+ input += 16;
+ outptr += 16;
+ }
+
+ // Then transform columns
+ for (i = 0; i < 16; ++i) {
+ for (j = 0; j < 16; ++j)
+ temp_in[j] = out[j * 16 + i];
+ idct16_1d(temp_in, temp_out);
+ for (j = 0; j < 16; ++j)
+ dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6)
+ + dest[j * dest_stride + i]);
+ }
+}
+
+static void iadst16_1d(int16_t *input, int16_t *output) {
+ int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15;
+
+ int x0 = input[15];
+ int x1 = input[0];
+ int x2 = input[13];
+ int x3 = input[2];
+ int x4 = input[11];
+ int x5 = input[4];
+ int x6 = input[9];
+ int x7 = input[6];
+ int x8 = input[7];
+ int x9 = input[8];
+ int x10 = input[5];
+ int x11 = input[10];
+ int x12 = input[3];
+ int x13 = input[12];
+ int x14 = input[1];
+ int x15 = input[14];
+
+ if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8
+ | x9 | x10 | x11 | x12 | x13 | x14 | x15)) {
+ output[0] = output[1] = output[2] = output[3] = output[4]
+ = output[5] = output[6] = output[7] = output[8]
+ = output[9] = output[10] = output[11] = output[12]
+ = output[13] = output[14] = output[15] = 0;
+ return;
+ }
+
+ // stage 1
+ s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
+ s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
+ s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
+ s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
+ s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
+ s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
+ s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
+ s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
+ s8 = x8 * cospi_17_64 + x9 * cospi_15_64;
+ s9 = x8 * cospi_15_64 - x9 * cospi_17_64;
+ s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
+ s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
+ s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
+ s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
+ s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
+ s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
+
+ x0 = dct_const_round_shift(s0 + s8);
+ x1 = dct_const_round_shift(s1 + s9);
+ x2 = dct_const_round_shift(s2 + s10);
+ x3 = dct_const_round_shift(s3 + s11);
+ x4 = dct_const_round_shift(s4 + s12);
+ x5 = dct_const_round_shift(s5 + s13);
+ x6 = dct_const_round_shift(s6 + s14);
+ x7 = dct_const_round_shift(s7 + s15);
+ x8 = dct_const_round_shift(s0 - s8);
+ x9 = dct_const_round_shift(s1 - s9);
+ x10 = dct_const_round_shift(s2 - s10);
+ x11 = dct_const_round_shift(s3 - s11);
+ x12 = dct_const_round_shift(s4 - s12);
+ x13 = dct_const_round_shift(s5 - s13);
+ x14 = dct_const_round_shift(s6 - s14);
+ x15 = dct_const_round_shift(s7 - s15);
+
+ // stage 2
+ s0 = x0;
+ s1 = x1;
+ s2 = x2;
+ s3 = x3;
+ s4 = x4;
+ s5 = x5;
+ s6 = x6;
+ s7 = x7;
+ s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
+ s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
+ s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
+ s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
+ s12 = - x12 * cospi_28_64 + x13 * cospi_4_64;
+ s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
+ s14 = - x14 * cospi_12_64 + x15 * cospi_20_64;
+ s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
+
+ x0 = s0 + s4;
+ x1 = s1 + s5;
+ x2 = s2 + s6;
+ x3 = s3 + s7;
+ x4 = s0 - s4;
+ x5 = s1 - s5;
+ x6 = s2 - s6;
+ x7 = s3 - s7;
+ x8 = dct_const_round_shift(s8 + s12);
+ x9 = dct_const_round_shift(s9 + s13);
+ x10 = dct_const_round_shift(s10 + s14);
+ x11 = dct_const_round_shift(s11 + s15);
+ x12 = dct_const_round_shift(s8 - s12);
+ x13 = dct_const_round_shift(s9 - s13);
+ x14 = dct_const_round_shift(s10 - s14);
+ x15 = dct_const_round_shift(s11 - s15);
+
+ // stage 3
+ s0 = x0;
+ s1 = x1;
+ s2 = x2;
+ s3 = x3;
+ s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
+ s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
+ s6 = - x6 * cospi_24_64 + x7 * cospi_8_64;
+ s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
+ s8 = x8;
+ s9 = x9;
+ s10 = x10;
+ s11 = x11;
+ s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
+ s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
+ s14 = - x14 * cospi_24_64 + x15 * cospi_8_64;
+ s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
+
+ x0 = s0 + s2;
+ x1 = s1 + s3;
+ x2 = s0 - s2;
+ x3 = s1 - s3;
+ x4 = dct_const_round_shift(s4 + s6);
+ x5 = dct_const_round_shift(s5 + s7);
+ x6 = dct_const_round_shift(s4 - s6);
+ x7 = dct_const_round_shift(s5 - s7);
+ x8 = s8 + s10;
+ x9 = s9 + s11;
+ x10 = s8 - s10;
+ x11 = s9 - s11;
+ x12 = dct_const_round_shift(s12 + s14);
+ x13 = dct_const_round_shift(s13 + s15);
+ x14 = dct_const_round_shift(s12 - s14);
+ x15 = dct_const_round_shift(s13 - s15);
+
+ // stage 4
+ s2 = (- cospi_16_64) * (x2 + x3);
+ s3 = cospi_16_64 * (x2 - x3);
+ s6 = cospi_16_64 * (x6 + x7);
+ s7 = cospi_16_64 * (- x6 + x7);
+ s10 = cospi_16_64 * (x10 + x11);
+ s11 = cospi_16_64 * (- x10 + x11);
+ s14 = (- cospi_16_64) * (x14 + x15);
+ s15 = cospi_16_64 * (x14 - x15);
+
+ x2 = dct_const_round_shift(s2);
+ x3 = dct_const_round_shift(s3);
+ x6 = dct_const_round_shift(s6);
+ x7 = dct_const_round_shift(s7);
+ x10 = dct_const_round_shift(s10);
+ x11 = dct_const_round_shift(s11);
+ x14 = dct_const_round_shift(s14);
+ x15 = dct_const_round_shift(s15);
+
+ output[0] = x0;
+ output[1] = -x8;
+ output[2] = x12;
+ output[3] = -x4;
+ output[4] = x6;
+ output[5] = x14;
+ output[6] = x10;
+ output[7] = x2;
+ output[8] = x3;
+ output[9] = x11;
+ output[10] = x15;
+ output[11] = x7;
+ output[12] = x5;
+ output[13] = -x13;
+ output[14] = x9;
+ output[15] = -x1;
+}
+
+static const transform_2d IHT_16[] = {
+ { idct16_1d, idct16_1d }, // DCT_DCT = 0
+ { iadst16_1d, idct16_1d }, // ADST_DCT = 1
+ { idct16_1d, iadst16_1d }, // DCT_ADST = 2
+ { iadst16_1d, iadst16_1d } // ADST_ADST = 3
+};
+
+void vp9_short_iht16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride,
+ int tx_type) {
+ int i, j;
+ int16_t out[16 * 16];
+ int16_t *outptr = out;
+ int16_t temp_in[16], temp_out[16];
+ const transform_2d ht = IHT_16[tx_type];
+
+ // Rows
+ for (i = 0; i < 16; ++i) {
+ ht.rows(input, outptr);
+ input += 16;
+ outptr += 16;
+ }
+
+ // Columns
+ for (i = 0; i < 16; ++i) {
+ for (j = 0; j < 16; ++j)
+ temp_in[j] = out[j * 16 + i];
+ ht.cols(temp_in, temp_out);
+ for (j = 0; j < 16; ++j)
+      dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6)
+                                             + dest[j * dest_stride + i]);
+  }
+}
+
+void vp9_short_idct10_16x16_add_c(int16_t *input, uint8_t *dest,
+ int dest_stride) {
+ int16_t out[16 * 16];
+ int16_t *outptr = out;
+ int i, j;
+ int16_t temp_in[16], temp_out[16];
+
+  /* First transform rows. Since all non-zero dct coefficients are in the
+   * upper-left 4x4 area, we only need to calculate the first 4 rows here.
+   */
+ vpx_memset(out, 0, sizeof(out));
+ for (i = 0; i < 4; ++i) {
+ idct16_1d(input, outptr);
+ input += 16;
+ outptr += 16;
+ }
+
+ // Then transform columns
+ for (i = 0; i < 16; ++i) {
+ for (j = 0; j < 16; ++j)
+ temp_in[j] = out[j*16 + i];
+ idct16_1d(temp_in, temp_out);
+ for (j = 0; j < 16; ++j)
+ dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6)
+ + dest[j * dest_stride + i]);
+ }
+}
+
+void vp9_short_idct1_16x16_c(int16_t *input, int16_t *output) {
+ int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
+ out = dct_const_round_shift(out * cospi_16_64);
+ output[0] = ROUND_POWER_OF_TWO(out, 6);
+}
+
+static void idct32_1d(int16_t *input, int16_t *output) {
+ int16_t step1[32], step2[32];
+ int temp1, temp2;
+
+ // stage 1
+ step1[0] = input[0];
+ step1[1] = input[16];
+ step1[2] = input[8];
+ step1[3] = input[24];
+ step1[4] = input[4];
+ step1[5] = input[20];
+ step1[6] = input[12];
+ step1[7] = input[28];
+ step1[8] = input[2];
+ step1[9] = input[18];
+ step1[10] = input[10];
+ step1[11] = input[26];
+ step1[12] = input[6];
+ step1[13] = input[22];
+ step1[14] = input[14];
+ step1[15] = input[30];
+
+ temp1 = input[1] * cospi_31_64 - input[31] * cospi_1_64;
+ temp2 = input[1] * cospi_1_64 + input[31] * cospi_31_64;
+ step1[16] = dct_const_round_shift(temp1);
+ step1[31] = dct_const_round_shift(temp2);
+
+ temp1 = input[17] * cospi_15_64 - input[15] * cospi_17_64;
+ temp2 = input[17] * cospi_17_64 + input[15] * cospi_15_64;
+ step1[17] = dct_const_round_shift(temp1);
+ step1[30] = dct_const_round_shift(temp2);
+
+ temp1 = input[9] * cospi_23_64 - input[23] * cospi_9_64;
+ temp2 = input[9] * cospi_9_64 + input[23] * cospi_23_64;
+ step1[18] = dct_const_round_shift(temp1);
+ step1[29] = dct_const_round_shift(temp2);
+
+ temp1 = input[25] * cospi_7_64 - input[7] * cospi_25_64;
+ temp2 = input[25] * cospi_25_64 + input[7] * cospi_7_64;
+ step1[19] = dct_const_round_shift(temp1);
+ step1[28] = dct_const_round_shift(temp2);
+
+ temp1 = input[5] * cospi_27_64 - input[27] * cospi_5_64;
+ temp2 = input[5] * cospi_5_64 + input[27] * cospi_27_64;
+ step1[20] = dct_const_round_shift(temp1);
+ step1[27] = dct_const_round_shift(temp2);
+
+ temp1 = input[21] * cospi_11_64 - input[11] * cospi_21_64;
+ temp2 = input[21] * cospi_21_64 + input[11] * cospi_11_64;
+ step1[21] = dct_const_round_shift(temp1);
+ step1[26] = dct_const_round_shift(temp2);
+
+ temp1 = input[13] * cospi_19_64 - input[19] * cospi_13_64;
+ temp2 = input[13] * cospi_13_64 + input[19] * cospi_19_64;
+ step1[22] = dct_const_round_shift(temp1);
+ step1[25] = dct_const_round_shift(temp2);
+
+ temp1 = input[29] * cospi_3_64 - input[3] * cospi_29_64;
+ temp2 = input[29] * cospi_29_64 + input[3] * cospi_3_64;
+ step1[23] = dct_const_round_shift(temp1);
+ step1[24] = dct_const_round_shift(temp2);
+
+ // stage 2
+ step2[0] = step1[0];
+ step2[1] = step1[1];
+ step2[2] = step1[2];
+ step2[3] = step1[3];
+ step2[4] = step1[4];
+ step2[5] = step1[5];
+ step2[6] = step1[6];
+ step2[7] = step1[7];
+
+ temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64;
+ temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64;
+ step2[8] = dct_const_round_shift(temp1);
+ step2[15] = dct_const_round_shift(temp2);
+
+ temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64;
+ temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64;
+ step2[9] = dct_const_round_shift(temp1);
+ step2[14] = dct_const_round_shift(temp2);
+
+ temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64;
+ temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64;
+ step2[10] = dct_const_round_shift(temp1);
+ step2[13] = dct_const_round_shift(temp2);
+
+ temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64;
+ temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64;
+ step2[11] = dct_const_round_shift(temp1);
+ step2[12] = dct_const_round_shift(temp2);
+
+ step2[16] = step1[16] + step1[17];
+ step2[17] = step1[16] - step1[17];
+ step2[18] = -step1[18] + step1[19];
+ step2[19] = step1[18] + step1[19];
+ step2[20] = step1[20] + step1[21];
+ step2[21] = step1[20] - step1[21];
+ step2[22] = -step1[22] + step1[23];
+ step2[23] = step1[22] + step1[23];
+ step2[24] = step1[24] + step1[25];
+ step2[25] = step1[24] - step1[25];
+ step2[26] = -step1[26] + step1[27];
+ step2[27] = step1[26] + step1[27];
+ step2[28] = step1[28] + step1[29];
+ step2[29] = step1[28] - step1[29];
+ step2[30] = -step1[30] + step1[31];
+ step2[31] = step1[30] + step1[31];
+
+ // stage 3
+ step1[0] = step2[0];
+ step1[1] = step2[1];
+ step1[2] = step2[2];
+ step1[3] = step2[3];
+
+ temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64;
+ temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64;
+ step1[4] = dct_const_round_shift(temp1);
+ step1[7] = dct_const_round_shift(temp2);
+ temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64;
+ temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64;
+ step1[5] = dct_const_round_shift(temp1);
+ step1[6] = dct_const_round_shift(temp2);
+
+ step1[8] = step2[8] + step2[9];
+ step1[9] = step2[8] - step2[9];
+ step1[10] = -step2[10] + step2[11];
+ step1[11] = step2[10] + step2[11];
+ step1[12] = step2[12] + step2[13];
+ step1[13] = step2[12] - step2[13];
+ step1[14] = -step2[14] + step2[15];
+ step1[15] = step2[14] + step2[15];
+
+ step1[16] = step2[16];
+ step1[31] = step2[31];
+ temp1 = -step2[17] * cospi_4_64 + step2[30] * cospi_28_64;
+ temp2 = step2[17] * cospi_28_64 + step2[30] * cospi_4_64;
+ step1[17] = dct_const_round_shift(temp1);
+ step1[30] = dct_const_round_shift(temp2);
+ temp1 = -step2[18] * cospi_28_64 - step2[29] * cospi_4_64;
+ temp2 = -step2[18] * cospi_4_64 + step2[29] * cospi_28_64;
+ step1[18] = dct_const_round_shift(temp1);
+ step1[29] = dct_const_round_shift(temp2);
+ step1[19] = step2[19];
+ step1[20] = step2[20];
+ temp1 = -step2[21] * cospi_20_64 + step2[26] * cospi_12_64;
+ temp2 = step2[21] * cospi_12_64 + step2[26] * cospi_20_64;
+ step1[21] = dct_const_round_shift(temp1);
+ step1[26] = dct_const_round_shift(temp2);
+ temp1 = -step2[22] * cospi_12_64 - step2[25] * cospi_20_64;
+ temp2 = -step2[22] * cospi_20_64 + step2[25] * cospi_12_64;
+ step1[22] = dct_const_round_shift(temp1);
+ step1[25] = dct_const_round_shift(temp2);
+ step1[23] = step2[23];
+ step1[24] = step2[24];
+ step1[27] = step2[27];
+ step1[28] = step2[28];
+
+ // stage 4
+ temp1 = (step1[0] + step1[1]) * cospi_16_64;
+ temp2 = (step1[0] - step1[1]) * cospi_16_64;
+ step2[0] = dct_const_round_shift(temp1);
+ step2[1] = dct_const_round_shift(temp2);
+ temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64;
+ temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64;
+ step2[2] = dct_const_round_shift(temp1);
+ step2[3] = dct_const_round_shift(temp2);
+ step2[4] = step1[4] + step1[5];
+ step2[5] = step1[4] - step1[5];
+ step2[6] = -step1[6] + step1[7];
+ step2[7] = step1[6] + step1[7];
+
+ step2[8] = step1[8];
+ step2[15] = step1[15];
+ temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64;
+ temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64;
+ step2[9] = dct_const_round_shift(temp1);
+ step2[14] = dct_const_round_shift(temp2);
+ temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64;
+ temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64;
+ step2[10] = dct_const_round_shift(temp1);
+ step2[13] = dct_const_round_shift(temp2);
+ step2[11] = step1[11];
+ step2[12] = step1[12];
+
+ step2[16] = step1[16] + step1[19];
+ step2[17] = step1[17] + step1[18];
+ step2[18] = step1[17] - step1[18];
+ step2[19] = step1[16] - step1[19];
+ step2[20] = -step1[20] + step1[23];
+ step2[21] = -step1[21] + step1[22];
+ step2[22] = step1[21] + step1[22];
+ step2[23] = step1[20] + step1[23];
+
+ step2[24] = step1[24] + step1[27];
+ step2[25] = step1[25] + step1[26];
+ step2[26] = step1[25] - step1[26];
+ step2[27] = step1[24] - step1[27];
+ step2[28] = -step1[28] + step1[31];
+ step2[29] = -step1[29] + step1[30];
+ step2[30] = step1[29] + step1[30];
+ step2[31] = step1[28] + step1[31];
+
+ // stage 5
+ step1[0] = step2[0] + step2[3];
+ step1[1] = step2[1] + step2[2];
+ step1[2] = step2[1] - step2[2];
+ step1[3] = step2[0] - step2[3];
+ step1[4] = step2[4];
+ temp1 = (step2[6] - step2[5]) * cospi_16_64;
+ temp2 = (step2[5] + step2[6]) * cospi_16_64;
+ step1[5] = dct_const_round_shift(temp1);
+ step1[6] = dct_const_round_shift(temp2);
+ step1[7] = step2[7];
+
+ step1[8] = step2[8] + step2[11];
+ step1[9] = step2[9] + step2[10];
+ step1[10] = step2[9] - step2[10];
+ step1[11] = step2[8] - step2[11];
+ step1[12] = -step2[12] + step2[15];
+ step1[13] = -step2[13] + step2[14];
+ step1[14] = step2[13] + step2[14];
+ step1[15] = step2[12] + step2[15];
+
+ step1[16] = step2[16];
+ step1[17] = step2[17];
+ temp1 = -step2[18] * cospi_8_64 + step2[29] * cospi_24_64;
+ temp2 = step2[18] * cospi_24_64 + step2[29] * cospi_8_64;
+ step1[18] = dct_const_round_shift(temp1);
+ step1[29] = dct_const_round_shift(temp2);
+ temp1 = -step2[19] * cospi_8_64 + step2[28] * cospi_24_64;
+ temp2 = step2[19] * cospi_24_64 + step2[28] * cospi_8_64;
+ step1[19] = dct_const_round_shift(temp1);
+ step1[28] = dct_const_round_shift(temp2);
+ temp1 = -step2[20] * cospi_24_64 - step2[27] * cospi_8_64;
+ temp2 = -step2[20] * cospi_8_64 + step2[27] * cospi_24_64;
+ step1[20] = dct_const_round_shift(temp1);
+ step1[27] = dct_const_round_shift(temp2);
+ temp1 = -step2[21] * cospi_24_64 - step2[26] * cospi_8_64;
+ temp2 = -step2[21] * cospi_8_64 + step2[26] * cospi_24_64;
+ step1[21] = dct_const_round_shift(temp1);
+ step1[26] = dct_const_round_shift(temp2);
+ step1[22] = step2[22];
+ step1[23] = step2[23];
+ step1[24] = step2[24];
+ step1[25] = step2[25];
+ step1[30] = step2[30];
+ step1[31] = step2[31];
+
+ // stage 6
+ step2[0] = step1[0] + step1[7];
+ step2[1] = step1[1] + step1[6];
+ step2[2] = step1[2] + step1[5];
+ step2[3] = step1[3] + step1[4];
+ step2[4] = step1[3] - step1[4];
+ step2[5] = step1[2] - step1[5];
+ step2[6] = step1[1] - step1[6];
+ step2[7] = step1[0] - step1[7];
+ step2[8] = step1[8];
+ step2[9] = step1[9];
+ temp1 = (-step1[10] + step1[13]) * cospi_16_64;
+ temp2 = (step1[10] + step1[13]) * cospi_16_64;
+ step2[10] = dct_const_round_shift(temp1);
+ step2[13] = dct_const_round_shift(temp2);
+ temp1 = (-step1[11] + step1[12]) * cospi_16_64;
+ temp2 = (step1[11] + step1[12]) * cospi_16_64;
+ step2[11] = dct_const_round_shift(temp1);
+ step2[12] = dct_const_round_shift(temp2);
+ step2[14] = step1[14];
+ step2[15] = step1[15];
+
+ step2[16] = step1[16] + step1[23];
+ step2[17] = step1[17] + step1[22];
+ step2[18] = step1[18] + step1[21];
+ step2[19] = step1[19] + step1[20];
+ step2[20] = step1[19] - step1[20];
+ step2[21] = step1[18] - step1[21];
+ step2[22] = step1[17] - step1[22];
+ step2[23] = step1[16] - step1[23];
+
+ step2[24] = -step1[24] + step1[31];
+ step2[25] = -step1[25] + step1[30];
+ step2[26] = -step1[26] + step1[29];
+ step2[27] = -step1[27] + step1[28];
+ step2[28] = step1[27] + step1[28];
+ step2[29] = step1[26] + step1[29];
+ step2[30] = step1[25] + step1[30];
+ step2[31] = step1[24] + step1[31];
+
+ // stage 7
+ step1[0] = step2[0] + step2[15];
+ step1[1] = step2[1] + step2[14];
+ step1[2] = step2[2] + step2[13];
+ step1[3] = step2[3] + step2[12];
+ step1[4] = step2[4] + step2[11];
+ step1[5] = step2[5] + step2[10];
+ step1[6] = step2[6] + step2[9];
+ step1[7] = step2[7] + step2[8];
+ step1[8] = step2[7] - step2[8];
+ step1[9] = step2[6] - step2[9];
+ step1[10] = step2[5] - step2[10];
+ step1[11] = step2[4] - step2[11];
+ step1[12] = step2[3] - step2[12];
+ step1[13] = step2[2] - step2[13];
+ step1[14] = step2[1] - step2[14];
+ step1[15] = step2[0] - step2[15];
+
+ step1[16] = step2[16];
+ step1[17] = step2[17];
+ step1[18] = step2[18];
+ step1[19] = step2[19];
+ temp1 = (-step2[20] + step2[27]) * cospi_16_64;
+ temp2 = (step2[20] + step2[27]) * cospi_16_64;
+ step1[20] = dct_const_round_shift(temp1);
+ step1[27] = dct_const_round_shift(temp2);
+ temp1 = (-step2[21] + step2[26]) * cospi_16_64;
+ temp2 = (step2[21] + step2[26]) * cospi_16_64;
+ step1[21] = dct_const_round_shift(temp1);
+ step1[26] = dct_const_round_shift(temp2);
+ temp1 = (-step2[22] + step2[25]) * cospi_16_64;
+ temp2 = (step2[22] + step2[25]) * cospi_16_64;
+ step1[22] = dct_const_round_shift(temp1);
+ step1[25] = dct_const_round_shift(temp2);
+ temp1 = (-step2[23] + step2[24]) * cospi_16_64;
+ temp2 = (step2[23] + step2[24]) * cospi_16_64;
+ step1[23] = dct_const_round_shift(temp1);
+ step1[24] = dct_const_round_shift(temp2);
+ step1[28] = step2[28];
+ step1[29] = step2[29];
+ step1[30] = step2[30];
+ step1[31] = step2[31];
+
+ // final stage
+ output[0] = step1[0] + step1[31];
+ output[1] = step1[1] + step1[30];
+ output[2] = step1[2] + step1[29];
+ output[3] = step1[3] + step1[28];
+ output[4] = step1[4] + step1[27];
+ output[5] = step1[5] + step1[26];
+ output[6] = step1[6] + step1[25];
+ output[7] = step1[7] + step1[24];
+ output[8] = step1[8] + step1[23];
+ output[9] = step1[9] + step1[22];
+ output[10] = step1[10] + step1[21];
+ output[11] = step1[11] + step1[20];
+ output[12] = step1[12] + step1[19];
+ output[13] = step1[13] + step1[18];
+ output[14] = step1[14] + step1[17];
+ output[15] = step1[15] + step1[16];
+ output[16] = step1[15] - step1[16];
+ output[17] = step1[14] - step1[17];
+ output[18] = step1[13] - step1[18];
+ output[19] = step1[12] - step1[19];
+ output[20] = step1[11] - step1[20];
+ output[21] = step1[10] - step1[21];
+ output[22] = step1[9] - step1[22];
+ output[23] = step1[8] - step1[23];
+ output[24] = step1[7] - step1[24];
+ output[25] = step1[6] - step1[25];
+ output[26] = step1[5] - step1[26];
+ output[27] = step1[4] - step1[27];
+ output[28] = step1[3] - step1[28];
+ output[29] = step1[2] - step1[29];
+ output[30] = step1[1] - step1[30];
+ output[31] = step1[0] - step1[31];
+}
+
+void vp9_short_idct32x32_add_c(int16_t *input, uint8_t *dest, int dest_stride) {
+ int16_t out[32 * 32];
+ int16_t *outptr = out;
+ int i, j;
+ int16_t temp_in[32], temp_out[32];
+
+ // Rows
+ for (i = 0; i < 32; ++i) {
+ idct32_1d(input, outptr);
+ input += 32;
+ outptr += 32;
+ }
+
+ // Columns
+ for (i = 0; i < 32; ++i) {
+ for (j = 0; j < 32; ++j)
+ temp_in[j] = out[j * 32 + i];
+ idct32_1d(temp_in, temp_out);
+ for (j = 0; j < 32; ++j)
+ dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6)
+ + dest[j * dest_stride + i]);
+ }
+}
+
+void vp9_short_idct1_32x32_c(int16_t *input, int16_t *output) {
+ int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
+ out = dct_const_round_shift(out * cospi_16_64);
+ output[0] = ROUND_POWER_OF_TWO(out, 6);
+}
+
+void vp9_short_idct10_32x32_add_c(int16_t *input, uint8_t *dest,
+ int dest_stride) {
+ int16_t out[32 * 32];
+ int16_t *outptr = out;
+ int i, j;
+ int16_t temp_in[32], temp_out[32];
+
+  /* First transform rows. Since all non-zero dct coefficients are in the
+   * upper-left 4x4 area, we only need to calculate the first 4 rows here.
+   */
+ vpx_memset(out, 0, sizeof(out));
+ for (i = 0; i < 4; ++i) {
+ idct32_1d(input, outptr);
+ input += 32;
+ outptr += 32;
+ }
+
+ // Columns
+ for (i = 0; i < 32; ++i) {
+ for (j = 0; j < 32; ++j)
+ temp_in[j] = out[j * 32 + i];
+ idct32_1d(temp_in, temp_out);
+ for (j = 0; j < 32; ++j)
+ dest[j * dest_stride + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6)
+ + dest[j * dest_stride + i]);
+ }
+}
diff --git a/libvpx/vp9/common/vp9_idct.h b/libvpx/vp9/common/vp9_idct.h
new file mode 100644
index 0000000..64f14c9
--- /dev/null
+++ b/libvpx/vp9/common/vp9_idct.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_IDCT_H_
+#define VP9_COMMON_VP9_IDCT_H_
+
+#include <assert.h>
+
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_common.h"
+
+
+// Constants and Macros used by all idct/dct functions
+#define DCT_CONST_BITS 14
+#define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))
+
+#define pair_set_epi16(a, b) \
+ _mm_set1_epi32(((uint16_t)(a)) + (((uint16_t)(b)) << 16))
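+// (x86 SSE2 helper: replicates the 16-bit pair (a, b) across every 32-bit
+// lane of an __m128i; only meaningful where <emmintrin.h> is available.)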
+
+// Constants are round(16384 * cos(k*Pi/64)) where k = 1 to 31.
+// Note: sin(k*Pi/64) = cos((32-k)*Pi/64)
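+// e.g. cospi_16_64 == round(16384 * cos(16 * Pi / 64)) == round(11585.24) == 11585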
+static const int cospi_1_64 = 16364;
+static const int cospi_2_64 = 16305;
+static const int cospi_3_64 = 16207;
+static const int cospi_4_64 = 16069;
+static const int cospi_5_64 = 15893;
+static const int cospi_6_64 = 15679;
+static const int cospi_7_64 = 15426;
+static const int cospi_8_64 = 15137;
+static const int cospi_9_64 = 14811;
+static const int cospi_10_64 = 14449;
+static const int cospi_11_64 = 14053;
+static const int cospi_12_64 = 13623;
+static const int cospi_13_64 = 13160;
+static const int cospi_14_64 = 12665;
+static const int cospi_15_64 = 12140;
+static const int cospi_16_64 = 11585;
+static const int cospi_17_64 = 11003;
+static const int cospi_18_64 = 10394;
+static const int cospi_19_64 = 9760;
+static const int cospi_20_64 = 9102;
+static const int cospi_21_64 = 8423;
+static const int cospi_22_64 = 7723;
+static const int cospi_23_64 = 7005;
+static const int cospi_24_64 = 6270;
+static const int cospi_25_64 = 5520;
+static const int cospi_26_64 = 4756;
+static const int cospi_27_64 = 3981;
+static const int cospi_28_64 = 3196;
+static const int cospi_29_64 = 2404;
+static const int cospi_30_64 = 1606;
+static const int cospi_31_64 = 804;
+
+// 16384 * sqrt(2) * sin(kPi/9) * 2 / 3
+static const int sinpi_1_9 = 5283;
+static const int sinpi_2_9 = 9929;
+static const int sinpi_3_9 = 13377;
+static const int sinpi_4_9 = 15212;
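+// e.g. sinpi_1_9 == round(16384 * sqrt(2) * sin(Pi / 9) * 2 / 3) == round(5283.2) == 5283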
+
+static INLINE int dct_const_round_shift(int input) {
+ int rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
+ assert(INT16_MIN <= rv && rv <= INT16_MAX);
+ return rv;
+}
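+// For intuition: with DCT_CONST_BITS == 14 this computes
+// (input + DCT_CONST_ROUNDING) >> 14, e.g.
+// dct_const_round_shift(11585 * 23) == (266455 + 8192) >> 14 == 16.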
+
+typedef void (*transform_1d)(int16_t*, int16_t*);
+
+typedef struct {
+ transform_1d cols, rows; // vertical and horizontal
+} transform_2d;
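+// A 2-D inverse transform applies .rows to each row of the coefficient
+// block, then .cols to each column of the intermediate result (see e.g.
+// vp9_short_iht8x8_add_c in vp9_idct.c).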
+
+#endif // VP9_COMMON_VP9_IDCT_H_
diff --git a/libvpx/vp9/common/vp9_implicit_segmentation.c b/libvpx/vp9/common/vp9_implicit_segmentation.c
new file mode 100644
index 0000000..2a1d35f
--- /dev/null
+++ b/libvpx/vp9/common/vp9_implicit_segmentation.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/vp9_onyxc_int.h"
+
+#define MAX_REGIONS 24000
+#ifndef NULL
+#define NULL 0
+#endif
+
+#define min_mbs_in_region 3
+
+// this linked list structure holds equivalences for connected
+// component labeling
+struct list_el {
+ int label;
+ int seg_value;
+ int count;
+ struct list_el *next;
+};
+typedef struct list_el item;
+
+// connected color segments
+typedef struct {
+ int min_x;
+ int min_y;
+ int max_x;
+ int max_y;
+ int64_t sum_x;
+ int64_t sum_y;
+ int pixels;
+ int seg_value;
+ int label;
+} segment_info;
+
+
+typedef enum {
+ SEGMENT_MODE,
+ SEGMENT_MV,
+ SEGMENT_REFFRAME,
+ SEGMENT_SKIPPED
+} SEGMENT_TYPE;
+
+
+// this merges the two equivalence lists and
+// then makes sure that every label points to the same
+// equivalence list
+void merge(item *labels, int u, int v) {
+ item *a = labels[u].next;
+ item *b = labels[v].next;
+ item c;
+ item *it = &c;
+ int count;
+
+ // check if they are already merged
+ if (u == v || a == b)
+ return;
+
+ count = a->count + b->count;
+
+ // merge 2 sorted linked lists.
+ while (a != NULL && b != NULL) {
+ if (a->label < b->label) {
+ it->next = a;
+ a = a->next;
+ } else {
+ it->next = b;
+ b = b->next;
+ }
+
+ it = it->next;
+ }
+
+ if (a == NULL)
+ it->next = b;
+ else
+ it->next = a;
+
+ it = c.next;
+
+  // make sure every equivalence in the linked list points to this new list
+ while (it != NULL) {
+ labels[it->label].next = c.next;
+ it = it->next;
+ }
+  c.next->count = count;
+}
+
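+// Two-pass connected-component labelling over the macroblock grid: the first
+// pass assigns provisional labels and records equivalences, the second pass
+// renumbers regions larger than min_mbs_in_region and gathers their stats.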
+void segment_via_mode_info(VP9_COMMON *oci, int how) {
+ MODE_INFO *mi = oci->mi;
+ int i, j;
+ int mb_index = 0;
+
+ int label = 1;
+ int pitch = oci->mb_cols;
+
+ // holds linked list equivalences
+ // the max should probably be allocated at a higher level in oci
+ item equivalences[MAX_REGIONS];
+ int eq_ptr = 0;
+ item labels[MAX_REGIONS];
+ segment_info segments[MAX_REGIONS];
+ int label_count = 1;
+ int labeling[400 * 300];
+ int *lp = labeling;
+
+ label_count = 1;
+ memset(labels, 0, sizeof(labels));
+ memset(segments, 0, sizeof(segments));
+
+  /* Go through each macroblock: first-pass labelling */
+ for (i = 0; i < oci->mb_rows; i++, lp += pitch) {
+ for (j = 0; j < oci->mb_cols; j++) {
+      // seg_values: a = above, l = left, n = this macroblock
+ int a = -1, l = -1, n = -1;
+
+ // above label, left label
+ int al = -1, ll = -1;
+ if (i) {
+ al = lp[j - pitch];
+ a = labels[al].next->seg_value;
+ }
+ if (j) {
+ ll = lp[j - 1];
+ l = labels[ll].next->seg_value;
+ }
+
+      // select which mode_info field the implicit segmentation is based on
+ switch (how) {
+ case SEGMENT_MODE:
+ n = mi[mb_index].mbmi.mode;
+ break;
+ case SEGMENT_MV:
+ n = mi[mb_index].mbmi.mv[0].as_int;
+ if (mi[mb_index].mbmi.ref_frame[0] == INTRA_FRAME)
+ n = -9999999;
+ break;
+ case SEGMENT_REFFRAME:
+ n = mi[mb_index].mbmi.ref_frame[0];
+ break;
+ case SEGMENT_SKIPPED:
+ n = mi[mb_index].mbmi.mb_skip_coeff;
+ break;
+ }
+
+ // above and left both have the same seg_value
+ if (n == a && n == l) {
+ // pick the lowest label
+ lp[j] = (al < ll ? al : ll);
+ labels[lp[j]].next->count++;
+
+ // merge the above and left equivalencies
+ merge(labels, al, ll);
+ }
+ // this matches above seg_value
+ else if (n == a) {
+ // give it the same label as above
+ lp[j] = al;
+ labels[al].next->count++;
+ }
+ // this matches left seg_value
+ else if (n == l) {
+        // give it the same label as the left
+ lp[j] = ll;
+ labels[ll].next->count++;
+ } else {
+        // matches neither neighbour, so start a new label
+ item *e = &labels[label];
+ item *nl = &equivalences[eq_ptr++];
+ lp[j] = label;
+ nl->label = label;
+ nl->next = 0;
+ nl->seg_value = n;
+ nl->count = 1;
+ e->next = nl;
+ label++;
+ }
+ mb_index++;
+ }
+ mb_index++;
+ }
+ lp = labeling;
+
+ // give new labels to regions
+ for (i = 1; i < label; i++)
+ if (labels[i].next->count > min_mbs_in_region &&
+ labels[labels[i].next->label].label == 0) {
+ segment_info *cs = &segments[label_count];
+ cs->label = label_count;
+ labels[labels[i].next->label].label = label_count++;
+ labels[labels[i].next->label].seg_value = labels[i].next->seg_value;
+ cs->seg_value = labels[labels[i].next->label].seg_value;
+ cs->min_x = oci->mb_cols;
+ cs->min_y = oci->mb_rows;
+ cs->max_x = 0;
+ cs->max_y = 0;
+ cs->sum_x = 0;
+ cs->sum_y = 0;
+ cs->pixels = 0;
+ }
+
+ lp = labeling;
+
+ // this is just to gather stats...
+ for (i = 0; i < oci->mb_rows; i++, lp += pitch) {
+ for (j = 0; j < oci->mb_cols; j++) {
+ const int old_lab = labels[lp[j]].next->label;
+ const int lab = labels[old_lab].label;
+ segment_info *cs = &segments[lab];
+
+ cs->min_x = MIN(cs->min_x, j);
+ cs->max_x = MAX(cs->max_x, j);
+ cs->min_y = MIN(cs->min_y, i);
+ cs->max_y = MAX(cs->max_y, i);
+ cs->sum_x += j;
+ cs->sum_y += i;
+ cs->pixels++;
+
+ lp[j] = lab;
+ mb_index++;
+ }
+ mb_index++;
+ }
+
+ {
+ lp = labeling;
+ printf("labelling \n");
+ mb_index = 0;
+ for (i = 0; i < oci->mb_rows; i++, lp += pitch) {
+ for (j = 0; j < oci->mb_cols; j++) {
+ printf("%4d", lp[j]);
+ }
+ printf(" ");
+ for (j = 0; j < oci->mb_cols; j++, mb_index++) {
+ // printf("%3d",mi[mb_index].mbmi.mode );
+ printf("%4d:%4d", mi[mb_index].mbmi.mv[0].as_mv.row,
+ mi[mb_index].mbmi.mv[0].as_mv.col);
+ }
+ printf("\n");
+ ++mb_index;
+ }
+ printf("\n");
+ }
+}
+
diff --git a/libvpx/vp9/common/vp9_loopfilter.c b/libvpx/vp9/common/vp9_loopfilter.c
new file mode 100644
index 0000000..7b3f0be
--- /dev/null
+++ b/libvpx/vp9/common/vp9_loopfilter.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp9/common/vp9_loopfilter.h"
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vpx_mem/vpx_mem.h"
+
+#include "vp9/common/vp9_seg_common.h"
+
+static void lf_init_lut(loop_filter_info_n *lfi) {
+ lfi->mode_lf_lut[DC_PRED] = 0;
+ lfi->mode_lf_lut[D45_PRED] = 0;
+ lfi->mode_lf_lut[D135_PRED] = 0;
+ lfi->mode_lf_lut[D117_PRED] = 0;
+ lfi->mode_lf_lut[D153_PRED] = 0;
+ lfi->mode_lf_lut[D27_PRED] = 0;
+ lfi->mode_lf_lut[D63_PRED] = 0;
+ lfi->mode_lf_lut[V_PRED] = 0;
+ lfi->mode_lf_lut[H_PRED] = 0;
+ lfi->mode_lf_lut[TM_PRED] = 0;
+ lfi->mode_lf_lut[ZEROMV] = 0;
+ lfi->mode_lf_lut[NEARESTMV] = 1;
+ lfi->mode_lf_lut[NEARMV] = 1;
+ lfi->mode_lf_lut[NEWMV] = 1;
+}
+
+void vp9_loop_filter_update_sharpness(loop_filter_info_n *lfi,
+ int sharpness_lvl) {
+ int i;
+
+ /* For each possible value for the loop filter fill out limits */
+ for (i = 0; i <= MAX_LOOP_FILTER; i++) {
+ int filt_lvl = i;
+ int block_inside_limit = 0;
+
+    /* Set loop filter parameters that control sharpness. */
+ block_inside_limit = filt_lvl >> (sharpness_lvl > 0);
+ block_inside_limit = block_inside_limit >> (sharpness_lvl > 4);
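+    // e.g. filt_lvl == 10, sharpness_lvl == 5: (10 >> 1) >> 1 == 2, well
+    // below the 9 - sharpness_lvl == 4 cap applied below.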
+
+ if (sharpness_lvl > 0) {
+ if (block_inside_limit > (9 - sharpness_lvl))
+ block_inside_limit = (9 - sharpness_lvl);
+ }
+
+ if (block_inside_limit < 1)
+ block_inside_limit = 1;
+
+ vpx_memset(lfi->lim[i], block_inside_limit, SIMD_WIDTH);
+ vpx_memset(lfi->blim[i], (2 * filt_lvl + block_inside_limit),
+ SIMD_WIDTH);
+ vpx_memset(lfi->mblim[i], (2 * (filt_lvl + 2) + block_inside_limit),
+ SIMD_WIDTH);
+ }
+}
+
+void vp9_loop_filter_init(VP9_COMMON *cm) {
+ loop_filter_info_n *lfi = &cm->lf_info;
+ int i;
+
+ // init limits for given sharpness
+ vp9_loop_filter_update_sharpness(lfi, cm->sharpness_level);
+ cm->last_sharpness_level = cm->sharpness_level;
+
+ // init LUT for lvl and hev thr picking
+ lf_init_lut(lfi);
+
+ // init hev threshold const vectors
+ for (i = 0; i < 4; i++)
+ vpx_memset(lfi->hev_thr[i], i, SIMD_WIDTH);
+}
+
+void vp9_loop_filter_frame_init(VP9_COMMON *cm,
+ MACROBLOCKD *xd,
+ int default_filt_lvl) {
+ int seg, // segment number
+ ref, // index in ref_lf_deltas
+ mode; // index in mode_lf_deltas
+  // n_shift selects the multiplier applied to lf_deltas:
+  // the multiplier (1 << n_shift) is 1 when default_filt_lvl is between 0 and
+  // 31, and 2 when it is between 32 and 63
+ int n_shift = default_filt_lvl >> 5;
+
+ loop_filter_info_n *lfi = &cm->lf_info;
+
+ /* update limits if sharpness has changed */
+ // printf("vp9_loop_filter_frame_init %d\n", default_filt_lvl);
+ // printf("sharpness level: %d [%d]\n",
+ // cm->sharpness_level, cm->last_sharpness_level);
+ if (cm->last_sharpness_level != cm->sharpness_level) {
+ vp9_loop_filter_update_sharpness(lfi, cm->sharpness_level);
+ cm->last_sharpness_level = cm->sharpness_level;
+ }
+
+ for (seg = 0; seg < MAX_MB_SEGMENTS; seg++) {
+ int lvl_seg = default_filt_lvl;
+ int lvl_ref, lvl_mode;
+
+ // Set the baseline filter values for each segment
+ if (vp9_segfeature_active(xd, seg, SEG_LVL_ALT_LF)) {
+ /* Abs value */
+ if (xd->mb_segment_abs_delta == SEGMENT_ABSDATA) {
+ lvl_seg = vp9_get_segdata(xd, seg, SEG_LVL_ALT_LF);
+ } else { /* Delta Value */
+ lvl_seg += vp9_get_segdata(xd, seg, SEG_LVL_ALT_LF);
+ lvl_seg = clamp(lvl_seg, 0, 63);
+ }
+ }
+
+ if (!xd->mode_ref_lf_delta_enabled) {
+ /* we could get rid of this if we assume that deltas are set to
+ * zero when not in use; encoder always uses deltas
+ */
+ vpx_memset(lfi->lvl[seg][0], lvl_seg, 4 * 4);
+ continue;
+ }
+
+ lvl_ref = lvl_seg;
+
+ /* INTRA_FRAME */
+ ref = INTRA_FRAME;
+
+ /* Apply delta for reference frame */
+ lvl_ref += xd->ref_lf_deltas[ref] << n_shift;
+
+ mode = 0; /* all the rest of Intra modes */
+ lvl_mode = lvl_ref;
+ lfi->lvl[seg][ref][mode] = clamp(lvl_mode, 0, 63);
+
+ /* LAST, GOLDEN, ALT */
+ for (ref = 1; ref < MAX_REF_FRAMES; ref++) {
+ int lvl_ref = lvl_seg;
+
+ /* Apply delta for reference frame */
+ lvl_ref += xd->ref_lf_deltas[ref] << n_shift;
+
+ /* Apply delta for Inter modes */
+ for (mode = 0; mode < MAX_MODE_LF_DELTAS; mode++) {
+ lvl_mode = lvl_ref + (xd->mode_lf_deltas[mode] << n_shift);
+ lfi->lvl[seg][ref][mode] = clamp(lvl_mode, 0, 63);
+ }
+ }
+ }
+}
+
+static int build_lfi(const VP9_COMMON *cm, const MB_MODE_INFO *mbmi,
+ struct loop_filter_info *lfi) {
+ const loop_filter_info_n *lfi_n = &cm->lf_info;
+ int mode = mbmi->mode;
+ int mode_index = lfi_n->mode_lf_lut[mode];
+ int seg = mbmi->segment_id;
+ int ref_frame = mbmi->ref_frame[0];
+ int filter_level = lfi_n->lvl[seg][ref_frame][mode_index];
+
+ if (filter_level) {
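+    // Filter levels 0..63 select one of four hev thresholds (level >> 4).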
+ const int hev_index = filter_level >> 4;
+ lfi->mblim = lfi_n->mblim[filter_level];
+ lfi->blim = lfi_n->blim[filter_level];
+ lfi->lim = lfi_n->lim[filter_level];
+ lfi->hev_thr = lfi_n->hev_thr[hev_index];
+ return 1;
+ }
+ return 0;
+}
+
+static void filter_selectively_vert(uint8_t *s, int pitch,
+ unsigned int mask_16x16,
+ unsigned int mask_8x8,
+ unsigned int mask_4x4,
+ unsigned int mask_4x4_int,
+ const struct loop_filter_info *lfi) {
+ unsigned int mask;
+
+ for (mask = mask_16x16 | mask_8x8 | mask_4x4; mask; mask >>= 1) {
+ if (mask & 1) {
+ if (mask_16x16 & 1) {
+ vp9_mb_lpf_vertical_edge_w(s, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr);
+ assert(!(mask_8x8 & 1));
+ assert(!(mask_4x4 & 1));
+ assert(!(mask_4x4_int & 1));
+ } else if (mask_8x8 & 1) {
+ vp9_mbloop_filter_vertical_edge(s, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+ assert(!(mask_16x16 & 1));
+ assert(!(mask_4x4 & 1));
+ } else if (mask_4x4 & 1) {
+ vp9_loop_filter_vertical_edge(s, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+ assert(!(mask_16x16 & 1));
+ assert(!(mask_8x8 & 1));
+ } else {
+ assert(0);
+ }
+
+ if (mask_4x4_int & 1)
+ vp9_loop_filter_vertical_edge(s + 4, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+ }
+ s += 8;
+ lfi++;
+ mask_16x16 >>= 1;
+ mask_8x8 >>= 1;
+ mask_4x4 >>= 1;
+ mask_4x4_int >>= 1;
+ }
+}
+
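+/* Note on the mask arguments (illustrative): bit n of each mask refers to
+ * the 8-pixel-wide column n of the current row of the 64x64 region, so
+ * e.g. mask_16x16 = 0x5 requests the widest filter at columns 0 and 2.
+ */
+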
+static void filter_selectively_horiz(uint8_t *s, int pitch,
+ unsigned int mask_16x16,
+ unsigned int mask_8x8,
+ unsigned int mask_4x4,
+ unsigned int mask_4x4_int,
+ int only_4x4_1,
+ const struct loop_filter_info *lfi) {
+ unsigned int mask;
+
+ for (mask = mask_16x16 | mask_8x8 | mask_4x4; mask; mask >>= 1) {
+ if (mask & 1) {
+ if (!only_4x4_1) {
+ if (mask_16x16 & 1) {
+ vp9_mb_lpf_horizontal_edge_w(s, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr);
+ assert(!(mask_8x8 & 1));
+ assert(!(mask_4x4 & 1));
+ assert(!(mask_4x4_int & 1));
+ } else if (mask_8x8 & 1) {
+ vp9_mbloop_filter_horizontal_edge(s, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+ assert(!(mask_16x16 & 1));
+ assert(!(mask_4x4 & 1));
+ } else if (mask_4x4 & 1) {
+ vp9_loop_filter_horizontal_edge(s, pitch, lfi->mblim, lfi->lim,
+ lfi->hev_thr, 1);
+ assert(!(mask_16x16 & 1));
+ assert(!(mask_8x8 & 1));
+ } else {
+ assert(0);
+ }
+ }
+
+ if (mask_4x4_int & 1)
+ vp9_loop_filter_horizontal_edge(s + 4 * pitch, pitch, lfi->mblim,
+ lfi->lim, lfi->hev_thr, 1);
+ }
+ s += 8;
+ lfi++;
+ mask_16x16 >>= 1;
+ mask_8x8 >>= 1;
+ mask_4x4 >>= 1;
+ mask_4x4_int >>= 1;
+ }
+}
+
+static void filter_block_plane(VP9_COMMON *cm, MACROBLOCKD *xd,
+ int plane, int mi_row, int mi_col) {
+ const int ss_x = xd->plane[plane].subsampling_x;
+ const int ss_y = xd->plane[plane].subsampling_y;
+ const int row_step = 1 << xd->plane[plane].subsampling_y;
+ const int col_step = 1 << xd->plane[plane].subsampling_x;
+ struct buf_2d * const dst = &xd->plane[plane].dst;
+ uint8_t* const dst0 = dst->buf;
+ MODE_INFO* const mi0 = xd->mode_info_context;
+ unsigned int mask_16x16[64 / MI_SIZE] = {0};
+ unsigned int mask_8x8[64 / MI_SIZE] = {0};
+ unsigned int mask_4x4[64 / MI_SIZE] = {0};
+ unsigned int mask_4x4_int[64 / MI_SIZE] = {0};
+ struct loop_filter_info lfi[64 / MI_SIZE][64 / MI_SIZE];
+ int r, c;
+
+ for (r = 0; r < 64 / MI_SIZE && mi_row + r < cm->mi_rows; r += row_step) {
+ unsigned int mask_16x16_c = 0;
+ unsigned int mask_8x8_c = 0;
+ unsigned int mask_4x4_c = 0;
+ unsigned int border_mask;
+
+ // Determine the vertical edges that need filtering
+ for (c = 0; c < 64 / MI_SIZE && mi_col + c < cm->mi_cols; c += col_step) {
+ const MODE_INFO * const mi = xd->mode_info_context;
+ const int skip_this = mi[c].mbmi.mb_skip_coeff
+ && mi[c].mbmi.ref_frame[0] != INTRA_FRAME;
+ // left edge of current unit is block/partition edge -> no skip
+ const int block_edge_left = b_width_log2(mi[c].mbmi.sb_type) ?
+ !(c & ((1 << (b_width_log2(mi[c].mbmi.sb_type)-1)) - 1)) : 1;
+ const int skip_this_c = skip_this && !block_edge_left;
+ // top edge of current unit is block/partition edge -> no skip
+ const int block_edge_above = b_height_log2(mi[c].mbmi.sb_type) ?
+ !(r & ((1 << (b_height_log2(mi[c].mbmi.sb_type)-1)) - 1)) : 1;
+ const int skip_this_r = skip_this && !block_edge_above;
+ const TX_SIZE tx_size = plane ? get_uv_tx_size(&mi[c].mbmi)
+ : mi[c].mbmi.txfm_size;
+ const int skip_border_4x4_c = ss_x && mi_col + c == cm->mi_cols - 1;
+ const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1;
+
+ // Filter level can vary per MI
+ if (!build_lfi(cm, &mi[c].mbmi,
+ lfi[r] + (c >> xd->plane[plane].subsampling_x)))
+ continue;
+
+ // Build masks based on the transform size of each block
+ if (tx_size == TX_32X32) {
+ if (!skip_this_c && ((c >> ss_x) & 3) == 0) {
+ if (!skip_border_4x4_c)
+ mask_16x16_c |= 1 << (c >> ss_x);
+ else
+ mask_8x8_c |= 1 << (c >> ss_x);
+ }
+ if (!skip_this_r && ((r >> ss_y) & 3) == 0) {
+ if (!skip_border_4x4_r)
+ mask_16x16[r] |= 1 << (c >> ss_x);
+ else
+ mask_8x8[r] |= 1 << (c >> ss_x);
+ }
+ } else if (tx_size == TX_16X16) {
+ if (!skip_this_c && ((c >> ss_x) & 1) == 0) {
+ if (!skip_border_4x4_c)
+ mask_16x16_c |= 1 << (c >> ss_x);
+ else
+ mask_8x8_c |= 1 << (c >> ss_x);
+ }
+ if (!skip_this_r && ((r >> ss_y) & 1) == 0) {
+ if (!skip_border_4x4_r)
+ mask_16x16[r] |= 1 << (c >> ss_x);
+ else
+ mask_8x8[r] |= 1 << (c >> ss_x);
+ }
+ } else {
+ // force 8x8 filtering on 32x32 boundaries
+ if (!skip_this_c) {
+ if (tx_size == TX_8X8 || ((c >> ss_x) & 3) == 0)
+ mask_8x8_c |= 1 << (c >> ss_x);
+ else
+ mask_4x4_c |= 1 << (c >> ss_x);
+ }
+
+ if (!skip_this_r) {
+ if (tx_size == TX_8X8 || ((r >> ss_y) & 3) == 0)
+ mask_8x8[r] |= 1 << (c >> ss_x);
+ else
+ mask_4x4[r] |= 1 << (c >> ss_x);
+ }
+
+ if (!skip_this && tx_size < TX_8X8 && !skip_border_4x4_c)
+ mask_4x4_int[r] |= 1 << (c >> ss_x);
+ }
+ }
+
+ // Disable filtering on the leftmost column
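+    // (~0 keeps every column; when mi_col == 0, ~1 clears bit 0 only.)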
+ border_mask = ~(mi_col == 0);
+ filter_selectively_vert(dst->buf, dst->stride,
+ mask_16x16_c & border_mask,
+ mask_8x8_c & border_mask,
+ mask_4x4_c & border_mask,
+ mask_4x4_int[r], lfi[r]);
+ dst->buf += 8 * dst->stride;
+ xd->mode_info_context += cm->mode_info_stride * row_step;
+ }
+
+ // Now do horizontal pass
+ dst->buf = dst0;
+ xd->mode_info_context = mi0;
+ for (r = 0; r < 64 / MI_SIZE && mi_row + r < cm->mi_rows; r += row_step) {
+ const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1;
+ const unsigned int mask_4x4_int_r = skip_border_4x4_r ? 0 : mask_4x4_int[r];
+
+ filter_selectively_horiz(dst->buf, dst->stride,
+ mask_16x16[r],
+ mask_8x8[r],
+ mask_4x4[r],
+ mask_4x4_int_r, mi_row + r == 0, lfi[r]);
+ dst->buf += 8 * dst->stride;
+ xd->mode_info_context += cm->mode_info_stride * row_step;
+ }
+}
+
+void vp9_loop_filter_frame(VP9_COMMON *cm,
+ MACROBLOCKD *xd,
+ int frame_filter_level,
+ int y_only) {
+ int mi_row, mi_col;
+
+ // Initialize the loop filter for this frame.
+ vp9_loop_filter_frame_init(cm, xd, frame_filter_level);
+
+ for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 64 / MI_SIZE) {
+ MODE_INFO* const mi = cm->mi + mi_row * cm->mode_info_stride;
+
+ for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 64 / MI_SIZE) {
+ int plane;
+
+ setup_dst_planes(xd, cm->frame_to_show, mi_row, mi_col);
+ for (plane = 0; plane < (y_only ? 1 : MAX_MB_PLANE); plane++) {
+ xd->mode_info_context = mi + mi_col;
+ filter_block_plane(cm, xd, plane, mi_row, mi_col);
+ }
+ }
+ }
+}
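+
+#if 0
+/* Minimal usage sketch (illustrative only; assumes cm and xd have already
+ * been fully set up by the decoder): filter every plane of the frame to
+ * show at the level signalled in the frame header. */
+static void example_loop_filter_call(VP9_COMMON *cm, MACROBLOCKD *xd,
+                                     int frame_filter_level) {
+  vp9_loop_filter_frame(cm, xd, frame_filter_level, 0 /* y_only */);
+}
+#endif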
diff --git a/libvpx/vp9/common/vp9_loopfilter.h b/libvpx/vp9/common/vp9_loopfilter.h
new file mode 100644
index 0000000..ce954c0
--- /dev/null
+++ b/libvpx/vp9/common/vp9_loopfilter.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_LOOPFILTER_H_
+#define VP9_COMMON_VP9_LOOPFILTER_H_
+
+#include "vpx_ports/mem.h"
+#include "vpx_config.h"
+#include "vp9/common/vp9_blockd.h"
+
+#define MAX_LOOP_FILTER 63
+#define SIMD_WIDTH 16
+
+/* Need to align this structure so that when it is declared and
+ * passed it can be loaded into vector registers.
+ */
+typedef struct {
+ DECLARE_ALIGNED(SIMD_WIDTH, unsigned char,
+ mblim[MAX_LOOP_FILTER + 1][SIMD_WIDTH]);
+ DECLARE_ALIGNED(SIMD_WIDTH, unsigned char,
+ blim[MAX_LOOP_FILTER + 1][SIMD_WIDTH]);
+ DECLARE_ALIGNED(SIMD_WIDTH, unsigned char,
+ lim[MAX_LOOP_FILTER + 1][SIMD_WIDTH]);
+ DECLARE_ALIGNED(SIMD_WIDTH, unsigned char,
+ hev_thr[4][SIMD_WIDTH]);
+ unsigned char lvl[MAX_MB_SEGMENTS][4][4];
+ unsigned char mode_lf_lut[MB_MODE_COUNT];
+} loop_filter_info_n;
+
+struct loop_filter_info {
+ const unsigned char *mblim;
+ const unsigned char *blim;
+ const unsigned char *lim;
+ const unsigned char *hev_thr;
+};
+
+#define prototype_loopfilter(sym) \
+ void sym(uint8_t *src, int pitch, const unsigned char *blimit, \
+ const unsigned char *limit, const unsigned char *thresh, int count)
+
+#define prototype_loopfilter_block(sym) \
+ void sym(uint8_t *y, uint8_t *u, uint8_t *v, \
+ int ystride, int uv_stride, struct loop_filter_info *lfi)
+
+#if ARCH_X86 || ARCH_X86_64
+#include "x86/vp9_loopfilter_x86.h"
+#endif
+
+typedef void loop_filter_uvfunction(uint8_t *u, /* source pointer */
+ int p, /* pitch */
+ const unsigned char *blimit,
+ const unsigned char *limit,
+ const unsigned char *thresh,
+ uint8_t *v);
+
+/* assorted loopfilter functions which get used elsewhere */
+struct VP9Common;
+struct macroblockd;
+
+void vp9_loop_filter_init(struct VP9Common *cm);
+
+void vp9_loop_filter_frame_init(struct VP9Common *cm,
+ struct macroblockd *mbd,
+ int default_filt_lvl);
+
+void vp9_loop_filter_frame(struct VP9Common *cm,
+ struct macroblockd *mbd,
+ int filter_level,
+ int y_only);
+
+void vp9_loop_filter_partial_frame(struct VP9Common *cm,
+ struct macroblockd *mbd,
+ int default_filt_lvl);
+
+void vp9_loop_filter_update_sharpness(loop_filter_info_n *lfi,
+ int sharpness_lvl);
+
+#endif // VP9_COMMON_VP9_LOOPFILTER_H_
diff --git a/libvpx/vp9/common/vp9_loopfilter_filters.c b/libvpx/vp9/common/vp9_loopfilter_filters.c
new file mode 100644
index 0000000..0efbcaf
--- /dev/null
+++ b/libvpx/vp9/common/vp9_loopfilter_filters.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_loopfilter.h"
+#include "vp9/common/vp9_onyxc_int.h"
+
+static INLINE int8_t signed_char_clamp(int t) {
+ return (int8_t)clamp(t, -128, 127);
+}
+
+// should we apply any filter at all: 11111111 yes, 00000000 no
+static INLINE int8_t filter_mask(uint8_t limit, uint8_t blimit,
+ uint8_t p3, uint8_t p2,
+ uint8_t p1, uint8_t p0,
+ uint8_t q0, uint8_t q1,
+ uint8_t q2, uint8_t q3) {
+ int8_t mask = 0;
+ mask |= (abs(p3 - p2) > limit) * -1;
+ mask |= (abs(p2 - p1) > limit) * -1;
+ mask |= (abs(p1 - p0) > limit) * -1;
+ mask |= (abs(q1 - q0) > limit) * -1;
+ mask |= (abs(q2 - q1) > limit) * -1;
+ mask |= (abs(q3 - q2) > limit) * -1;
+ mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
+ return ~mask;
+}
+
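+/* Worked example (illustrative thresholds): with limit = 4 and blimit = 44,
+ * a perfectly flat run of pixels exceeds no threshold, so mask stays 0 and
+ * the returned ~mask is 0xff (apply the filter); a step of 50 between p0
+ * and q0 gives 2 * 50 + 0 > 44, so the returned mask is 0x00 (skip).
+ */
+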
+// is there high edge variance at this internal edge: 11111111 yes, 00000000 no
+static INLINE int8_t hevmask(uint8_t thresh, uint8_t p1, uint8_t p0,
+ uint8_t q0, uint8_t q1) {
+ int8_t hev = 0;
+ hev |= (abs(p1 - p0) > thresh) * -1;
+ hev |= (abs(q1 - q0) > thresh) * -1;
+ return hev;
+}
+
+static INLINE void filter(int8_t mask, uint8_t hev, uint8_t *op1,
+ uint8_t *op0, uint8_t *oq0, uint8_t *oq1) {
+ int8_t filter1, filter2;
+
+ const int8_t ps1 = (int8_t) *op1 ^ 0x80;
+ const int8_t ps0 = (int8_t) *op0 ^ 0x80;
+ const int8_t qs0 = (int8_t) *oq0 ^ 0x80;
+ const int8_t qs1 = (int8_t) *oq1 ^ 0x80;
+
+ // add outer taps if we have high edge variance
+ int8_t filter = signed_char_clamp(ps1 - qs1) & hev;
+
+ // inner taps
+ filter = signed_char_clamp(filter + 3 * (qs0 - ps0)) & mask;
+
+ // save bottom 3 bits so that we round one side +4 and the other +3
+ // if it equals 4 we'll set to adjust by -1 to account for the fact
+ // we'd round 3 the other way
+ filter1 = signed_char_clamp(filter + 4) >> 3;
+ filter2 = signed_char_clamp(filter + 3) >> 3;
+
+ *oq0 = signed_char_clamp(qs0 - filter1) ^ 0x80;
+ *op0 = signed_char_clamp(ps0 + filter2) ^ 0x80;
+
+ // outer tap adjustments
+ filter = ((filter1 + 1) >> 1) & ~hev;
+
+ *oq1 = signed_char_clamp(qs1 - filter) ^ 0x80;
+ *op1 = signed_char_clamp(ps1 + filter) ^ 0x80;
+}
+
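+/* Rounding example for the +4/+3 trick in filter() (illustrative): a raw
+ * filter value of 6 gives filter1 = (6 + 4) >> 3 = 1 and
+ * filter2 = (6 + 3) >> 3 = 1, so q0 moves down by 1, p0 up by 1, and the
+ * outer taps move by ((1 + 1) >> 1) = 1 where hev is not set.
+ */
+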
+void vp9_loop_filter_horizontal_edge_c(uint8_t *s, int p /* pitch */,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh,
+ int count) {
+ int i;
+
+ // loop filter designed to work using chars so that we can make maximum use
+ // of 8 bit simd instructions.
+ for (i = 0; i < 8 * count; ++i) {
+ const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
+ const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
+ const int8_t mask = filter_mask(*limit, *blimit,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t hev = hevmask(*thresh, p1, p0, q0, q1);
+ filter(mask, hev, s - 2 * p, s - 1 * p, s, s + 1 * p);
+ ++s;
+ }
+}
+
+void vp9_loop_filter_vertical_edge_c(uint8_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh,
+ int count) {
+ int i;
+
+ // loop filter designed to work using chars so that we can make maximum use
+ // of 8 bit simd instructions.
+ for (i = 0; i < 8 * count; ++i) {
+ const uint8_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1];
+ const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];
+ const int8_t mask = filter_mask(*limit, *blimit,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t hev = hevmask(*thresh, p1, p0, q0, q1);
+ filter(mask, hev, s - 2, s - 1, s, s + 1);
+ s += pitch;
+ }
+}
+
+static INLINE int8_t flatmask4(uint8_t thresh,
+ uint8_t p3, uint8_t p2,
+ uint8_t p1, uint8_t p0,
+ uint8_t q0, uint8_t q1,
+ uint8_t q2, uint8_t q3) {
+ int8_t flat = 0;
+ flat |= (abs(p1 - p0) > thresh) * -1;
+ flat |= (abs(q1 - q0) > thresh) * -1;
+ flat |= (abs(p0 - p2) > thresh) * -1;
+ flat |= (abs(q0 - q2) > thresh) * -1;
+ flat |= (abs(p3 - p0) > thresh) * -1;
+ flat |= (abs(q3 - q0) > thresh) * -1;
+ return ~flat;
+}
+
+static INLINE int8_t flatmask5(uint8_t thresh,
+ uint8_t p4, uint8_t p3, uint8_t p2,
+ uint8_t p1, uint8_t p0,
+ uint8_t q0, uint8_t q1, uint8_t q2,
+ uint8_t q3, uint8_t q4) {
+ int8_t flat = 0;
+ flat |= (abs(p4 - p0) > thresh) * -1;
+ flat |= (abs(q4 - q0) > thresh) * -1;
+ flat = ~flat;
+ return flat & flatmask4(thresh, p3, p2, p1, p0, q0, q1, q2, q3);
+}
+
+static INLINE void mbfilter(int8_t mask, uint8_t hev, uint8_t flat,
+ uint8_t *op3, uint8_t *op2,
+ uint8_t *op1, uint8_t *op0,
+ uint8_t *oq0, uint8_t *oq1,
+ uint8_t *oq2, uint8_t *oq3) {
+ // use a 7 tap filter [1, 1, 1, 2, 1, 1, 1] for flat line
+ if (flat && mask) {
+ const uint8_t p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0;
+ const uint8_t q0 = *oq0, q1 = *oq1, q2 = *oq2, q3 = *oq3;
+
+ *op2 = ROUND_POWER_OF_TWO(p3 + p3 + p3 + p2 + p2 + p1 + p0 + q0, 3);
+ *op1 = ROUND_POWER_OF_TWO(p3 + p3 + p2 + p1 + p1 + p0 + q0 + q1, 3);
+ *op0 = ROUND_POWER_OF_TWO(p3 + p2 + p1 + p0 + p0 + q0 + q1 + q2, 3);
+ *oq0 = ROUND_POWER_OF_TWO(p2 + p1 + p0 + q0 + q0 + q1 + q2 + q3, 3);
+ *oq1 = ROUND_POWER_OF_TWO(p1 + p0 + q0 + q1 + q1 + q2 + q3 + q3, 3);
+ *oq2 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + q2 + q2 + q3 + q3 + q3, 3);
+ } else {
+ filter(mask, hev, op1, op0, oq0, oq1);
+ }
+}
+
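+/* The flat branch above is a smoothing average whose weights sum to 8,
+ * e.g. op2 = (3 * p3 + 2 * p2 + p1 + p0 + q0 + 4) >> 3; the +4 rounding
+ * term comes from ROUND_POWER_OF_TWO.
+ */
+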
+void vp9_mbloop_filter_horizontal_edge_c(uint8_t *s, int p,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh,
+ int count) {
+ int i;
+
+ // loop filter designed to work using chars so that we can make maximum use
+ // of 8 bit simd instructions.
+ for (i = 0; i < 8 * count; ++i) {
+ const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
+ const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
+
+ const int8_t mask = filter_mask(*limit, *blimit,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t hev = hevmask(*thresh, p1, p0, q0, q1);
+ const int8_t flat = flatmask4(1, p3, p2, p1, p0, q0, q1, q2, q3);
+ mbfilter(mask, hev, flat,
+ s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p,
+ s, s + 1 * p, s + 2 * p, s + 3 * p);
+ ++s;
+ }
+}
+
+void vp9_mbloop_filter_vertical_edge_c(uint8_t *s, int pitch,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh,
+ int count) {
+ int i;
+
+ for (i = 0; i < 8 * count; ++i) {
+ const uint8_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1];
+ const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];
+ const int8_t mask = filter_mask(*limit, *blimit,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+    const int8_t hev = hevmask(*thresh, p1, p0, q0, q1);
+ const int8_t flat = flatmask4(1, p3, p2, p1, p0, q0, q1, q2, q3);
+ mbfilter(mask, hev, flat, s - 4, s - 3, s - 2, s - 1,
+ s, s + 1, s + 2, s + 3);
+ s += pitch;
+ }
+}
+
+static INLINE void wide_mbfilter(int8_t mask, uint8_t hev,
+ uint8_t flat, uint8_t flat2,
+ uint8_t *op7, uint8_t *op6, uint8_t *op5,
+ uint8_t *op4, uint8_t *op3, uint8_t *op2,
+ uint8_t *op1, uint8_t *op0, uint8_t *oq0,
+ uint8_t *oq1, uint8_t *oq2, uint8_t *oq3,
+ uint8_t *oq4, uint8_t *oq5, uint8_t *oq6,
+ uint8_t *oq7) {
+ // use a 15 tap filter [1,1,1,1,1,1,1,2,1,1,1,1,1,1,1] for flat line
+ if (flat2 && flat && mask) {
+ const uint8_t p7 = *op7, p6 = *op6, p5 = *op5, p4 = *op4,
+ p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0;
+
+ const uint8_t q0 = *oq0, q1 = *oq1, q2 = *oq2, q3 = *oq3,
+ q4 = *oq4, q5 = *oq5, q6 = *oq6, q7 = *oq7;
+
+ *op6 = ROUND_POWER_OF_TWO(p7 * 7 + p6 * 2 + p5 + p4 + p3 + p2 + p1 + p0 +
+ q0, 4);
+ *op5 = ROUND_POWER_OF_TWO(p7 * 6 + p6 + p5 * 2 + p4 + p3 + p2 + p1 + p0 +
+ q0 + q1, 4);
+ *op4 = ROUND_POWER_OF_TWO(p7 * 5 + p6 + p5 + p4 * 2 + p3 + p2 + p1 + p0 +
+ q0 + q1 + q2, 4);
+ *op3 = ROUND_POWER_OF_TWO(p7 * 4 + p6 + p5 + p4 + p3 * 2 + p2 + p1 + p0 +
+ q0 + q1 + q2 + q3, 4);
+ *op2 = ROUND_POWER_OF_TWO(p7 * 3 + p6 + p5 + p4 + p3 + p2 * 2 + p1 + p0 +
+ q0 + q1 + q2 + q3 + q4, 4);
+ *op1 = ROUND_POWER_OF_TWO(p7 * 2 + p6 + p5 + p4 + p3 + p2 + p1 * 2 + p0 +
+ q0 + q1 + q2 + q3 + q4 + q5, 4);
+ *op0 = ROUND_POWER_OF_TWO(p7 + p6 + p5 + p4 + p3 + p2 + p1 + p0 * 2 +
+ q0 + q1 + q2 + q3 + q4 + q5 + q6, 4);
+ *oq0 = ROUND_POWER_OF_TWO(p6 + p5 + p4 + p3 + p2 + p1 + p0 +
+ q0 * 2 + q1 + q2 + q3 + q4 + q5 + q6 + q7, 4);
+ *oq1 = ROUND_POWER_OF_TWO(p5 + p4 + p3 + p2 + p1 + p0 +
+ q0 + q1 * 2 + q2 + q3 + q4 + q5 + q6 + q7 * 2, 4);
+ *oq2 = ROUND_POWER_OF_TWO(p4 + p3 + p2 + p1 + p0 +
+ q0 + q1 + q2 * 2 + q3 + q4 + q5 + q6 + q7 * 3, 4);
+ *oq3 = ROUND_POWER_OF_TWO(p3 + p2 + p1 + p0 +
+ q0 + q1 + q2 + q3 * 2 + q4 + q5 + q6 + q7 * 4, 4);
+ *oq4 = ROUND_POWER_OF_TWO(p2 + p1 + p0 +
+ q0 + q1 + q2 + q3 + q4 * 2 + q5 + q6 + q7 * 5, 4);
+ *oq5 = ROUND_POWER_OF_TWO(p1 + p0 +
+ q0 + q1 + q2 + q3 + q4 + q5 * 2 + q6 + q7 * 6, 4);
+ *oq6 = ROUND_POWER_OF_TWO(p0 +
+ q0 + q1 + q2 + q3 + q4 + q5 + q6 * 2 + q7 * 7, 4);
+ } else {
+ mbfilter(mask, hev, flat, op3, op2, op1, op0, oq0, oq1, oq2, oq3);
+ }
+}
+
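+/* Likewise, each wide-filter output is a weighted average whose weights
+ * sum to 16, e.g. op6 = (7 * p7 + 2 * p6 + p5 + p4 + p3 + p2 + p1 + p0 +
+ * q0 + 8) >> 4.
+ */
+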
+void vp9_mb_lpf_horizontal_edge_w_c(uint8_t *s, int p,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh) {
+ int i;
+
+ // loop filter designed to work using chars so that we can make maximum use
+ // of 8 bit simd instructions.
+ for (i = 0; i < 8; ++i) {
+ const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p];
+ const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];
+ const int8_t mask = filter_mask(*limit, *blimit,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t hev = hevmask(*thresh, p1, p0, q0, q1);
+ const int8_t flat = flatmask4(1, p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t flat2 = flatmask5(1,
+ s[-8 * p], s[-7 * p], s[-6 * p], s[-5 * p], p0,
+ q0, s[4 * p], s[5 * p], s[6 * p], s[7 * p]);
+
+ wide_mbfilter(mask, hev, flat, flat2,
+ s - 8 * p, s - 7 * p, s - 6 * p, s - 5 * p,
+ s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p,
+ s, s + 1 * p, s + 2 * p, s + 3 * p,
+ s + 4 * p, s + 5 * p, s + 6 * p, s + 7 * p);
+
+ ++s;
+ }
+}
+
+void vp9_mb_lpf_vertical_edge_w_c(uint8_t *s, int p,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh) {
+ int i;
+
+ for (i = 0; i < 8; ++i) {
+ const uint8_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1];
+ const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];
+ const int8_t mask = filter_mask(*limit, *blimit,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t hev = hevmask(*thresh, p1, p0, q0, q1);
+ const int8_t flat = flatmask4(1, p3, p2, p1, p0, q0, q1, q2, q3);
+ const int8_t flat2 = flatmask5(1, s[-8], s[-7], s[-6], s[-5], p0,
+ q0, s[4], s[5], s[6], s[7]);
+
+ wide_mbfilter(mask, hev, flat, flat2,
+ s - 8, s - 7, s - 6, s - 5, s - 4, s - 3, s - 2, s - 1,
+ s, s + 1, s + 2, s + 3, s + 4, s + 5, s + 6, s + 7);
+ s += p;
+ }
+}
diff --git a/libvpx/vp9/common/vp9_maskingmv.c b/libvpx/vp9/common/vp9_maskingmv.c
new file mode 100644
index 0000000..326201b
--- /dev/null
+++ b/libvpx/vp9/common/vp9_maskingmv.c
@@ -0,0 +1,803 @@
+/*
+ ============================================================================
+ Name        : vp9_maskingmv.c
+ Author      : jimbankoski
+ Description : Standalone experiment: segment-mask-based motion search
+               (finds masked/unmasked motion vectors for 16x16 blocks).
+ ============================================================================
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+unsigned int vp9_sad16x16_sse3(
+ unsigned char *src_ptr,
+ int src_stride,
+ unsigned char *ref_ptr,
+ int ref_stride,
+ int max_err);
+
+int vp8_growmaskmb_sse3(
+ unsigned char *om,
+ unsigned char *nm);
+
+void vp8_makemask_sse3(
+ unsigned char *y,
+ unsigned char *u,
+ unsigned char *v,
+ unsigned char *ym,
+ int yp,
+ int uvp,
+ int ys,
+ int us,
+ int vs,
+ int yt,
+ int ut,
+ int vt);
+
+unsigned int vp9_sad16x16_unmasked_wmt(
+ unsigned char *src_ptr,
+ int src_stride,
+ unsigned char *ref_ptr,
+ int ref_stride,
+ unsigned char *mask);
+
+unsigned int vp9_sad16x16_masked_wmt(
+ unsigned char *src_ptr,
+ int src_stride,
+ unsigned char *ref_ptr,
+ int ref_stride,
+ unsigned char *mask);
+
+unsigned int vp8_masked_predictor_wmt(
+ unsigned char *masked,
+ unsigned char *unmasked,
+ int src_stride,
+ unsigned char *dst_ptr,
+ int dst_stride,
+ unsigned char *mask);
+unsigned int vp8_masked_predictor_uv_wmt(
+ unsigned char *masked,
+ unsigned char *unmasked,
+ int src_stride,
+ unsigned char *dst_ptr,
+ int dst_stride,
+ unsigned char *mask);
+unsigned int vp8_uv_from_y_mask(
+ unsigned char *ymask,
+ unsigned char *uvmask);
+int yp = 16;
+unsigned char sxy[] = {
+ 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 80, 120, 120, 90, 90, 90, 90, 90, 80, 120, 120, 90, 90, 90, 90, 90,
+ 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 80, 120, 120, 90, 90, 90, 90, 90, 80, 120, 120, 90, 90, 90, 90, 90,
+ 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40, 80, 120, 120, 90, 90, 90, 90, 90, 80, 120, 120, 90, 90, 90, 90, 90,
+ 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40, 80, 120, 120, 90, 90, 90, 90, 90, 80, 120, 120, 90, 90, 90, 90, 90,
+ 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 80, 120, 120, 90, 90, 90, 90, 90, 80, 120, 120, 90, 90, 90, 90, 90,
+ 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 80, 120, 120, 90, 90, 90, 90, 90, 80, 120, 120, 90, 90, 90, 90, 90,
+ 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 80, 120, 120, 90, 90, 90, 90, 90, 80, 120, 120, 90, 90, 90, 90, 90,
+ 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 80, 120, 120, 90, 90, 90, 90, 90, 80, 120, 120, 90, 90, 90, 90, 90,
+ 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 80, 120, 120, 90, 90, 90, 90, 90, 80, 120, 120, 90, 90, 90, 90, 90,
+ 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 80, 120, 120, 90, 90, 90, 90, 90, 80, 120, 120, 90, 90, 90, 90, 90,
+ 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40, 80, 120, 120, 90, 90, 90, 90, 90, 80, 120, 120, 90, 90, 90, 90, 90,
+ 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40, 80, 120, 120, 90, 90, 90, 90, 90, 80, 120, 120, 90, 90, 90, 90, 90,
+ 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 80, 120, 120, 90, 90, 90, 90, 90, 80, 120, 120, 90, 90, 90, 90, 90,
+ 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 80, 120, 120, 90, 90, 90, 90, 90, 80, 120, 120, 90, 90, 90, 90, 90,
+ 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 80, 120, 120, 90, 90, 90, 90, 90, 80, 120, 120, 90, 90, 90, 90, 90,
+ 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 80, 120, 120, 90, 90, 90, 90, 90, 80, 120, 120, 90, 90, 90, 90, 90
+};
+
+unsigned char sts[] = {
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+};
+unsigned char str[] = {
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+};
+
+unsigned char y[] = {
+ 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40,
+ 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40,
+ 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40,
+ 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40,
+ 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40, 40,
+ 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 40,
+ 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 40,
+ 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 40,
+ 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40, 40,
+ 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40, 40,
+ 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40,
+ 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40, 40,
+ 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40,
+ 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40,
+ 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40,
+ 40, 40, 40, 60, 60, 60, 60, 40, 40, 40, 40, 60, 60, 60, 60, 40
+};
+int uvp = 8;
+unsigned char u[] = {
+ 90, 80, 70, 70, 90, 90, 90, 17,
+ 90, 80, 70, 70, 90, 90, 90, 17,
+ 84, 70, 70, 90, 90, 90, 17, 17,
+ 84, 70, 70, 90, 90, 90, 17, 17,
+ 80, 70, 70, 90, 90, 90, 17, 17,
+ 90, 80, 70, 70, 90, 90, 90, 17,
+ 90, 80, 70, 70, 90, 90, 90, 17,
+ 90, 80, 70, 70, 90, 90, 90, 17
+};
+
+unsigned char v[] = {
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80
+};
+
+unsigned char ym[256];
+unsigned char uvm[64];
+typedef struct {
+ unsigned char y;
+ unsigned char yt;
+ unsigned char u;
+ unsigned char ut;
+ unsigned char v;
+ unsigned char vt;
+ unsigned char use;
+} COLOR_SEG_ELEMENT;
+
+/*
+COLOR_SEG_ELEMENT segmentation[]=
+{
+ { 60,4,80,17,80,10, 1},
+ { 40,4,15,10,80,10, 1},
+};
+*/
+
+COLOR_SEG_ELEMENT segmentation[] = {
+ { 79, 44, 92, 44, 237, 60, 1},
+};
+
+unsigned char pixel_mask(unsigned char y, unsigned char u, unsigned char v,
+ COLOR_SEG_ELEMENT sgm[],
+ int c) {
+ COLOR_SEG_ELEMENT *s = sgm;
+ unsigned char m = 0;
+ int i;
+ for (i = 0; i < c; i++, s++)
+ m |= (abs(y - s->y) < s->yt &&
+ abs(u - s->u) < s->ut &&
+ abs(v - s->v) < s->vt ? 255 : 0);
+
+ return m;
+}
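+
+/* Example (illustrative values, taken from the commented-out segmentation
+ * above): for a segment {y=60, yt=4, u=80, ut=17, v=80, vt=10}, a pixel
+ * (y=62, u=70, v=85) is inside every threshold and maps to 255, while
+ * y=40 already fails the luma test and maps to 0.
+ */
+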
+int neighbors[256][8];
+int makeneighbors(void) {
+ int i, j;
+ for (i = 0; i < 256; i++) {
+ int r = (i >> 4), c = (i & 15);
+ int ni = 0;
+ for (j = 0; j < 8; j++)
+ neighbors[i][j] = i;
+ for (j = 0; j < 256; j++) {
+ int nr = (j >> 4), nc = (j & 15);
+      // Skip self: interior pixels then have exactly 8 true neighbours,
+      // which fits the table (including self would overrun it).
+      if (j != i && abs(nr - r) < 2 && abs(nc - c) < 2)
+        neighbors[i][ni++] = j;
+ }
+ }
+ return 0;
+}
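+
+/* neighbors[i] holds the indices of the (up to 8) pixels adjacent to pixel
+ * i in a 16x16 block, padded with i itself at the borders; grow_ymask uses
+ * it as a one-pass 3x3 dilation of the mask.
+ */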
+void grow_ymask(unsigned char *ym) {
+ unsigned char nym[256];
+ int i, j;
+
+ for (i = 0; i < 256; i++) {
+ nym[i] = ym[i];
+ for (j = 0; j < 8; j++) {
+ nym[i] |= ym[neighbors[i][j]];
+ }
+ }
+ for (i = 0; i < 256; i++)
+ ym[i] = nym[i];
+}
+
+void make_mb_mask(unsigned char *y, unsigned char *u, unsigned char *v,
+ unsigned char *ym, unsigned char *uvm,
+ int yp, int uvp,
+ COLOR_SEG_ELEMENT sgm[],
+ int count) {
+ int r, c;
+ unsigned char *oym = ym;
+
+ memset(ym, 20, 256);
+ for (r = 0; r < 8; r++, uvm += 8, u += uvp, v += uvp, y += (yp + yp), ym += 32)
+ for (c = 0; c < 8; c++) {
+ int y1 = y[c << 1];
+ int u1 = u[c];
+ int v1 = v[c];
+ int m = pixel_mask(y1, u1, v1, sgm, count);
+ uvm[c] = m;
+ ym[c << 1] = uvm[c]; // = pixel_mask(y[c<<1],u[c],v[c],sgm,count);
+ ym[(c << 1) + 1] = pixel_mask(y[1 + (c << 1)], u[c], v[c], sgm, count);
+ ym[(c << 1) + 16] = pixel_mask(y[yp + (c << 1)], u[c], v[c], sgm, count);
+ ym[(c << 1) + 17] = pixel_mask(y[1 + yp + (c << 1)], u[c], v[c], sgm, count);
+ }
+ grow_ymask(oym);
+}
+
+int masked_sad(unsigned char *src, int p, unsigned char *dst, int dp,
+ unsigned char *ym) {
+ int i, j;
+ unsigned sad = 0;
+ for (i = 0; i < 16; i++, src += p, dst += dp, ym += 16)
+ for (j = 0; j < 16; j++)
+ if (ym[j])
+ sad += abs(src[j] - dst[j]);
+
+ return sad;
+}
+
+int compare_masks(unsigned char *sym, unsigned char *ym) {
+ int i, j;
+ unsigned sad = 0;
+ for (i = 0; i < 16; i++, sym += 16, ym += 16)
+ for (j = 0; j < 16; j++)
+ sad += (sym[j] != ym[j] ? 1 : 0);
+
+ return sad;
+}
+
+int unmasked_sad(unsigned char *src, int p, unsigned char *dst, int dp,
+ unsigned char *ym) {
+ int i, j;
+ unsigned sad = 0;
+ for (i = 0; i < 16; i++, src += p, dst += dp, ym += 16)
+ for (j = 0; j < 16; j++)
+ if (!ym[j])
+ sad += abs(src[j] - dst[j]);
+
+ return sad;
+}
+
+int masked_motion_search(unsigned char *y, unsigned char *u, unsigned char *v,
+ int yp, int uvp,
+ unsigned char *dy, unsigned char *du, unsigned char *dv,
+ int dyp, int duvp,
+ COLOR_SEG_ELEMENT sgm[],
+ int count,
+ int *mi,
+ int *mj,
+ int *ui,
+ int *uj,
+ int *wm) {
+ int i, j;
+
+ unsigned char ym[256];
+ unsigned char uvm[64];
+ unsigned char dym[256];
+ unsigned char duvm[64];
+ unsigned int e = 0;
+ int beste = 256;
+ int bmi = -32, bmj = -32;
+ int bui = -32, buj = -32;
+ int beste1 = 256;
+ int bmi1 = -32, bmj1 = -32;
+ int bui1 = -32, buj1 = -32;
+ int obeste;
+
+ // first try finding best mask and then unmasked
+ beste = 0xffffffff;
+
+ // find best unmasked mv
+ for (i = -32; i < 32; i++) {
+ unsigned char *dyz = i * dyp + dy;
+ unsigned char *duz = i / 2 * duvp + du;
+ unsigned char *dvz = i / 2 * duvp + dv;
+ for (j = -32; j < 32; j++) {
+ // 0,0 masked destination
+ make_mb_mask(dyz + j, duz + j / 2, dvz + j / 2, dym, duvm, dyp, duvp, sgm, count);
+
+ e = unmasked_sad(y, yp, dyz + j, dyp, dym);
+
+ if (e < beste) {
+ bui = i;
+ buj = j;
+ beste = e;
+ }
+ }
+ }
+ // best mv masked destination
+ make_mb_mask(dy + bui * dyp + buj, du + bui / 2 * duvp + buj / 2, dv + bui / 2 * duvp + buj / 2,
+ dym, duvm, dyp, duvp, sgm, count);
+
+ obeste = beste;
+ beste = 0xffffffff;
+
+ // find best masked
+ for (i = -32; i < 32; i++) {
+ unsigned char *dyz = i * dyp + dy;
+ for (j = -32; j < 32; j++) {
+ e = masked_sad(y, yp, dyz + j, dyp, dym);
+
+ if (e < beste) {
+ bmi = i;
+ bmj = j;
+ beste = e;
+ }
+ }
+ }
+ beste1 = beste + obeste;
+ bmi1 = bmi;
+ bmj1 = bmj;
+ bui1 = bui;
+ buj1 = buj;
+
+ beste = 0xffffffff;
+ // source mask
+ make_mb_mask(y, u, v, ym, uvm, yp, uvp, sgm, count);
+
+ // find best mask
+ for (i = -32; i < 32; i++) {
+ unsigned char *dyz = i * dyp + dy;
+ unsigned char *duz = i / 2 * duvp + du;
+ unsigned char *dvz = i / 2 * duvp + dv;
+ for (j = -32; j < 32; j++) {
+ // 0,0 masked destination
+ make_mb_mask(dyz + j, duz + j / 2, dvz + j / 2, dym, duvm, dyp, duvp, sgm, count);
+
+ e = compare_masks(ym, dym);
+
+ if (e < beste) {
+ bmi = i;
+ bmj = j;
+ beste = e;
+ }
+ }
+ }
+
+
+ // best mv masked destination
+ make_mb_mask(dy + bmi * dyp + bmj, du + bmi / 2 * duvp + bmj / 2, dv + bmi / 2 * duvp + bmj / 2,
+ dym, duvm, dyp, duvp, sgm, count);
+
+ obeste = masked_sad(y, yp, dy + bmi * dyp + bmj, dyp, dym);
+
+ beste = 0xffffffff;
+
+ // find best unmasked mv
+ for (i = -32; i < 32; i++) {
+ unsigned char *dyz = i * dyp + dy;
+ for (j = -32; j < 32; j++) {
+ e = unmasked_sad(y, yp, dyz + j, dyp, dym);
+
+ if (e < beste) {
+ bui = i;
+ buj = j;
+ beste = e;
+ }
+ }
+ }
+ beste += obeste;
+
+
+ if (beste < beste1) {
+ *mi = bmi;
+ *mj = bmj;
+ *ui = bui;
+ *uj = buj;
+ *wm = 1;
+ } else {
+ *mi = bmi1;
+ *mj = bmj1;
+ *ui = bui1;
+ *uj = buj1;
+ *wm = 0;
+  }
+ return 0;
+}
+
+int predict(unsigned char *src, int p, unsigned char *dst, int dp,
+ unsigned char *ym, unsigned char *prd) {
+ int i, j;
+ for (i = 0; i < 16; i++, src += p, dst += dp, ym += 16, prd += 16)
+ for (j = 0; j < 16; j++)
+ prd[j] = (ym[j] ? src[j] : dst[j]);
+ return 0;
+}
+
+int fast_masked_motion_search(unsigned char *y, unsigned char *u, unsigned char *v,
+ int yp, int uvp,
+ unsigned char *dy, unsigned char *du, unsigned char *dv,
+ int dyp, int duvp,
+ COLOR_SEG_ELEMENT sgm[],
+ int count,
+ int *mi,
+ int *mj,
+ int *ui,
+ int *uj,
+ int *wm) {
+ int i, j;
+
+ unsigned char ym[256];
+ unsigned char ym2[256];
+ unsigned char uvm[64];
+ unsigned char dym2[256];
+ unsigned char dym[256];
+ unsigned char duvm[64];
+ unsigned int e = 0;
+ int beste = 256;
+ int bmi = -32, bmj = -32;
+ int bui = -32, buj = -32;
+ int beste1 = 256;
+ int bmi1 = -32, bmj1 = -32;
+ int bui1 = -32, buj1 = -32;
+ int obeste;
+
+ // first try finding best mask and then unmasked
+ beste = 0xffffffff;
+
+#if 0
+ for (i = 0; i < 16; i++) {
+ unsigned char *dy = i * yp + y;
+ for (j = 0; j < 16; j++)
+ printf("%2x", dy[j]);
+ printf("\n");
+ }
+ printf("\n");
+
+ for (i = -32; i < 48; i++) {
+ unsigned char *dyz = i * dyp + dy;
+ for (j = -32; j < 48; j++)
+ printf("%2x", dyz[j]);
+ printf("\n");
+ }
+#endif
+
+ // find best unmasked mv
+ for (i = -32; i < 32; i++) {
+ unsigned char *dyz = i * dyp + dy;
+ unsigned char *duz = i / 2 * duvp + du;
+ unsigned char *dvz = i / 2 * duvp + dv;
+ for (j = -32; j < 32; j++) {
+ // 0,0 masked destination
+ vp8_makemask_sse3(dyz + j, duz + j / 2, dvz + j / 2, dym, dyp, duvp,
+ sgm[0].y, sgm[0].u, sgm[0].v,
+ sgm[0].yt, sgm[0].ut, sgm[0].vt);
+
+ vp8_growmaskmb_sse3(dym, dym2);
+
+ e = vp9_sad16x16_unmasked_wmt(y, yp, dyz + j, dyp, dym2);
+
+ if (e < beste) {
+ bui = i;
+ buj = j;
+ beste = e;
+ }
+ }
+ }
+ // best mv masked destination
+
+ vp8_makemask_sse3(dy + bui * dyp + buj, du + bui / 2 * duvp + buj / 2, dv + bui / 2 * duvp + buj / 2,
+ dym, dyp, duvp,
+ sgm[0].y, sgm[0].u, sgm[0].v,
+ sgm[0].yt, sgm[0].ut, sgm[0].vt);
+
+ vp8_growmaskmb_sse3(dym, dym2);
+
+ obeste = beste;
+ beste = 0xffffffff;
+
+ // find best masked
+ for (i = -32; i < 32; i++) {
+ unsigned char *dyz = i * dyp + dy;
+ for (j = -32; j < 32; j++) {
+ e = vp9_sad16x16_masked_wmt(y, yp, dyz + j, dyp, dym2);
+ if (e < beste) {
+ bmi = i;
+ bmj = j;
+ beste = e;
+ }
+ }
+ }
+ beste1 = beste + obeste;
+ bmi1 = bmi;
+ bmj1 = bmj;
+ bui1 = bui;
+ buj1 = buj;
+
+ // source mask
+ vp8_makemask_sse3(y, u, v,
+ ym, yp, uvp,
+ sgm[0].y, sgm[0].u, sgm[0].v,
+ sgm[0].yt, sgm[0].ut, sgm[0].vt);
+
+ vp8_growmaskmb_sse3(ym, ym2);
+
+ // find best mask
+ for (i = -32; i < 32; i++) {
+ unsigned char *dyz = i * dyp + dy;
+ unsigned char *duz = i / 2 * duvp + du;
+ unsigned char *dvz = i / 2 * duvp + dv;
+ for (j = -32; j < 32; j++) {
+ // 0,0 masked destination
+ vp8_makemask_sse3(dyz + j, duz + j / 2, dvz + j / 2, dym, dyp, duvp,
+ sgm[0].y, sgm[0].u, sgm[0].v,
+ sgm[0].yt, sgm[0].ut, sgm[0].vt);
+
+ vp8_growmaskmb_sse3(dym, dym2);
+
+ e = compare_masks(ym2, dym2);
+
+ if (e < beste) {
+ bmi = i;
+ bmj = j;
+ beste = e;
+ }
+ }
+ }
+
+ vp8_makemask_sse3(dy + bmi * dyp + bmj, du + bmi / 2 * duvp + bmj / 2, dv + bmi / 2 * duvp + bmj / 2,
+ dym, dyp, duvp,
+ sgm[0].y, sgm[0].u, sgm[0].v,
+ sgm[0].yt, sgm[0].ut, sgm[0].vt);
+
+ vp8_growmaskmb_sse3(dym, dym2);
+
+ obeste = vp9_sad16x16_masked_wmt(y, yp, dy + bmi * dyp + bmj, dyp, dym2);
+
+ beste = 0xffffffff;
+
+ // find best unmasked mv
+ for (i = -32; i < 32; i++) {
+ unsigned char *dyz = i * dyp + dy;
+ for (j = -32; j < 32; j++) {
+ e = vp9_sad16x16_unmasked_wmt(y, yp, dyz + j, dyp, dym2);
+
+ if (e < beste) {
+ bui = i;
+ buj = j;
+ beste = e;
+ }
+ }
+ }
+ beste += obeste;
+
+ if (beste < beste1) {
+ *mi = bmi;
+ *mj = bmj;
+ *ui = bui;
+ *uj = buj;
+ *wm = 1;
+ } else {
+ *mi = bmi1;
+ *mj = bmj1;
+ *ui = bui1;
+ *uj = buj1;
+ *wm = 0;
+ beste = beste1;
+  }
+ return beste;
+}
+
+int predict_all(unsigned char *ym, unsigned char *um, unsigned char *vm,
+ int ymp, int uvmp,
+ unsigned char *yp, unsigned char *up, unsigned char *vp,
+ int ypp, int uvpp,
+ COLOR_SEG_ELEMENT sgm[],
+ int count,
+ int mi,
+ int mj,
+ int ui,
+ int uj,
+ int wm) {
+ int i, j;
+ unsigned char dym[256];
+ unsigned char dym2[256];
+ unsigned char duvm[64];
+ unsigned char *yu = ym, *uu = um, *vu = vm;
+
+ unsigned char *dym3 = dym2;
+
+ ym += mi * ymp + mj;
+ um += mi / 2 * uvmp + mj / 2;
+ vm += mi / 2 * uvmp + mj / 2;
+
+ yu += ui * ymp + uj;
+ uu += ui / 2 * uvmp + uj / 2;
+ vu += ui / 2 * uvmp + uj / 2;
+
+ // best mv masked destination
+ if (wm)
+ vp8_makemask_sse3(ym, um, vm, dym, ymp, uvmp,
+ sgm[0].y, sgm[0].u, sgm[0].v,
+ sgm[0].yt, sgm[0].ut, sgm[0].vt);
+ else
+ vp8_makemask_sse3(yu, uu, vu, dym, ymp, uvmp,
+ sgm[0].y, sgm[0].u, sgm[0].v,
+ sgm[0].yt, sgm[0].ut, sgm[0].vt);
+
+ vp8_growmaskmb_sse3(dym, dym2);
+ vp8_masked_predictor_wmt(ym, yu, ymp, yp, ypp, dym3);
+ vp8_uv_from_y_mask(dym3, duvm);
+ vp8_masked_predictor_uv_wmt(um, uu, uvmp, up, uvpp, duvm);
+ vp8_masked_predictor_uv_wmt(vm, vu, uvmp, vp, uvpp, duvm);
+
+ return 0;
+}
+
+unsigned char f0p[1280 * 720 * 3 / 2];
+unsigned char f1p[1280 * 720 * 3 / 2];
+unsigned char prd[1280 * 720 * 3 / 2];
+unsigned char msk[1280 * 720 * 3 / 2];
+
+
+int mainz(int argc, char *argv[]) {
+
+ FILE *f = fopen(argv[1], "rb");
+ FILE *g = fopen(argv[2], "wb");
+ int w = atoi(argv[3]), h = atoi(argv[4]);
+ int y_stride = w, uv_stride = w / 2;
+ int r, c;
+ unsigned char *f0 = f0p, *f1 = f1p, *t;
+ unsigned char ym[256], uvm[64];
+ unsigned char ym2[256], uvm2[64];
+ unsigned char ym3[256], uvm3[64];
+ int a, b;
+
+ COLOR_SEG_ELEMENT last = { 20, 20, 20, 20, 230, 20, 1}, best;
+#if 0
+ makeneighbors();
+ COLOR_SEG_ELEMENT segmentation[] = {
+ { 60, 4, 80, 17, 80, 10, 1},
+ { 40, 4, 15, 10, 80, 10, 1},
+ };
+ make_mb_mask(y, u, v, ym2, uvm2, 16, 8, segmentation, 1);
+
+ vp8_makemask_sse3(y, u, v, ym, (int) 16, (int) 8,
+ (int) segmentation[0].y, (int) segmentation[0].u, (int) segmentation[0].v,
+ segmentation[0].yt, segmentation[0].ut, segmentation[0].vt);
+
+ vp8_growmaskmb_sse3(ym, ym3);
+
+ a = vp9_sad16x16_masked_wmt(str, 16, sts, 16, ym3);
+ b = vp9_sad16x16_unmasked_wmt(str, 16, sts, 16, ym3);
+
+ vp8_masked_predictor_wmt(str, sts, 16, ym, 16, ym3);
+
+ vp8_uv_from_y_mask(ym3, uvm3);
+
+ return 4;
+#endif
+ makeneighbors();
+
+
+ memset(prd, 128, w * h * 3 / 2);
+
+ fread(f0, w * h * 3 / 2, 1, f);
+
+ while (!feof(f)) {
+ unsigned char *ys = f1, *yd = f0, *yp = prd;
+ unsigned char *us = f1 + w * h, *ud = f0 + w * h, *up = prd + w * h;
+ unsigned char *vs = f1 + w * h * 5 / 4, *vd = f0 + w * h * 5 / 4, *vp = prd + w * h * 5 / 4;
+ fread(f1, w * h * 3 / 2, 1, f);
+
+ ys += 32 * y_stride;
+ yd += 32 * y_stride;
+ yp += 32 * y_stride;
+ us += 16 * uv_stride;
+ ud += 16 * uv_stride;
+ up += 16 * uv_stride;
+ vs += 16 * uv_stride;
+ vd += 16 * uv_stride;
+ vp += 16 * uv_stride;
+ for (r = 32; r < h - 32; r += 16,
+ ys += 16 * w, yd += 16 * w, yp += 16 * w,
+ us += 8 * uv_stride, ud += 8 * uv_stride, up += 8 * uv_stride,
+ vs += 8 * uv_stride, vd += 8 * uv_stride, vp += 8 * uv_stride) {
+ for (c = 32; c < w - 32; c += 16) {
+ int mi, mj, ui, uj, wm;
+ int bmi, bmj, bui, buj, bwm;
+ unsigned char ym[256];
+
+ if (vp9_sad16x16_sse3(ys + c, y_stride, yd + c, y_stride, 0xffff) == 0)
+ bmi = bmj = bui = buj = bwm = 0;
+ else {
+ COLOR_SEG_ELEMENT cs[5];
+ int j;
+        unsigned int beste = 0xffffffff;
+ unsigned int bestj = 0;
+
+ // try color from last mb segmentation
+ cs[0] = last;
+
+ // try color segs from 4 pixels in mb recon as segmentation
+ cs[1].y = yd[c + y_stride + 1];
+ cs[1].u = ud[c / 2 + uv_stride];
+ cs[1].v = vd[c / 2 + uv_stride];
+ cs[1].yt = cs[1].ut = cs[1].vt = 20;
+ cs[2].y = yd[c + w + 14];
+ cs[2].u = ud[c / 2 + uv_stride + 7];
+ cs[2].v = vd[c / 2 + uv_stride + 7];
+ cs[2].yt = cs[2].ut = cs[2].vt = 20;
+ cs[3].y = yd[c + w * 14 + 1];
+ cs[3].u = ud[c / 2 + uv_stride * 7];
+ cs[3].v = vd[c / 2 + uv_stride * 7];
+ cs[3].yt = cs[3].ut = cs[3].vt = 20;
+ cs[4].y = yd[c + w * 14 + 14];
+ cs[4].u = ud[c / 2 + uv_stride * 7 + 7];
+ cs[4].v = vd[c / 2 + uv_stride * 7 + 7];
+ cs[4].yt = cs[4].ut = cs[4].vt = 20;
+
+ for (j = 0; j < 5; j++) {
+ int e;
+
+ e = fast_masked_motion_search(
+ ys + c, us + c / 2, vs + c / 2, y_stride, uv_stride,
+ yd + c, ud + c / 2, vd + c / 2, y_stride, uv_stride,
+ &cs[j], 1, &mi, &mj, &ui, &uj, &wm);
+
+ if (e < beste) {
+ bmi = mi;
+ bmj = mj;
+ bui = ui;
+            buj = uj;
+            bwm = wm;
+ bestj = j;
+ beste = e;
+ }
+ }
+ best = cs[bestj];
+ // best = segmentation[0];
+ last = best;
+ }
+ predict_all(yd + c, ud + c / 2, vd + c / 2, w, uv_stride,
+ yp + c, up + c / 2, vp + c / 2, w, uv_stride,
+ &best, 1, bmi, bmj, bui, buj, bwm);
+
+ }
+ }
+ fwrite(prd, w * h * 3 / 2, 1, g);
+ t = f0;
+ f0 = f1;
+ f1 = t;
+  }
+ fclose(f);
+ fclose(g);
+ return 0;
+}
diff --git a/libvpx/vp9/common/vp9_mbpitch.c b/libvpx/vp9/common/vp9_mbpitch.c
new file mode 100644
index 0000000..3cf37ff
--- /dev/null
+++ b/libvpx/vp9/common/vp9_mbpitch.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vp9/common/vp9_blockd.h"
+
+void vp9_setup_block_dptrs(MACROBLOCKD *mb,
+ int subsampling_x, int subsampling_y) {
+ int i;
+
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ mb->plane[i].plane_type = i ? PLANE_TYPE_UV : PLANE_TYPE_Y_WITH_DC;
+ mb->plane[i].subsampling_x = i ? subsampling_x : 0;
+ mb->plane[i].subsampling_y = i ? subsampling_y : 0;
+ }
+#if CONFIG_ALPHA
+ // TODO(jkoleszar): Using the Y w/h for now
+ mb->plane[3].subsampling_x = 0;
+ mb->plane[3].subsampling_y = 0;
+#endif
+}
diff --git a/libvpx/vp9/common/vp9_modecont.c b/libvpx/vp9/common/vp9_modecont.c
new file mode 100644
index 0000000..5d92cfa
--- /dev/null
+++ b/libvpx/vp9/common/vp9_modecont.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vp9/common/vp9_modecont.h"
+
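+// Each row gives the VP9_INTER_MODES - 1 = 3 probabilities used to code
+// the inter mode for one spatial-neighbour context.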
+const vp9_prob vp9_default_inter_mode_probs[INTER_MODE_CONTEXTS]
+ [VP9_INTER_MODES - 1] = {
+ {2, 173, 34}, // 0 = both zero mv
+  {7, 145, 85},   // 1 = one zero mv + one predicted mv
+ {7, 166, 63}, // 2 = two predicted mvs
+ {7, 94, 66}, // 3 = one predicted/zero and one new mv
+ {8, 64, 46}, // 4 = two new mvs
+ {17, 81, 31}, // 5 = one intra neighbour + x
+ {25, 29, 30}, // 6 = two intra neighbours
+};
diff --git a/libvpx/vp9/common/vp9_modecont.h b/libvpx/vp9/common/vp9_modecont.h
new file mode 100644
index 0000000..3ec6079
--- /dev/null
+++ b/libvpx/vp9/common/vp9_modecont.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_MODECONT_H_
+#define VP9_COMMON_VP9_MODECONT_H_
+
+#include "vp9/common/vp9_entropy.h"
+
+extern const vp9_prob vp9_default_inter_mode_probs[INTER_MODE_CONTEXTS]
+ [VP9_INTER_MODES - 1];
+
+#endif // VP9_COMMON_VP9_MODECONT_H_
diff --git a/libvpx/vp9/common/vp9_modecontext.c b/libvpx/vp9/common/vp9_modecontext.c
new file mode 100644
index 0000000..a79ab2a
--- /dev/null
+++ b/libvpx/vp9/common/vp9_modecontext.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vp9/common/vp9_entropymode.h"
+
+const vp9_prob vp9_kf_default_bmode_probs[VP9_INTRA_MODES]
+ [VP9_INTRA_MODES]
+ [VP9_INTRA_MODES - 1] = {
+ { /* above = dc */
+ { 137, 30, 42, 148, 151, 207, 70, 52, 91 } /* left = dc */,
+ { 92, 45, 102, 136, 116, 180, 74, 90, 100 } /* left = v */,
+ { 73, 32, 19, 187, 222, 215, 46, 34, 100 } /* left = h */,
+ { 91, 30, 32, 116, 121, 186, 93, 86, 94 } /* left = d45 */,
+ { 72, 35, 36, 149, 68, 206, 68, 63, 105 } /* left = d135 */,
+ { 73, 31, 28, 138, 57, 124, 55, 122, 151 } /* left = d117 */,
+ { 67, 23, 21, 140, 126, 197, 40, 37, 171 } /* left = d153 */,
+ { 86, 27, 28, 128, 154, 212, 45, 43, 53 } /* left = d27 */,
+ { 74, 32, 27, 107, 86, 160, 63, 134, 102 } /* left = d63 */,
+ { 59, 67, 44, 140, 161, 202, 78, 67, 119 } /* left = tm */
+ }, { /* above = v */
+ { 63, 36, 126, 146, 123, 158, 60, 90, 96 } /* left = dc */,
+ { 43, 46, 168, 134, 107, 128, 69, 142, 92 } /* left = v */,
+ { 44, 29, 68, 159, 201, 177, 50, 57, 77 } /* left = h */,
+ { 58, 38, 76, 114, 97, 172, 78, 133, 92 } /* left = d45 */,
+ { 46, 41, 76, 140, 63, 184, 69, 112, 57 } /* left = d135 */,
+ { 38, 32, 85, 140, 46, 112, 54, 151, 133 } /* left = d117 */,
+ { 39, 27, 61, 131, 110, 175, 44, 75, 136 } /* left = d153 */,
+ { 52, 30, 74, 113, 130, 175, 51, 64, 58 } /* left = d27 */,
+ { 47, 35, 80, 100, 74, 143, 64, 163, 74 } /* left = d63 */,
+ { 36, 61, 116, 114, 128, 162, 80, 125, 82 } /* left = tm */
+ }, { /* above = h */
+ { 82, 26, 26, 171, 208, 204, 44, 32, 105 } /* left = dc */,
+ { 55, 44, 68, 166, 179, 192, 57, 57, 108 } /* left = v */,
+ { 42, 26, 11, 199, 241, 228, 23, 15, 85 } /* left = h */,
+ { 68, 42, 19, 131, 160, 199, 55, 52, 83 } /* left = d45 */,
+ { 58, 50, 25, 139, 115, 232, 39, 52, 118 } /* left = d135 */,
+ { 50, 35, 33, 153, 104, 162, 64, 59, 131 } /* left = d117 */,
+ { 44, 24, 16, 150, 177, 202, 33, 19, 156 } /* left = d153 */,
+ { 55, 27, 12, 153, 203, 218, 26, 27, 49 } /* left = d27 */,
+ { 53, 49, 21, 110, 116, 168, 59, 80, 76 } /* left = d63 */,
+ { 38, 72, 19, 168, 203, 212, 50, 50, 107 } /* left = tm */
+ }, { /* above = d45 */
+ { 103, 26, 36, 129, 132, 201, 83, 80, 93 } /* left = dc */,
+ { 59, 38, 83, 112, 103, 162, 98, 136, 90 } /* left = v */,
+ { 62, 30, 23, 158, 200, 207, 59, 57, 50 } /* left = h */,
+ { 67, 30, 29, 84, 86, 191, 102, 91, 59 } /* left = d45 */,
+ { 60, 32, 33, 112, 71, 220, 64, 89, 104 } /* left = d135 */,
+ { 53, 26, 34, 130, 56, 149, 84, 120, 103 } /* left = d117 */,
+ { 53, 21, 23, 133, 109, 210, 56, 77, 172 } /* left = d153 */,
+ { 77, 19, 29, 112, 142, 228, 55, 66, 36 } /* left = d27 */,
+ { 61, 29, 29, 93, 97, 165, 83, 175, 162 } /* left = d63 */,
+ { 47, 47, 43, 114, 137, 181, 100, 99, 95 } /* left = tm */
+ }, { /* above = d135 */
+ { 69, 23, 29, 128, 83, 199, 46, 44, 101 } /* left = dc */,
+ { 53, 40, 55, 139, 69, 183, 61, 80, 110 } /* left = v */,
+ { 40, 29, 19, 161, 180, 207, 43, 24, 91 } /* left = h */,
+ { 60, 34, 19, 105, 61, 198, 53, 64, 89 } /* left = d45 */,
+ { 52, 31, 22, 158, 40, 209, 58, 62, 89 } /* left = d135 */,
+ { 44, 31, 29, 147, 46, 158, 56, 102, 198 } /* left = d117 */,
+ { 35, 19, 12, 135, 87, 209, 41, 45, 167 } /* left = d153 */,
+ { 55, 25, 21, 118, 95, 215, 38, 39, 66 } /* left = d27 */,
+ { 51, 38, 25, 113, 58, 164, 70, 93, 97 } /* left = d63 */,
+ { 47, 54, 34, 146, 108, 203, 72, 103, 151 } /* left = tm */
+ }, { /* above = d117 */
+ { 64, 19, 37, 156, 66, 138, 49, 95, 133 } /* left = dc */,
+ { 46, 27, 80, 150, 55, 124, 55, 121, 135 } /* left = v */,
+ { 36, 23, 27, 165, 149, 166, 54, 64, 118 } /* left = h */,
+ { 53, 21, 36, 131, 63, 163, 60, 109, 81 } /* left = d45 */,
+ { 40, 26, 35, 154, 40, 185, 51, 97, 123 } /* left = d135 */,
+ { 35, 19, 34, 179, 19, 97, 48, 129, 124 } /* left = d117 */,
+ { 36, 20, 26, 136, 62, 164, 33, 77, 154 } /* left = d153 */,
+ { 45, 18, 32, 130, 90, 157, 40, 79, 91 } /* left = d27 */,
+ { 45, 26, 28, 129, 45, 129, 49, 147, 123 } /* left = d63 */,
+ { 38, 44, 51, 136, 74, 162, 57, 97, 121 } /* left = tm */
+ }, { /* above = d153 */
+ { 75, 17, 22, 136, 138, 185, 32, 34, 166 } /* left = dc */,
+ { 56, 39, 58, 133, 117, 173, 48, 53, 187 } /* left = v */,
+ { 35, 21, 12, 161, 212, 207, 20, 23, 145 } /* left = h */,
+ { 56, 29, 19, 117, 109, 181, 55, 68, 112 } /* left = d45 */,
+ { 47, 29, 17, 153, 64, 220, 59, 51, 114 } /* left = d135 */,
+ { 46, 16, 24, 136, 76, 147, 41, 64, 172 } /* left = d117 */,
+ { 34, 17, 11, 108, 152, 187, 13, 15, 209 } /* left = d153 */,
+ { 51, 24, 14, 115, 133, 209, 32, 26, 104 } /* left = d27 */,
+ { 55, 30, 18, 122, 79, 179, 44, 88, 116 } /* left = d63 */,
+ { 37, 49, 25, 129, 168, 164, 41, 54, 148 } /* left = tm */
+ }, { /* above = d27 */
+ { 82, 22, 32, 127, 143, 213, 39, 41, 70 } /* left = dc */,
+ { 62, 44, 61, 123, 105, 189, 48, 57, 64 } /* left = v */,
+ { 47, 25, 17, 175, 222, 220, 24, 30, 86 } /* left = h */,
+ { 68, 36, 17, 106, 102, 206, 59, 74, 74 } /* left = d45 */,
+ { 57, 39, 23, 151, 68, 216, 55, 63, 58 } /* left = d135 */,
+ { 49, 30, 35, 141, 70, 168, 82, 40, 115 } /* left = d117 */,
+ { 51, 25, 15, 136, 129, 202, 38, 35, 139 } /* left = d153 */,
+ { 68, 26, 16, 111, 141, 215, 29, 28, 28 } /* left = d27 */,
+ { 59, 39, 19, 114, 75, 180, 77, 104, 42 } /* left = d63 */,
+ { 40, 61, 26, 126, 152, 206, 61, 59, 93 } /* left = tm */
+ }, { /* above = d63 */
+ { 78, 23, 39, 111, 117, 170, 74, 124, 94 } /* left = dc */,
+ { 48, 34, 86, 101, 92, 146, 78, 179, 134 } /* left = v */,
+ { 47, 22, 24, 138, 187, 178, 68, 69, 59 } /* left = h */,
+ { 56, 25, 33, 105, 112, 187, 95, 177, 129 } /* left = d45 */,
+ { 48, 31, 27, 114, 63, 183, 82, 116, 56 } /* left = d135 */,
+ { 43, 28, 37, 121, 63, 123, 61, 192, 169 } /* left = d117 */,
+ { 42, 17, 24, 109, 97, 177, 56, 76, 122 } /* left = d153 */,
+ { 58, 18, 28, 105, 139, 182, 70, 92, 63 } /* left = d27 */,
+ { 46, 23, 32, 74, 86, 150, 67, 183, 88 } /* left = d63 */,
+ { 36, 38, 48, 92, 122, 165, 88, 137, 91 } /* left = tm */
+ }, { /* above = tm */
+ { 65, 70, 60, 155, 159, 199, 61, 60, 81 } /* left = dc */,
+ { 44, 78, 115, 132, 119, 173, 71, 112, 93 } /* left = v */,
+ { 39, 38, 21, 184, 227, 206, 42, 32, 64 } /* left = h */,
+ { 58, 47, 36, 124, 137, 193, 80, 82, 78 } /* left = d45 */,
+ { 49, 50, 35, 144, 95, 205, 63, 78, 59 } /* left = d135 */,
+ { 41, 53, 52, 148, 71, 142, 65, 128, 51 } /* left = d117 */,
+ { 40, 36, 28, 143, 143, 202, 40, 55, 137 } /* left = d153 */,
+ { 52, 34, 29, 129, 183, 227, 42, 35, 43 } /* left = d27 */,
+ { 42, 44, 44, 104, 105, 164, 64, 130, 80 } /* left = d63 */,
+ { 43, 81, 53, 140, 169, 204, 68, 84, 72 } /* left = tm */
+ }
+};
diff --git a/libvpx/vp9/common/vp9_mv.h b/libvpx/vp9/common/vp9_mv.h
new file mode 100644
index 0000000..a1eef46
--- /dev/null
+++ b/libvpx/vp9/common/vp9_mv.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_MV_H_
+#define VP9_COMMON_VP9_MV_H_
+
+#include "vpx/vpx_integer.h"
+
+typedef struct {
+ int16_t row;
+ int16_t col;
+} MV;
+
+typedef union int_mv {
+ uint32_t as_int;
+ MV as_mv;
+} int_mv; /* facilitates faster equality tests and copies */
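+// e.g. a.as_int == b.as_int compares row and col in a single operation.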
+
+struct mv32 {
+ int32_t row;
+ int32_t col;
+};
+
+typedef union int_mv32 {
+ uint64_t as_int;
+ struct mv32 as_mv;
+} int_mv32; /* facilitates faster equality tests and copies */
+
+#endif // VP9_COMMON_VP9_MV_H_
diff --git a/libvpx/vp9/common/vp9_mvref_common.c b/libvpx/vp9/common/vp9_mvref_common.c
new file mode 100644
index 0000000..78fb2f0
--- /dev/null
+++ b/libvpx/vp9/common/vp9_mvref_common.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/vp9_mvref_common.h"
+
+#define MVREF_NEIGHBOURS 8
+static int mv_ref_blocks[BLOCK_SIZE_TYPES][MVREF_NEIGHBOURS][2] = {
+ // SB4X4
+ {{0, -1}, {-1, 0}, {-1, -1}, {0, -2}, {-2, 0}, {-1, -2}, {-2, -1}, {-2, -2}},
+ // SB4X8
+ {{0, -1}, {-1, 0}, {-1, -1}, {0, -2}, {-2, 0}, {-1, -2}, {-2, -1}, {-2, -2}},
+ // SB8X4
+ {{0, -1}, {-1, 0}, {-1, -1}, {0, -2}, {-2, 0}, {-1, -2}, {-2, -1}, {-2, -2}},
+ // SB8X8
+ {{0, -1}, {-1, 0}, {-1, -1}, {0, -2}, {-2, 0}, {-1, -2}, {-2, -1}, {-2, -2}},
+ // SB8X16
+ {{-1, 0}, {0, -1}, {-1, 1}, {-1, -1}, {-2, 0}, {0, -2}, {-1, -2}, {-2, -1}},
+ // SB16X8
+ {{0, -1}, {-1, 0}, {1, -1}, {-1, -1}, {0, -2}, {-2, 0}, {-2, -1}, {-1, -2}},
+ // SB16X16
+ {{0, -1}, {-1, 0}, {1, -1}, {-1, 1}, {-1, -1}, {0, -3}, {-3, 0}, {-3, -3}},
+ // SB16X32
+ {{-1, 0}, {0, -1}, {-1, 2}, {-1, -1}, {1, -1}, {-3, 0}, {0, -3}, {-3, -3}},
+ // SB32X16
+ {{0, -1}, {-1, 0}, {2, -1}, {-1, -1}, {-1, 1}, {0, -3}, {-3, 0}, {-3, -3}},
+ // SB32X32
+ {{1, -1}, {-1, 1}, {2, -1}, {-1, 2}, {-1, -1}, {0, -3}, {-3, 0}, {-3, -3}},
+ // SB32X64
+ {{-1, 0}, {0, -1}, {-1, 4}, {2, -1}, {-1, -1}, {-3, 0}, {0, -3}, {-1, 2}},
+ // SB64X32
+ {{0, -1}, {-1, 0}, {4, -1}, {-1, 2}, {-1, -1}, {0, -3}, {-3, 0}, {2, -1}},
+ // SB64X64
+ {{3, -1}, {-1, 3}, {4, -1}, {-1, 4}, {-1, -1}, {0, -1}, {-1, 0}, {6, -1}}
+};
+// clamp_mv_ref
+#define MV_BORDER (16 << 3) // Allow 16 pels in 1/8th pel units
+
+static void clamp_mv_ref(const MACROBLOCKD *xd, int_mv *mv) {
+ mv->as_mv.col = clamp(mv->as_mv.col, xd->mb_to_left_edge - MV_BORDER,
+ xd->mb_to_right_edge + MV_BORDER);
+ mv->as_mv.row = clamp(mv->as_mv.row, xd->mb_to_top_edge - MV_BORDER,
+ xd->mb_to_bottom_edge + MV_BORDER);
+}
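+
+// Worked example (illustrative): MV_BORDER is 16 << 3 = 128, i.e. 16 pels in
+// 1/8-pel units, so a clamped vector may point at most 16 pixels beyond a
+// frame edge. With mb_to_left_edge == -256 (in 1/8 pel), the column is
+// clamped to no less than -256 - 128 = -384.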
+
+// Gets a candidate reference motion vector from the given mode info
+// structure if one exists that matches the given reference frame.
+static int get_matching_candidate(const MODE_INFO *candidate_mi,
+ MV_REFERENCE_FRAME ref_frame,
+ int_mv *c_mv, int block_idx) {
+ if (ref_frame == candidate_mi->mbmi.ref_frame[0]) {
+ if (block_idx >= 0 && candidate_mi->mbmi.sb_type < BLOCK_SIZE_SB8X8)
+ c_mv->as_int = candidate_mi->bmi[block_idx].as_mv[0].as_int;
+ else
+ c_mv->as_int = candidate_mi->mbmi.mv[0].as_int;
+ } else if (ref_frame == candidate_mi->mbmi.ref_frame[1]) {
+ if (block_idx >= 0 && candidate_mi->mbmi.sb_type < BLOCK_SIZE_SB8X8)
+ c_mv->as_int = candidate_mi->bmi[block_idx].as_mv[1].as_int;
+ else
+ c_mv->as_int = candidate_mi->mbmi.mv[1].as_int;
+ } else {
+ return 0;
+ }
+
+ return 1;
+}
+
+// Gets candidate reference motion vector(s) from the given mode info
+// structure if they exist and do NOT match the given reference frame.
+static void get_non_matching_candidates(const MODE_INFO *candidate_mi,
+ MV_REFERENCE_FRAME ref_frame,
+ MV_REFERENCE_FRAME *c_ref_frame,
+ int_mv *c_mv,
+ MV_REFERENCE_FRAME *c2_ref_frame,
+ int_mv *c2_mv) {
+
+ c_mv->as_int = 0;
+ c2_mv->as_int = 0;
+ *c_ref_frame = INTRA_FRAME;
+ *c2_ref_frame = INTRA_FRAME;
+
+ // If the first candidate is not valid, the second cannot be either.
+ if (candidate_mi->mbmi.ref_frame[0] > INTRA_FRAME) {
+ // First candidate
+ if (candidate_mi->mbmi.ref_frame[0] != ref_frame) {
+ *c_ref_frame = candidate_mi->mbmi.ref_frame[0];
+ c_mv->as_int = candidate_mi->mbmi.mv[0].as_int;
+ }
+
+ // Second candidate
+ if ((candidate_mi->mbmi.ref_frame[1] > INTRA_FRAME) &&
+ (candidate_mi->mbmi.ref_frame[1] != ref_frame) &&
+ (candidate_mi->mbmi.mv[1].as_int != candidate_mi->mbmi.mv[0].as_int)) {
+ *c2_ref_frame = candidate_mi->mbmi.ref_frame[1];
+ c2_mv->as_int = candidate_mi->mbmi.mv[1].as_int;
+ }
+ }
+}
+
+
+// Performs mv sign inversion if indicated by the reference frame combination.
+static void scale_mv(MACROBLOCKD *xd, MV_REFERENCE_FRAME this_ref_frame,
+ MV_REFERENCE_FRAME candidate_ref_frame,
+ int_mv *candidate_mv, int *ref_sign_bias) {
+
+ // Sign inversion where appropriate.
+ if (ref_sign_bias[candidate_ref_frame] != ref_sign_bias[this_ref_frame]) {
+ candidate_mv->as_mv.row = -candidate_mv->as_mv.row;
+ candidate_mv->as_mv.col = -candidate_mv->as_mv.col;
+ }
+}
+
+// Add a candidate mv.
+// Discard if it has already been seen.
+static void add_candidate_mv(int_mv *mv_list, int *mv_scores,
+ int *candidate_count, int_mv candidate_mv,
+ int weight) {
+ if (*candidate_count == 0) {
+ mv_list[0].as_int = candidate_mv.as_int;
+ mv_scores[0] = weight;
+ *candidate_count += 1;
+ } else if ((*candidate_count == 1) &&
+ (candidate_mv.as_int != mv_list[0].as_int)) {
+ mv_list[1].as_int = candidate_mv.as_int;
+ mv_scores[1] = weight;
+ *candidate_count += 1;
+ }
+}
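+
+// Illustrative trace (not part of the patch): with an empty list the first
+// call fills slot 0; a second call with a different vector fills slot 1; a
+// vector equal to mv_list[0], or any call once the count has reached
+// MAX_MV_REF_CANDIDATES (2), is silently dropped.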
+
+// This function searches the neighbourhood of a given MB/SB
+// to try and find candidate reference vectors.
+//
+void vp9_find_mv_refs_idx(VP9_COMMON *cm, MACROBLOCKD *xd, MODE_INFO *here,
+ MODE_INFO *lf_here, MV_REFERENCE_FRAME ref_frame,
+ int_mv *mv_ref_list, int *ref_sign_bias,
+ int block_idx) {
+ int i;
+ MODE_INFO *candidate_mi;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ int_mv c_refmv;
+ int_mv c2_refmv;
+ MV_REFERENCE_FRAME c_ref_frame;
+ MV_REFERENCE_FRAME c2_ref_frame;
+ int candidate_scores[MAX_MV_REF_CANDIDATES];
+ int refmv_count = 0;
+ int split_count = 0;
+ int (*mv_ref_search)[2];
+ const int mi_col = get_mi_col(xd);
+ const int mi_row = get_mi_row(xd);
+ int intra_count = 0;
+ int zero_count = 0;
+ int newmv_count = 0;
+ int x_idx = 0, y_idx = 0;
+
+ // Blank the reference vector lists and other local structures.
+ vpx_memset(mv_ref_list, 0, sizeof(int_mv) * MAX_MV_REF_CANDIDATES);
+ vpx_memset(candidate_scores, 0, sizeof(candidate_scores));
+
+ mv_ref_search = mv_ref_blocks[mbmi->sb_type];
+ if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
+ x_idx = block_idx & 1;
+ y_idx = block_idx >> 1;
+ }
+
+ // We first scan for candidate vectors that match the current reference
+ // frame. Look at the nearest neighbours.
+ for (i = 0; i < 2; ++i) {
+ const int mi_search_col = mi_col + mv_ref_search[i][0];
+ const int mi_search_row = mi_row + mv_ref_search[i][1];
+ if ((mi_search_col >= cm->cur_tile_mi_col_start) &&
+ (mi_search_col < cm->cur_tile_mi_col_end) &&
+ (mi_search_row >= 0) && (mi_search_row < cm->mi_rows)) {
+ int b;
+
+ candidate_mi = here + mv_ref_search[i][0] +
+ (mv_ref_search[i][1] * xd->mode_info_stride);
+
+ if (block_idx >= 0) {
+ if (mv_ref_search[i][0])
+ b = 1 + y_idx * 2;
+ else
+ b = 2 + x_idx;
+ } else {
+ b = -1;
+ }
+ if (get_matching_candidate(candidate_mi, ref_frame, &c_refmv, b)) {
+ add_candidate_mv(mv_ref_list, candidate_scores,
+ &refmv_count, c_refmv, 16);
+ }
+ split_count += (candidate_mi->mbmi.sb_type < BLOCK_SIZE_SB8X8 &&
+ candidate_mi->mbmi.ref_frame[0] != INTRA_FRAME);
+
+ // Count the number of neighbours coded intra, zeromv and newmv.
+ intra_count += (candidate_mi->mbmi.mode < NEARESTMV);
+ zero_count += (candidate_mi->mbmi.mode == ZEROMV);
+ newmv_count += (candidate_mi->mbmi.mode >= NEWMV);
+ }
+ }
+
+ // More distant neighbours.
+ for (i = 2; (i < MVREF_NEIGHBOURS) &&
+ (refmv_count < MAX_MV_REF_CANDIDATES); ++i) {
+ const int mi_search_col = mi_col + mv_ref_search[i][0];
+ const int mi_search_row = mi_row + mv_ref_search[i][1];
+ if ((mi_search_col >= cm->cur_tile_mi_col_start) &&
+ (mi_search_col < cm->cur_tile_mi_col_end) &&
+ (mi_search_row >= 0) && (mi_search_row < cm->mi_rows)) {
+ candidate_mi = here + mv_ref_search[i][0] +
+ (mv_ref_search[i][1] * xd->mode_info_stride);
+
+ if (get_matching_candidate(candidate_mi, ref_frame, &c_refmv, -1)) {
+ add_candidate_mv(mv_ref_list, candidate_scores,
+ &refmv_count, c_refmv, 16);
+ }
+ }
+ }
+
+ // Look in the last frame if it exists
+ if (lf_here && (refmv_count < MAX_MV_REF_CANDIDATES)) {
+ candidate_mi = lf_here;
+ if (get_matching_candidate(candidate_mi, ref_frame, &c_refmv, -1)) {
+ add_candidate_mv(mv_ref_list, candidate_scores,
+ &refmv_count, c_refmv, 16);
+ }
+ }
+
+ // If we have not found enough candidates, consider ones where the
+ // reference frame does not match. Break out when we have
+ // MAX_MV_REF_CANDIDATES candidates.
+ // Look first at spatial neighbours.
+ for (i = 0; (i < MVREF_NEIGHBOURS) &&
+ (refmv_count < MAX_MV_REF_CANDIDATES); ++i) {
+ const int mi_search_col = mi_col + mv_ref_search[i][0];
+ const int mi_search_row = mi_row + mv_ref_search[i][1];
+ if ((mi_search_col >= cm->cur_tile_mi_col_start) &&
+ (mi_search_col < cm->cur_tile_mi_col_end) &&
+ (mi_search_row >= 0) && (mi_search_row < cm->mi_rows)) {
+ candidate_mi = here + mv_ref_search[i][0] +
+ (mv_ref_search[i][1] * xd->mode_info_stride);
+
+ get_non_matching_candidates(candidate_mi, ref_frame,
+ &c_ref_frame, &c_refmv,
+ &c2_ref_frame, &c2_refmv);
+
+ if (c_ref_frame != INTRA_FRAME) {
+ scale_mv(xd, ref_frame, c_ref_frame, &c_refmv, ref_sign_bias);
+ add_candidate_mv(mv_ref_list, candidate_scores,
+ &refmv_count, c_refmv, 1);
+ }
+
+ if (c2_ref_frame != INTRA_FRAME) {
+ scale_mv(xd, ref_frame, c2_ref_frame, &c2_refmv, ref_sign_bias);
+ add_candidate_mv(mv_ref_list, candidate_scores,
+ &refmv_count, c2_refmv, 1);
+ }
+ }
+ }
+
+ // Look at the last frame if it exists
+ if (lf_here && (refmv_count < MAX_MV_REF_CANDIDATES)) {
+ candidate_mi = lf_here;
+ get_non_matching_candidates(candidate_mi, ref_frame,
+ &c_ref_frame, &c_refmv,
+ &c2_ref_frame, &c2_refmv);
+
+ if (c_ref_frame != INTRA_FRAME) {
+ scale_mv(xd, ref_frame, c_ref_frame, &c_refmv, ref_sign_bias);
+ add_candidate_mv(mv_ref_list, candidate_scores,
+ &refmv_count, c_refmv, 1);
+ }
+
+ if (c2_ref_frame != INTRA_FRAME) {
+ scale_mv(xd, ref_frame, c2_ref_frame, &c2_refmv, ref_sign_bias);
+ add_candidate_mv(mv_ref_list, candidate_scores,
+ &refmv_count, c2_refmv, 1);
+ }
+ }
+
+ if (!intra_count) {
+ if (!newmv_count) {
+ // 0 = both zero mv
+ // 1 = one zero mv + one a predicted mv
+ // 2 = two predicted mvs
+ mbmi->mb_mode_context[ref_frame] = 2 - zero_count;
+ } else {
+ // 3 = one predicted/zero and one new mv
+ // 4 = two new mvs
+ mbmi->mb_mode_context[ref_frame] = 2 + newmv_count;
+ }
+ } else {
+ // 5 = one intra neighbour + x
+ // 6 = two intra neighbours
+ mbmi->mb_mode_context[ref_frame] = 4 + intra_count;
+ }
+
+ // Clamp vectors
+ for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) {
+ clamp_mv_ref(xd, &mv_ref_list[i]);
+ }
+}
diff --git a/libvpx/vp9/common/vp9_mvref_common.h b/libvpx/vp9/common/vp9_mvref_common.h
new file mode 100644
index 0000000..7290f10
--- /dev/null
+++ b/libvpx/vp9/common/vp9_mvref_common.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_blockd.h"
+
+#ifndef VP9_COMMON_VP9_MVREF_COMMON_H_
+#define VP9_COMMON_VP9_MVREF_COMMON_H_
+
+void vp9_find_mv_refs_idx(VP9_COMMON *cm,
+ MACROBLOCKD *xd,
+ MODE_INFO *here,
+ MODE_INFO *lf_here,
+ MV_REFERENCE_FRAME ref_frame,
+ int_mv *mv_ref_list,
+ int *ref_sign_bias,
+ int block_idx);
+
+static INLINE void vp9_find_mv_refs(VP9_COMMON *cm,
+ MACROBLOCKD *xd,
+ MODE_INFO *here,
+ MODE_INFO *lf_here,
+ MV_REFERENCE_FRAME ref_frame,
+ int_mv *mv_ref_list,
+ int *ref_sign_bias) {
+ vp9_find_mv_refs_idx(cm, xd, here, lf_here, ref_frame,
+ mv_ref_list, ref_sign_bias, -1);
+}
+
+#endif // VP9_COMMON_VP9_MVREF_COMMON_H_
diff --git a/libvpx/vp9/common/vp9_onyx.h b/libvpx/vp9/common/vp9_onyx.h
new file mode 100644
index 0000000..b85b889
--- /dev/null
+++ b/libvpx/vp9/common/vp9_onyx.h
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_ONYX_H_
+#define VP9_COMMON_VP9_ONYX_H_
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include "./vpx_config.h"
+#include "vpx/internal/vpx_codec_internal.h"
+#include "vpx/vp8cx.h"
+#include "vpx_scale/yv12config.h"
+#include "vp9/common/vp9_ppflags.h"
+
+#define MAX_MB_SEGMENTS 8
+
+ typedef int *VP9_PTR;
+
+ /* Create/destroy static data structures. */
+
+ typedef enum {
+ NORMAL = 0,
+ FOURFIVE = 1,
+ THREEFIVE = 2,
+ ONETWO = 3
+
+ } VPX_SCALING;
+
+ typedef enum {
+ VP9_LAST_FLAG = 1,
+ VP9_GOLD_FLAG = 2,
+ VP9_ALT_FLAG = 4
+ } VP9_REFFRAME;
+
+
+ typedef enum {
+ USAGE_STREAM_FROM_SERVER = 0x0,
+ USAGE_LOCAL_FILE_PLAYBACK = 0x1,
+ USAGE_CONSTRAINED_QUALITY = 0x2
+ } END_USAGE;
+
+
+ typedef enum {
+ MODE_GOODQUALITY = 0x1,
+ MODE_BESTQUALITY = 0x2,
+ MODE_FIRSTPASS = 0x3,
+ MODE_SECONDPASS = 0x4,
+ MODE_SECONDPASS_BEST = 0x5,
+ } MODE;
+
+ typedef enum {
+ FRAMEFLAGS_KEY = 1,
+ FRAMEFLAGS_GOLDEN = 2,
+ FRAMEFLAGS_ALTREF = 4,
+ } FRAMETYPE_FLAGS;
+
+
+#include <assert.h>
+ static INLINE void Scale2Ratio(int mode, int *hr, int *hs) {
+ switch (mode) {
+ case NORMAL:
+ *hr = 1;
+ *hs = 1;
+ break;
+ case FOURFIVE:
+ *hr = 4;
+ *hs = 5;
+ break;
+ case THREEFIVE:
+ *hr = 3;
+ *hs = 5;
+ break;
+ case ONETWO:
+ *hr = 1;
+ *hs = 2;
+ break;
+ default:
+ *hr = 1;
+ *hs = 1;
+ assert(0);
+ break;
+ }
+ }
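+
+ // Example (illustrative): FOURFIVE yields hr/hs = 4/5, so a 640-pixel
+ // dimension scales to 640 * 4 / 5 = 512, while ONETWO (1/2) maps 640 to 320.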
+
+ typedef struct {
+ int version; // 4 versions of bitstream defined:
+ // 0 - best quality/slowest decode,
+ // 3 - lowest quality/fastest decode
+ int width; // width of data passed to the compressor
+ int height; // height of data passed to the compressor
+ double frame_rate; // set to the passed-in frame rate
+ int64_t target_bandwidth; // bandwidth to be used in kilobits per second
+
+ int noise_sensitivity; // parameter used for applying pre-processing blur: recommendation 0
+ int Sharpness; // parameter used for sharpening output: recommendation 0
+ int cpu_used;
+ unsigned int rc_max_intra_bitrate_pct;
+
+ // mode ->
+ // (0)=Realtime/Live Encoding. This mode is optimized for realtime encoding (for example, capturing
+ // a television signal or feed from a live camera). ( speed setting controls how fast )
+ // (1)=Good Quality Fast Encoding. The encoder balances quality with the amount of time it takes to
+ // encode the output. ( speed setting controls how fast )
+ // (2)=One Pass - Best Quality. The encoder places priority on the quality of the output over encoding
+ // speed. The output is compressed at the highest possible quality. This option takes the longest
+ // amount of time to encode. ( speed setting ignored )
+ // (3)=Two Pass - First Pass. The encoder generates a file of statistics for use in the second encoding
+ // pass. ( speed setting controls how fast )
+ // (4)=Two Pass - Second Pass. The encoder uses the statistics that were generated in the first encoding
+ // pass to create the compressed output. ( speed setting controls how fast )
+ // (5)=Two Pass - Second Pass Best. The encoder uses the statistics that were generated in the first
+ //    encoding pass to create the compressed output at the highest possible quality, which takes a
+ //    longer amount of time to encode. ( speed setting ignored )
+ int Mode;
+
+ // Key Framing Operations
+ int auto_key; // automatically detect cut scenes and set the keyframes
+ int key_freq; // maximum distance to key frame.
+
+ int allow_lag; // allow lagged compression (if 0, lag_in_frames is ignored)
+ int lag_in_frames; // how many frames lag before we start encoding
+
+ // ----------------------------------------------------------------
+ // DATARATE CONTROL OPTIONS
+
+ int end_usage; // vbr or cbr
+
+ // buffer targeting aggressiveness
+ int under_shoot_pct;
+ int over_shoot_pct;
+
+ // buffering parameters
+ int64_t starting_buffer_level; // in seconds
+ int64_t optimal_buffer_level;
+ int64_t maximum_buffer_size;
+
+ // controlling quality
+ int fixed_q;
+ int worst_allowed_q;
+ int best_allowed_q;
+ int cq_level;
+ int lossless;
+
+ // two pass datarate control
+ int two_pass_vbrbias; // two pass datarate control tweaks
+ int two_pass_vbrmin_section;
+ int two_pass_vbrmax_section;
+ // END DATARATE CONTROL OPTIONS
+ // ----------------------------------------------------------------
+
+
+ // These parameters are not to be used in the final build; do not use!
+ int play_alternate;
+ int alt_freq;
+
+ int encode_breakout; // early breakout encode threshold : for video conf recommend 800
+
+ /* Bitfield defining the error resiliency features to enable.
+ * Can provide decodable frames after losses in previous
+ * frames and decodable partitions after losses in the same frame.
+ */
+ unsigned int error_resilient_mode;
+
+ /* Bitfield defining the parallel decoding mode where the
+ * decoding in successive frames may be conducted in parallel
+ * just by decoding the frame headers.
+ */
+ unsigned int frame_parallel_decoding_mode;
+
+ int arnr_max_frames;
+ int arnr_strength;
+ int arnr_type;
+
+ int tile_columns;
+ int tile_rows;
+
+ struct vpx_fixed_buf two_pass_stats_in;
+ struct vpx_codec_pkt_list *output_pkt_list;
+
+ vp8e_tuning tuning;
+ } VP9_CONFIG;
+
+
+ void vp9_initialize_enc();
+
+ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf);
+ void vp9_remove_compressor(VP9_PTR *comp);
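+
+ // A minimal lifecycle sketch (illustrative only; the field values below are
+ // assumptions, not defaults defined by this header):
+ //
+ //   VP9_CONFIG oxcf = {0};
+ //   oxcf.width = 640;
+ //   oxcf.height = 480;
+ //   oxcf.frame_rate = 30.0;
+ //   oxcf.target_bandwidth = 500;   // kilobits per second
+ //   oxcf.Mode = MODE_GOODQUALITY;
+ //   VP9_PTR enc = vp9_create_compressor(&oxcf);
+ //   ... feed frames / fetch packets ...
+ //   vp9_remove_compressor(&enc);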
+
+ void vp9_change_config(VP9_PTR onyx, VP9_CONFIG *oxcf);
+
+// Receive a frame's worth of data. The caller can assume that a copy of this
+// frame is made, not just a copy of the pointer.
+ int vp9_receive_raw_frame(VP9_PTR comp, unsigned int frame_flags,
+ YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+ int64_t end_time_stamp);
+
+ int vp9_get_compressed_data(VP9_PTR comp, unsigned int *frame_flags,
+ unsigned long *size, unsigned char *dest,
+ int64_t *time_stamp, int64_t *time_end,
+ int flush);
+
+ int vp9_get_preview_raw_frame(VP9_PTR comp, YV12_BUFFER_CONFIG *dest,
+ vp9_ppflags_t *flags);
+
+ int vp9_use_as_reference(VP9_PTR comp, int ref_frame_flags);
+
+ int vp9_update_reference(VP9_PTR comp, int ref_frame_flags);
+
+ int vp9_copy_reference_enc(VP9_PTR comp, VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
+
+ int vp9_get_reference_enc(VP9_PTR ptr, int index, YV12_BUFFER_CONFIG **fb);
+
+ int vp9_set_reference_enc(VP9_PTR comp, VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
+
+ int vp9_update_entropy(VP9_PTR comp, int update);
+
+ int vp9_set_roimap(VP9_PTR comp, unsigned char *map,
+ unsigned int rows, unsigned int cols,
+ int delta_q[MAX_MB_SEGMENTS],
+ int delta_lf[MAX_MB_SEGMENTS],
+ unsigned int threshold[MAX_MB_SEGMENTS]);
+
+ int vp9_set_active_map(VP9_PTR comp, unsigned char *map,
+ unsigned int rows, unsigned int cols);
+
+ int vp9_set_internal_size(VP9_PTR comp,
+ VPX_SCALING horiz_mode, VPX_SCALING vert_mode);
+
+ int vp9_get_quantizer(VP9_PTR c);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // VP9_COMMON_VP9_ONYX_H_
diff --git a/libvpx/vp9/common/vp9_onyxc_int.h b/libvpx/vp9/common/vp9_onyxc_int.h
new file mode 100644
index 0000000..0d8b0f4
--- /dev/null
+++ b/libvpx/vp9/common/vp9_onyxc_int.h
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_ONYXC_INT_H_
+#define VP9_COMMON_VP9_ONYXC_INT_H_
+
+#include "vpx_config.h"
+#include "vpx/internal/vpx_codec_internal.h"
+#include "vp9_rtcd.h"
+#include "vp9/common/vp9_loopfilter.h"
+#include "vp9/common/vp9_entropymv.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_quant_common.h"
+
+#if CONFIG_POSTPROC
+#include "vp9/common/vp9_postproc.h"
+#endif
+
+/* Create/destroy static data structures. */
+
+// Define the number of candidate reference buffers.
+#define NUM_REF_FRAMES 8
+#define NUM_REF_FRAMES_LG2 3
+
+#define ALLOWED_REFS_PER_FRAME 3
+
+// 1 scratch frame for the new frame, 3 for scaled references on the encoder
+// TODO(jkoleszar): These 3 extra references could probably come from the
+// normal reference pool.
+#define NUM_YV12_BUFFERS (NUM_REF_FRAMES + 4)
+
+#define NUM_FRAME_CONTEXTS_LG2 2
+#define NUM_FRAME_CONTEXTS (1 << NUM_FRAME_CONTEXTS_LG2)
+
+#define MAX_LAG_BUFFERS 25
+
+typedef struct frame_contexts {
+ vp9_prob y_mode_prob[BLOCK_SIZE_GROUPS][VP9_INTRA_MODES - 1];
+ vp9_prob uv_mode_prob[VP9_INTRA_MODES][VP9_INTRA_MODES - 1];
+ vp9_prob partition_prob[NUM_FRAME_TYPES][NUM_PARTITION_CONTEXTS]
+ [PARTITION_TYPES - 1];
+
+ nmv_context nmvc;
+ nmv_context pre_nmvc;
+ /* interframe intra mode probs */
+ vp9_prob pre_y_mode_prob[BLOCK_SIZE_GROUPS][VP9_INTRA_MODES - 1];
+ vp9_prob pre_uv_mode_prob[VP9_INTRA_MODES][VP9_INTRA_MODES - 1];
+ vp9_prob pre_partition_prob[NUM_PARTITION_CONTEXTS][PARTITION_TYPES - 1];
+ /* interframe intra mode probs */
+ unsigned int y_mode_counts[BLOCK_SIZE_GROUPS][VP9_INTRA_MODES];
+ unsigned int uv_mode_counts[VP9_INTRA_MODES][VP9_INTRA_MODES];
+ unsigned int partition_counts[NUM_PARTITION_CONTEXTS][PARTITION_TYPES];
+
+ vp9_coeff_probs_model coef_probs[TX_SIZE_MAX_SB][BLOCK_TYPES];
+ vp9_coeff_probs_model pre_coef_probs[TX_SIZE_MAX_SB][BLOCK_TYPES];
+ vp9_coeff_count_model coef_counts[TX_SIZE_MAX_SB][BLOCK_TYPES];
+ unsigned int eob_branch_counts[TX_SIZE_MAX_SB][BLOCK_TYPES][REF_TYPES]
+ [COEF_BANDS][PREV_COEF_CONTEXTS];
+
+ nmv_context_counts NMVcount;
+ vp9_prob switchable_interp_prob[VP9_SWITCHABLE_FILTERS + 1]
+ [VP9_SWITCHABLE_FILTERS - 1];
+ vp9_prob pre_switchable_interp_prob[VP9_SWITCHABLE_FILTERS + 1]
+ [VP9_SWITCHABLE_FILTERS - 1];
+ unsigned int switchable_interp_count[VP9_SWITCHABLE_FILTERS + 1]
+ [VP9_SWITCHABLE_FILTERS];
+
+ vp9_prob inter_mode_probs[INTER_MODE_CONTEXTS][VP9_INTER_MODES - 1];
+ vp9_prob pre_inter_mode_probs[INTER_MODE_CONTEXTS][VP9_INTER_MODES - 1];
+ unsigned int inter_mode_counts[INTER_MODE_CONTEXTS][VP9_INTER_MODES - 1][2];
+
+ vp9_prob intra_inter_prob[INTRA_INTER_CONTEXTS];
+ vp9_prob comp_inter_prob[COMP_INTER_CONTEXTS];
+ vp9_prob single_ref_prob[REF_CONTEXTS][2];
+ vp9_prob comp_ref_prob[REF_CONTEXTS];
+ vp9_prob pre_intra_inter_prob[INTRA_INTER_CONTEXTS];
+ vp9_prob pre_comp_inter_prob[COMP_INTER_CONTEXTS];
+ vp9_prob pre_single_ref_prob[REF_CONTEXTS][2];
+ vp9_prob pre_comp_ref_prob[REF_CONTEXTS];
+ unsigned int intra_inter_count[INTRA_INTER_CONTEXTS][2];
+ unsigned int comp_inter_count[COMP_INTER_CONTEXTS][2];
+ unsigned int single_ref_count[REF_CONTEXTS][2][2];
+ unsigned int comp_ref_count[REF_CONTEXTS][2];
+
+ vp9_prob tx_probs_32x32p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 1];
+ vp9_prob tx_probs_16x16p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 2];
+ vp9_prob tx_probs_8x8p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 3];
+ vp9_prob pre_tx_probs_32x32p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 1];
+ vp9_prob pre_tx_probs_16x16p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 2];
+ vp9_prob pre_tx_probs_8x8p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 3];
+ unsigned int tx_count_32x32p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB];
+ unsigned int tx_count_16x16p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 1];
+ unsigned int tx_count_8x8p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 2];
+
+ vp9_prob mbskip_probs[MBSKIP_CONTEXTS];
+ vp9_prob pre_mbskip_probs[MBSKIP_CONTEXTS];
+ unsigned int mbskip_count[MBSKIP_CONTEXTS][2];
+} FRAME_CONTEXT;
+
+typedef enum {
+ SINGLE_PREDICTION_ONLY = 0,
+ COMP_PREDICTION_ONLY = 1,
+ HYBRID_PREDICTION = 2,
+ NB_PREDICTION_TYPES = 3,
+} COMPPREDMODE_TYPE;
+
+typedef enum {
+ ONLY_4X4 = 0,
+ ALLOW_8X8 = 1,
+ ALLOW_16X16 = 2,
+ ALLOW_32X32 = 3,
+ TX_MODE_SELECT = 4,
+ NB_TXFM_MODES = 5,
+} TXFM_MODE;
+
+typedef struct VP9Common {
+ struct vpx_internal_error_info error;
+
+ DECLARE_ALIGNED(16, int16_t, y_dequant[QINDEX_RANGE][2]);
+ DECLARE_ALIGNED(16, int16_t, uv_dequant[QINDEX_RANGE][2]);
+#if CONFIG_ALPHA
+ DECLARE_ALIGNED(16, int16_t, a_dequant[QINDEX_RANGE][2]);
+#endif
+
+ int width;
+ int height;
+ int display_width;
+ int display_height;
+ int last_width;
+ int last_height;
+
+ // TODO(jkoleszar): this implies chroma ss right now, but could vary per
+ // plane. Revisit as part of the future change to YV12_BUFFER_CONFIG to
+ // support additional planes.
+ int subsampling_x;
+ int subsampling_y;
+
+ YUV_TYPE clr_type;
+
+ YV12_BUFFER_CONFIG *frame_to_show;
+
+ YV12_BUFFER_CONFIG yv12_fb[NUM_YV12_BUFFERS];
+ int fb_idx_ref_cnt[NUM_YV12_BUFFERS]; /* reference counts */
+ int ref_frame_map[NUM_REF_FRAMES]; /* maps fb_idx to reference slot */
+
+ // TODO(jkoleszar): could expand active_ref_idx to 4, with 0 as intra, and
+ // roll new_fb_idx into it.
+
+ // Each frame can reference ALLOWED_REFS_PER_FRAME buffers
+ int active_ref_idx[ALLOWED_REFS_PER_FRAME];
+ struct scale_factors active_ref_scale[ALLOWED_REFS_PER_FRAME];
+ int new_fb_idx;
+
+
+ YV12_BUFFER_CONFIG post_proc_buffer;
+ YV12_BUFFER_CONFIG temp_scale_frame;
+
+
+ FRAME_TYPE last_frame_type; /* Save last frame's frame type for motion search. */
+ FRAME_TYPE frame_type;
+
+ int show_frame;
+ int last_show_frame;
+
+ // Flag signaling that the frame is encoded using only INTRA modes.
+ int intra_only;
+
+ // Flag signaling that the frame context should be reset to default values.
+ // 0 or 1 means don't reset, 2 resets just the context specified in the
+ // frame header, and 3 resets all contexts.
+ int reset_frame_context;
+
+ int frame_flags;
+ // MBs: mb_rows/cols are in 16-pixel units; mi_rows/cols are in
+ // MODE_INFO (8-pixel) units.
+ int MBs;
+ int mb_rows, mi_rows;
+ int mb_cols, mi_cols;
+ int mode_info_stride;
+
+ /* profile settings */
+ TXFM_MODE txfm_mode;
+
+ int base_qindex;
+ int last_kf_gf_q; /* Q used on the last GF or KF */
+
+ int y_dc_delta_q;
+ int uv_dc_delta_q;
+ int uv_ac_delta_q;
+#if CONFIG_ALPHA
+ int a_dc_delta_q;
+ int a_ac_delta_q;
+#endif
+
+ unsigned int frames_since_golden;
+ unsigned int frames_till_alt_ref_frame;
+
+ /* We allocate a MODE_INFO struct for each macroblock, together with
+ an extra row on top and column on the left to simplify prediction. */
+
+ MODE_INFO *mip; /* Base of allocated array */
+ MODE_INFO *mi; /* Corresponds to upper left visible macroblock */
+ MODE_INFO *prev_mip; /* MODE_INFO array 'mip' from last decoded frame */
+ MODE_INFO *prev_mi; /* 'mi' from last frame (points into prev_mip) */
+
+
+ // Persistent mb segment id map used in prediction.
+ unsigned char *last_frame_seg_map;
+
+ INTERPOLATIONFILTERTYPE mcomp_filter_type;
+
+ loop_filter_info_n lf_info;
+
+ int filter_level;
+ int last_sharpness_level;
+ int sharpness_level;
+
+ int refresh_frame_context; /* Two state 0 = NO, 1 = YES */
+
+ int ref_frame_sign_bias[MAX_REF_FRAMES]; /* Two state 0, 1 */
+
+ /* Y,U,V */
+ ENTROPY_CONTEXT *above_context[MAX_MB_PLANE];
+ ENTROPY_CONTEXT left_context[MAX_MB_PLANE][16];
+
+ // partition contexts
+ PARTITION_CONTEXT *above_seg_context;
+ PARTITION_CONTEXT left_seg_context[8];
+
+ /* keyframe block modes are predicted by their above, left neighbors */
+
+ vp9_prob kf_y_mode_prob[VP9_INTRA_MODES]
+ [VP9_INTRA_MODES]
+ [VP9_INTRA_MODES - 1];
+ vp9_prob kf_uv_mode_prob[VP9_INTRA_MODES] [VP9_INTRA_MODES - 1];
+
+ // Context probabilities when using predictive coding of segment id
+ vp9_prob segment_pred_probs[PREDICTION_PROBS];
+ unsigned char temporal_update;
+
+ // Context probabilities for reference frame prediction
+ int allow_comp_inter_inter;
+ MV_REFERENCE_FRAME comp_fixed_ref;
+ MV_REFERENCE_FRAME comp_var_ref[2];
+ COMPPREDMODE_TYPE comp_pred_mode;
+
+ FRAME_CONTEXT fc; /* this frame entropy */
+ FRAME_CONTEXT frame_contexts[NUM_FRAME_CONTEXTS];
+ unsigned int frame_context_idx; /* Context to use/update */
+
+ unsigned int current_video_frame;
+ int near_boffset[3];
+ int version;
+
+ double bitrate;
+ double framerate;
+
+#if CONFIG_POSTPROC
+ struct postproc_state postproc_state;
+#endif
+
+ int error_resilient_mode;
+ int frame_parallel_decoding_mode;
+
+ int tile_columns, log2_tile_columns;
+ int cur_tile_mi_col_start, cur_tile_mi_col_end, cur_tile_col_idx;
+ int tile_rows, log2_tile_rows;
+ int cur_tile_mi_row_start, cur_tile_mi_row_end, cur_tile_row_idx;
+} VP9_COMMON;
+
+static int get_free_fb(VP9_COMMON *cm) {
+ int i;
+ for (i = 0; i < NUM_YV12_BUFFERS; i++)
+ if (cm->fb_idx_ref_cnt[i] == 0)
+ break;
+
+ assert(i < NUM_YV12_BUFFERS);
+ cm->fb_idx_ref_cnt[i] = 1;
+ return i;
+}
+
+static void ref_cnt_fb(int *buf, int *idx, int new_idx) {
+ if (buf[*idx] > 0)
+ buf[*idx]--;
+
+ *idx = new_idx;
+
+ buf[new_idx]++;
+}
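+
+// Illustrative sketch (assumed caller, not part of this header): moving a
+// freshly decoded buffer into a reference slot keeps the counts balanced:
+//
+//   int new_fb = get_free_fb(cm);              // count[new_fb] becomes 1
+//   /* ... decode into cm->yv12_fb[new_fb] ... */
+//   ref_cnt_fb(cm->fb_idx_ref_cnt,
+//              &cm->ref_frame_map[0], new_fb); // old ref count drops by one,
+//                                              // count[new_fb] becomes 2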
+
+static int mi_cols_aligned_to_sb(VP9_COMMON *cm) {
+ return 2 * ((cm->mb_cols + 3) & ~3);
+}
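+
+// Example (illustrative): for mb_cols = 17, (17 + 3) & ~3 = 20, so the
+// function returns 40 MODE_INFO columns -- the macroblock count rounded up
+// to a whole number of 64-pixel superblocks.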
+
+static INLINE void set_partition_seg_context(VP9_COMMON *cm,
+ MACROBLOCKD *xd,
+ int mi_row, int mi_col) {
+ xd->above_seg_context = cm->above_seg_context + mi_col;
+ xd->left_seg_context = cm->left_seg_context + (mi_row & MI_MASK);
+}
+
+static int check_bsize_coverage(VP9_COMMON *cm, MACROBLOCKD *xd,
+ int mi_row, int mi_col,
+ BLOCK_SIZE_TYPE bsize) {
+ int bsl = mi_width_log2(bsize), bs = 1 << bsl;
+ int ms = bs / 2;
+
+ if ((mi_row + ms < cm->mi_rows) && (mi_col + ms < cm->mi_cols))
+ return 0;
+
+ // Frame width/height are multiples of 8, hence an 8x8 block should always
+ // pass the above check.
+ assert(bsize > BLOCK_SIZE_SB8X8);
+
+ // return the node index in the prob tree for binary coding
+ // only allow horizontal/split partition types
+ if ((mi_col + ms < cm->mi_cols) && (mi_row + ms >= cm->mi_rows))
+ return 1;
+ // only allow vertical/split partition types
+ if ((mi_row + ms < cm->mi_rows) && (mi_col + ms >= cm->mi_cols))
+ return 2;
+
+ return -1;
+}
+
+static void set_mi_row_col(VP9_COMMON *cm, MACROBLOCKD *xd,
+ int mi_row, int bh,
+ int mi_col, int bw) {
+ xd->mb_to_top_edge = -((mi_row * MI_SIZE) << 3);
+ xd->mb_to_bottom_edge = ((cm->mi_rows - bh - mi_row) * MI_SIZE) << 3;
+ xd->mb_to_left_edge = -((mi_col * MI_SIZE) << 3);
+ xd->mb_to_right_edge = ((cm->mi_cols - bw - mi_col) * MI_SIZE) << 3;
+
+ // Are edges available for intra prediction?
+ xd->up_available = (mi_row != 0);
+ xd->left_available = (mi_col > cm->cur_tile_mi_col_start);
+ xd->right_available = (mi_col + bw < cm->cur_tile_mi_col_end);
+}
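+
+// Example (illustrative): all four edges are stored in 1/8-pel units. With
+// MI_SIZE = 8 and mi_row = 2, mb_to_top_edge = -((2 * 8) << 3) = -128, i.e.
+// the top of the frame lies 16 pixels (128 eighth-pels) above this block.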
+
+static int get_mi_row(const MACROBLOCKD *xd) {
+ return ((-xd->mb_to_top_edge) >> (3 + LOG2_MI_SIZE));
+}
+
+static int get_mi_col(const MACROBLOCKD *xd) {
+ return ((-xd->mb_to_left_edge) >> (3 + LOG2_MI_SIZE));
+}
+
+static int get_token_alloc(int mb_rows, int mb_cols) {
+ return mb_rows * mb_cols * (48 * 16 + 4);
+}
+
+static void set_prev_mi(VP9_COMMON *cm) {
+ const int use_prev_in_find_mv_refs = cm->width == cm->last_width &&
+ cm->height == cm->last_height &&
+ !cm->error_resilient_mode &&
+ !cm->intra_only &&
+ cm->last_show_frame;
+ // Special case: set prev_mi to NULL when the previous mode info
+ // context cannot be used.
+ cm->prev_mi = use_prev_in_find_mv_refs ?
+ cm->prev_mip + cm->mode_info_stride + 1 : NULL;
+}
+#endif // VP9_COMMON_VP9_ONYXC_INT_H_
diff --git a/libvpx/vp9/common/vp9_postproc.c b/libvpx/vp9/common/vp9_postproc.c
new file mode 100644
index 0000000..4282ddd
--- /dev/null
+++ b/libvpx/vp9/common/vp9_postproc.c
@@ -0,0 +1,1017 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "./vpx_config.h"
+#include "vpx_scale/yv12config.h"
+#include "vp9/common/vp9_postproc.h"
+#include "vp9/common/vp9_textblit.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "./vp9_rtcd.h"
+#include "./vpx_scale_rtcd.h"
+
+
+#include <math.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#define RGB_TO_YUV(t) \
+ ( (0.257*(float)(t >> 16)) + (0.504*(float)(t >> 8 & 0xff)) + \
+ (0.098*(float)(t & 0xff)) + 16), \
+ (-(0.148*(float)(t >> 16)) - (0.291*(float)(t >> 8 & 0xff)) + \
+ (0.439*(float)(t & 0xff)) + 128), \
+ ( (0.439*(float)(t >> 16)) - (0.368*(float)(t >> 8 & 0xff)) - \
+ (0.071*(float)(t & 0xff)) + 128)
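+
+/* Example (illustrative): the macro expands to three comma-separated Y, U, V
+ * values, so one invocation initializes a whole {Y, U, V} table row below;
+ * RGB_TO_YUV(0xFF0000) maps pure red to approximately Y=82, U=90, V=240. */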
+
+/* global constants */
+#if 0 && CONFIG_POSTPROC_VISUALIZER
+static const unsigned char MB_PREDICTION_MODE_colors[MB_MODE_COUNT][3] = {
+ { RGB_TO_YUV(0x98FB98) }, /* PaleGreen */
+ { RGB_TO_YUV(0x00FF00) }, /* Green */
+ { RGB_TO_YUV(0xADFF2F) }, /* GreenYellow */
+ { RGB_TO_YUV(0x8F0000) }, /* Dark Red */
+ { RGB_TO_YUV(0x008F8F) }, /* Dark Cyan */
+ { RGB_TO_YUV(0x008F8F) }, /* Dark Cyan */
+ { RGB_TO_YUV(0x008F8F) }, /* Dark Cyan */
+ { RGB_TO_YUV(0x8F0000) }, /* Dark Red */
+ { RGB_TO_YUV(0x8F0000) }, /* Dark Red */
+ { RGB_TO_YUV(0x228B22) }, /* ForestGreen */
+ { RGB_TO_YUV(0x006400) }, /* DarkGreen */
+ { RGB_TO_YUV(0x98F5FF) }, /* Cadet Blue */
+ { RGB_TO_YUV(0x6CA6CD) }, /* Sky Blue */
+ { RGB_TO_YUV(0x00008B) }, /* Dark blue */
+ { RGB_TO_YUV(0x551A8B) }, /* Purple */
+ { RGB_TO_YUV(0xFF0000) } /* Red */
+ { RGB_TO_YUV(0xCC33FF) }, /* Magenta */
+};
+
+static const unsigned char B_PREDICTION_MODE_colors[VP9_INTRA_MODES][3] = {
+ { RGB_TO_YUV(0x6633ff) }, /* Purple */
+ { RGB_TO_YUV(0xcc33ff) }, /* Magenta */
+ { RGB_TO_YUV(0xff33cc) }, /* Pink */
+ { RGB_TO_YUV(0xff3366) }, /* Coral */
+ { RGB_TO_YUV(0x3366ff) }, /* Blue */
+ { RGB_TO_YUV(0xed00f5) }, /* Dark Blue */
+ { RGB_TO_YUV(0x2e00b8) }, /* Dark Purple */
+ { RGB_TO_YUV(0xff6633) }, /* Orange */
+ { RGB_TO_YUV(0x33ccff) }, /* Light Blue */
+ { RGB_TO_YUV(0x8ab800) }, /* Green */
+ { RGB_TO_YUV(0xffcc33) }, /* Light Orange */
+ { RGB_TO_YUV(0x33ffcc) }, /* Aqua */
+ { RGB_TO_YUV(0x66ff33) }, /* Light Green */
+ { RGB_TO_YUV(0xccff33) }, /* Yellow */
+};
+
+static const unsigned char MV_REFERENCE_FRAME_colors[MAX_REF_FRAMES][3] = {
+ { RGB_TO_YUV(0x00ff00) }, /* Green */
+ { RGB_TO_YUV(0x0000ff) }, /* Blue */
+ { RGB_TO_YUV(0xffff00) }, /* Yellow */
+ { RGB_TO_YUV(0xff0000) }, /* Red */
+};
+#endif
+
+static const short kernel5[] = {
+ 1, 1, 4, 1, 1
+};
+
+const short vp9_rv[] = {
+ 8, 5, 2, 2, 8, 12, 4, 9, 8, 3,
+ 0, 3, 9, 0, 0, 0, 8, 3, 14, 4,
+ 10, 1, 11, 14, 1, 14, 9, 6, 12, 11,
+ 8, 6, 10, 0, 0, 8, 9, 0, 3, 14,
+ 8, 11, 13, 4, 2, 9, 0, 3, 9, 6,
+ 1, 2, 3, 14, 13, 1, 8, 2, 9, 7,
+ 3, 3, 1, 13, 13, 6, 6, 5, 2, 7,
+ 11, 9, 11, 8, 7, 3, 2, 0, 13, 13,
+ 14, 4, 12, 5, 12, 10, 8, 10, 13, 10,
+ 4, 14, 4, 10, 0, 8, 11, 1, 13, 7,
+ 7, 14, 6, 14, 13, 2, 13, 5, 4, 4,
+ 0, 10, 0, 5, 13, 2, 12, 7, 11, 13,
+ 8, 0, 4, 10, 7, 2, 7, 2, 2, 5,
+ 3, 4, 7, 3, 3, 14, 14, 5, 9, 13,
+ 3, 14, 3, 6, 3, 0, 11, 8, 13, 1,
+ 13, 1, 12, 0, 10, 9, 7, 6, 2, 8,
+ 5, 2, 13, 7, 1, 13, 14, 7, 6, 7,
+ 9, 6, 10, 11, 7, 8, 7, 5, 14, 8,
+ 4, 4, 0, 8, 7, 10, 0, 8, 14, 11,
+ 3, 12, 5, 7, 14, 3, 14, 5, 2, 6,
+ 11, 12, 12, 8, 0, 11, 13, 1, 2, 0,
+ 5, 10, 14, 7, 8, 0, 4, 11, 0, 8,
+ 0, 3, 10, 5, 8, 0, 11, 6, 7, 8,
+ 10, 7, 13, 9, 2, 5, 1, 5, 10, 2,
+ 4, 3, 5, 6, 10, 8, 9, 4, 11, 14,
+ 0, 10, 0, 5, 13, 2, 12, 7, 11, 13,
+ 8, 0, 4, 10, 7, 2, 7, 2, 2, 5,
+ 3, 4, 7, 3, 3, 14, 14, 5, 9, 13,
+ 3, 14, 3, 6, 3, 0, 11, 8, 13, 1,
+ 13, 1, 12, 0, 10, 9, 7, 6, 2, 8,
+ 5, 2, 13, 7, 1, 13, 14, 7, 6, 7,
+ 9, 6, 10, 11, 7, 8, 7, 5, 14, 8,
+ 4, 4, 0, 8, 7, 10, 0, 8, 14, 11,
+ 3, 12, 5, 7, 14, 3, 14, 5, 2, 6,
+ 11, 12, 12, 8, 0, 11, 13, 1, 2, 0,
+ 5, 10, 14, 7, 8, 0, 4, 11, 0, 8,
+ 0, 3, 10, 5, 8, 0, 11, 6, 7, 8,
+ 10, 7, 13, 9, 2, 5, 1, 5, 10, 2,
+ 4, 3, 5, 6, 10, 8, 9, 4, 11, 14,
+ 3, 8, 3, 7, 8, 5, 11, 4, 12, 3,
+ 11, 9, 14, 8, 14, 13, 4, 3, 1, 2,
+ 14, 6, 5, 4, 4, 11, 4, 6, 2, 1,
+ 5, 8, 8, 12, 13, 5, 14, 10, 12, 13,
+ 0, 9, 5, 5, 11, 10, 13, 9, 10, 13,
+};
+
+
+void vp9_post_proc_down_and_across_c(const uint8_t *src_ptr,
+ uint8_t *dst_ptr,
+ int src_pixels_per_line,
+ int dst_pixels_per_line,
+ int rows,
+ int cols,
+ int flimit) {
+ uint8_t const *p_src;
+ uint8_t *p_dst;
+ int row;
+ int col;
+ int i;
+ int v;
+ int pitch = src_pixels_per_line;
+ uint8_t d[8];
+ (void)dst_pixels_per_line;
+
+ for (row = 0; row < rows; row++) {
+ /* post_proc_down for one row */
+ p_src = src_ptr;
+ p_dst = dst_ptr;
+
+ for (col = 0; col < cols; col++) {
+
+ int kernel = 4;
+ int v = p_src[col];
+
+ for (i = -2; i <= 2; i++) {
+ if (abs(v - p_src[col + i * pitch]) > flimit)
+ goto down_skip_convolve;
+
+ kernel += kernel5[2 + i] * p_src[col + i * pitch];
+ }
+
+ v = (kernel >> 3);
+ down_skip_convolve:
+ p_dst[col] = v;
+ }
+
+ /* now post_proc_across */
+ p_src = dst_ptr;
+ p_dst = dst_ptr;
+
+ for (i = 0; i < 8; i++)
+ d[i] = p_src[i];
+
+ for (col = 0; col < cols; col++) {
+ int kernel = 4;
+ v = p_src[col];
+
+ d[col & 7] = v;
+
+ for (i = -2; i <= 2; i++) {
+ if (abs(v - p_src[col + i]) > flimit)
+ goto across_skip_convolve;
+
+ kernel += kernel5[2 + i] * p_src[col + i];
+ }
+
+ d[col & 7] = (kernel >> 3);
+ across_skip_convolve:
+
+ if (col >= 2)
+ p_dst[col - 2] = d[(col - 2) & 7];
+ }
+
+ /* handle the last two pixels */
+ p_dst[col - 2] = d[(col - 2) & 7];
+ p_dst[col - 1] = d[(col - 1) & 7];
+
+
+ /* next row */
+ src_ptr += pitch;
+ dst_ptr += pitch;
+ }
+}
+
+static int q2mbl(int x) {
+ if (x < 20) x = 20;
+
+ x = 50 + (x - 50) * 10 / 8;
+ return x * x / 3;
+}
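+
+// Example (illustrative): q2mbl(40) -> x = 50 + (40 - 50) * 10 / 8 = 38,
+// then 38 * 38 / 3 = 481. Inputs below 20 are first clamped to 20.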
+
+void vp9_mbpost_proc_across_ip_c(uint8_t *src, int pitch,
+ int rows, int cols, int flimit) {
+ int r, c, i;
+
+ uint8_t *s = src;
+ uint8_t d[16];
+
+
+ for (r = 0; r < rows; r++) {
+ int sumsq = 0;
+ int sum = 0;
+
+ for (i = -8; i <= 6; i++) {
+ sumsq += s[i] * s[i];
+ sum += s[i];
+ d[i + 8] = 0;
+ }
+
+ for (c = 0; c < cols + 8; c++) {
+ int x = s[c + 7] - s[c - 8];
+ int y = s[c + 7] + s[c - 8];
+
+ sum += x;
+ sumsq += x * y;
+
+ d[c & 15] = s[c];
+
+ if (sumsq * 15 - sum * sum < flimit) {
+ d[c & 15] = (8 + sum + s[c]) >> 4;
+ }
+
+ s[c - 8] = d[(c - 8) & 15];
+ }
+
+ s += pitch;
+ }
+}
+
+void vp9_mbpost_proc_down_c(uint8_t *dst, int pitch,
+ int rows, int cols, int flimit) {
+ int r, c, i;
+ const short *rv3 = &vp9_rv[63 & rand()];
+
+ for (c = 0; c < cols; c++) {
+ uint8_t *s = &dst[c];
+ int sumsq = 0;
+ int sum = 0;
+ uint8_t d[16];
+ const short *rv2 = rv3 + ((c * 17) & 127);
+
+ for (i = -8; i <= 6; i++) {
+ sumsq += s[i * pitch] * s[i * pitch];
+ sum += s[i * pitch];
+ }
+
+ for (r = 0; r < rows + 8; r++) {
+ sumsq += s[7 * pitch] * s[ 7 * pitch] - s[-8 * pitch] * s[-8 * pitch];
+ sum += s[7 * pitch] - s[-8 * pitch];
+ d[r & 15] = s[0];
+
+ if (sumsq * 15 - sum * sum < flimit) {
+ d[r & 15] = (rv2[r & 127] + sum + s[0]) >> 4;
+ }
+
+ s[-8 * pitch] = d[(r - 8) & 15];
+ s += pitch;
+ }
+ }
+}
+
+static void deblock_and_de_macro_block(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *post,
+ int q,
+ int low_var_thresh,
+ int flag) {
+ double level = 6.0e-05 * q * q * q - .0067 * q * q + .306 * q + .0065;
+ int ppl = (int)(level + .5);
+ (void) low_var_thresh;
+ (void) flag;
+
+ vp9_post_proc_down_and_across(source->y_buffer, post->y_buffer,
+ source->y_stride, post->y_stride,
+ source->y_height, source->y_width, ppl);
+
+ vp9_mbpost_proc_across_ip(post->y_buffer, post->y_stride, post->y_height,
+ post->y_width, q2mbl(q));
+
+ vp9_mbpost_proc_down(post->y_buffer, post->y_stride, post->y_height,
+ post->y_width, q2mbl(q));
+
+ vp9_post_proc_down_and_across(source->u_buffer, post->u_buffer,
+ source->uv_stride, post->uv_stride,
+ source->uv_height, source->uv_width, ppl);
+ vp9_post_proc_down_and_across(source->v_buffer, post->v_buffer,
+ source->uv_stride, post->uv_stride,
+ source->uv_height, source->uv_width, ppl);
+}
+
+void vp9_deblock(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst,
+ int q) {
+ const int ppl = (int)(6.0e-05 * q * q * q - 0.0067 * q * q + 0.306 * q
+ + 0.0065 + 0.5);
+ int i;
+
+ const uint8_t *const srcs[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
+ src->alpha_buffer};
+ const int src_strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
+ src->alpha_stride};
+ const int src_widths[4] = {src->y_width, src->uv_width, src->uv_width,
+ src->alpha_width};
+ const int src_heights[4] = {src->y_height, src->uv_height, src->uv_height,
+ src->alpha_height};
+
+ uint8_t *const dsts[4] = {dst->y_buffer, dst->u_buffer, dst->v_buffer,
+ dst->alpha_buffer};
+ const int dst_strides[4] = {dst->y_stride, dst->uv_stride, dst->uv_stride,
+ dst->alpha_stride};
+
+ for (i = 0; i < MAX_MB_PLANE; ++i)
+ vp9_post_proc_down_and_across(srcs[i], dsts[i],
+ src_strides[i], dst_strides[i],
+ src_heights[i], src_widths[i], ppl);
+}
+
+void vp9_denoise(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst,
+ int q) {
+ const int ppl = (int)(6.0e-05 * q * q * q - 0.0067 * q * q + 0.306 * q
+ + 0.0065 + 0.5);
+ int i;
+
+ const uint8_t *const srcs[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
+ src->alpha_buffer};
+ const int src_strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
+ src->alpha_stride};
+ const int src_widths[4] = {src->y_width, src->uv_width, src->uv_width,
+ src->alpha_width};
+ const int src_heights[4] = {src->y_height, src->uv_height, src->uv_height,
+ src->alpha_height};
+
+ uint8_t *const dsts[4] = {dst->y_buffer, dst->u_buffer, dst->v_buffer,
+ dst->alpha_buffer};
+ const int dst_strides[4] = {dst->y_stride, dst->uv_stride, dst->uv_stride,
+ dst->alpha_stride};
+
+ for (i = 0; i < MAX_MB_PLANE; ++i) {
+ const int src_stride = src_strides[i];
+ const uint8_t *const src = srcs[i] + 2 * src_stride + 2;
+ const int src_width = src_widths[i] - 4;
+ const int src_height = src_heights[i] - 4;
+
+ const int dst_stride = dst_strides[i];
+ uint8_t *const dst = dsts[i] + 2 * dst_stride + 2;
+
+ vp9_post_proc_down_and_across(src, dst, src_stride, dst_stride,
+ src_height, src_width, ppl);
+ }
+}
+
+double vp9_gaussian(double sigma, double mu, double x) {
+ return 1 / (sigma * sqrt(2.0 * 3.14159265)) *
+ (exp(-(x - mu) * (x - mu) / (2 * sigma * sigma)));
+}
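+
+// The expression above is the normal probability density
+//   f(x) = 1 / (sigma * sqrt(2 * pi)) * exp(-(x - mu)^2 / (2 * sigma^2)),
+// with pi approximated as 3.14159265.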
+
+static void fillrd(struct postproc_state *state, int q, int a) {
+ char char_dist[300];
+
+ double sigma;
+ int ai = a, qi = q, i;
+
+ vp9_clear_system_state();
+
+ sigma = ai + .5 + .6 * (63 - qi) / 63.0;
+
+ /* set up a lookup table of 256 entries that matches
+ * a gaussian distribution with sigma determined by q.
+ */
+ {
+ double i;
+ int next, j;
+
+ next = 0;
+
+ for (i = -32; i < 32; i++) {
+ int a = (int)(.5 + 256 * vp9_gaussian(sigma, 0, i));
+
+ if (a) {
+ for (j = 0; j < a; j++) {
+ char_dist[next + j] = (char) i;
+ }
+
+ next = next + j;
+ }
+
+ }
+
+ for (; next < 256; next++)
+ char_dist[next] = 0;
+ }
+
+ for (i = 0; i < 3072; i++) {
+ state->noise[i] = char_dist[rand() & 0xff];
+ }
+
+ for (i = 0; i < 16; i++) {
+ state->blackclamp[i] = -char_dist[0];
+ state->whiteclamp[i] = -char_dist[0];
+ state->bothclamp[i] = -2 * char_dist[0];
+ }
+
+ state->last_q = q;
+ state->last_noise = a;
+}
+
+/****************************************************************************
+ *
+ * ROUTINE : plane_add_noise_c
+ *
+ * INPUTS : unsigned char *Start starting address of buffer to
+ * add gaussian noise to
+ * unsigned int width width of plane
+ * unsigned int height height of plane
+ * int pitch distance between subsequent lines of frame
+ * int q quantizer used to determine amount of noise
+ * to add
+ *
+ * OUTPUTS : None.
+ *
+ * RETURNS : void.
+ *
+ * FUNCTION : adds gaussian noise to a plane of pixels
+ *
+ * SPECIAL NOTES : None.
+ *
+ ****************************************************************************/
+void vp9_plane_add_noise_c(uint8_t *start, char *noise,
+ char blackclamp[16],
+ char whiteclamp[16],
+ char bothclamp[16],
+ unsigned int width, unsigned int height, int pitch) {
+ unsigned int i, j;
+
+ for (i = 0; i < height; i++) {
+ uint8_t *pos = start + i * pitch;
+ char *ref = (char *)(noise + (rand() & 0xff)); // NOLINT
+
+ for (j = 0; j < width; j++) {
+ if (pos[j] < blackclamp[0])
+ pos[j] = blackclamp[0];
+
+ if (pos[j] > 255 + whiteclamp[0])
+ pos[j] = 255 + whiteclamp[0];
+
+ pos[j] += ref[j];
+ }
+ }
+}
+
+/* Blend the macro block with a solid colored square. Leave the
+ * edges unblended to give distinction to macro blocks in areas
+ * filled with the same color block.
+ */
+void vp9_blend_mb_inner_c(uint8_t *y, uint8_t *u, uint8_t *v,
+ int y1, int u1, int v1, int alpha, int stride) {
+ int i, j;
+ int y1_const = y1 * ((1 << 16) - alpha);
+ int u1_const = u1 * ((1 << 16) - alpha);
+ int v1_const = v1 * ((1 << 16) - alpha);
+
+ y += 2 * stride + 2;
+ for (i = 0; i < 12; i++) {
+ for (j = 0; j < 12; j++) {
+ y[j] = (y[j] * alpha + y1_const) >> 16;
+ }
+ y += stride;
+ }
+
+ stride >>= 1;
+
+ u += stride + 1;
+ v += stride + 1;
+
+ for (i = 0; i < 6; i++) {
+ for (j = 0; j < 6; j++) {
+ u[j] = (u[j] * alpha + u1_const) >> 16;
+ v[j] = (v[j] * alpha + v1_const) >> 16;
+ }
+ u += stride;
+ v += stride;
+ }
+}
+
+/* Blend only the edge of the macro block. Leave center
+ * unblended to allow for other visualizations to be layered.
+ */
+void vp9_blend_mb_outer_c(uint8_t *y, uint8_t *u, uint8_t *v,
+ int y1, int u1, int v1, int alpha, int stride) {
+ int i, j;
+ int y1_const = y1 * ((1 << 16) - alpha);
+ int u1_const = u1 * ((1 << 16) - alpha);
+ int v1_const = v1 * ((1 << 16) - alpha);
+
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 16; j++) {
+ y[j] = (y[j] * alpha + y1_const) >> 16;
+ }
+ y += stride;
+ }
+
+ for (i = 0; i < 12; i++) {
+ y[0] = (y[0] * alpha + y1_const) >> 16;
+ y[1] = (y[1] * alpha + y1_const) >> 16;
+ y[14] = (y[14] * alpha + y1_const) >> 16;
+ y[15] = (y[15] * alpha + y1_const) >> 16;
+ y += stride;
+ }
+
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 16; j++) {
+ y[j] = (y[j] * alpha + y1_const) >> 16;
+ }
+ y += stride;
+ }
+
+ stride >>= 1;
+
+ for (j = 0; j < 8; j++) {
+ u[j] = (u[j] * alpha + u1_const) >> 16;
+ v[j] = (v[j] * alpha + v1_const) >> 16;
+ }
+ u += stride;
+ v += stride;
+
+ for (i = 0; i < 6; i++) {
+ u[0] = (u[0] * alpha + u1_const) >> 16;
+ v[0] = (v[0] * alpha + v1_const) >> 16;
+
+ u[7] = (u[7] * alpha + u1_const) >> 16;
+ v[7] = (v[7] * alpha + v1_const) >> 16;
+
+ u += stride;
+ v += stride;
+ }
+
+ for (j = 0; j < 8; j++) {
+ u[j] = (u[j] * alpha + u1_const) >> 16;
+ v[j] = (v[j] * alpha + v1_const) >> 16;
+ }
+}
+
+void vp9_blend_b_c(uint8_t *y, uint8_t *u, uint8_t *v,
+ int y1, int u1, int v1, int alpha, int stride) {
+ int i, j;
+ int y1_const = y1 * ((1 << 16) - alpha);
+ int u1_const = u1 * ((1 << 16) - alpha);
+ int v1_const = v1 * ((1 << 16) - alpha);
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 4; j++) {
+ y[j] = (y[j] * alpha + y1_const) >> 16;
+ }
+ y += stride;
+ }
+
+ stride >>= 1;
+
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 2; j++) {
+ u[j] = (u[j] * alpha + u1_const) >> 16;
+ v[j] = (v[j] * alpha + v1_const) >> 16;
+ }
+ u += stride;
+ v += stride;
+ }
+}
+
+static void constrain_line(int x0, int *x1, int y0, int *y1,
+ int width, int height) {
+ int dx;
+ int dy;
+
+ if (*x1 > width) {
+ dx = *x1 - x0;
+ dy = *y1 - y0;
+
+ *x1 = width;
+ if (dx)
+ *y1 = ((width - x0) * dy) / dx + y0;
+ }
+ if (*x1 < 0) {
+ dx = *x1 - x0;
+ dy = *y1 - y0;
+
+ *x1 = 0;
+ if (dx)
+ *y1 = ((0 - x0) * dy) / dx + y0;
+ }
+ if (*y1 > height) {
+ dx = *x1 - x0;
+ dy = *y1 - y0;
+
+ *y1 = height;
+ if (dy)
+ *x1 = ((height - y0) * dx) / dy + x0;
+ }
+ if (*y1 < 0) {
+ dx = *x1 - x0;
+ dy = *y1 - y0;
+
+ *y1 = 0;
+ if (dy)
+ *x1 = ((0 - y0) * dx) / dy + x0;
+ }
+}
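+
+// Example (illustrative): the clip works by similar triangles. For a segment
+// from (10, 10) to (30, 20) with width = 20, *x1 becomes 20 and
+// *y1 = ((20 - 10) * 10) / 20 + 10 = 15, keeping the endpoint on the line.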
+
+int vp9_post_proc_frame(VP9_COMMON *oci, YV12_BUFFER_CONFIG *dest,
+ vp9_ppflags_t *ppflags) {
+ int q = oci->filter_level * 10 / 6;
+ int flags = ppflags->post_proc_flag;
+ int deblock_level = ppflags->deblocking_level;
+ int noise_level = ppflags->noise_level;
+
+ if (!oci->frame_to_show)
+ return -1;
+
+ if (q > 63)
+ q = 63;
+
+ if (!flags) {
+ *dest = *oci->frame_to_show;
+ return 0;
+ }
+
+#if ARCH_X86||ARCH_X86_64
+ vpx_reset_mmx_state();
+#endif
+
+ if (flags & VP9D_DEMACROBLOCK) {
+ deblock_and_de_macro_block(oci->frame_to_show, &oci->post_proc_buffer,
+ q + (deblock_level - 5) * 10, 1, 0);
+ } else if (flags & VP9D_DEBLOCK) {
+ vp9_deblock(oci->frame_to_show, &oci->post_proc_buffer, q);
+ } else {
+ vp8_yv12_copy_frame(oci->frame_to_show, &oci->post_proc_buffer);
+ }
+
+ if (flags & VP9D_ADDNOISE) {
+ if (oci->postproc_state.last_q != q
+ || oci->postproc_state.last_noise != noise_level) {
+ fillrd(&oci->postproc_state, 63 - q, noise_level);
+ }
+
+ vp9_plane_add_noise(oci->post_proc_buffer.y_buffer,
+ oci->postproc_state.noise,
+ oci->postproc_state.blackclamp,
+ oci->postproc_state.whiteclamp,
+ oci->postproc_state.bothclamp,
+ oci->post_proc_buffer.y_width,
+ oci->post_proc_buffer.y_height,
+ oci->post_proc_buffer.y_stride);
+ }
+
+#if 0 && CONFIG_POSTPROC_VISUALIZER
+ if (flags & VP9D_DEBUG_TXT_FRAME_INFO) {
+ char message[512];
+ sprintf(message, "F%1dG%1dQ%3dF%3dP%d_s%dx%d",
+ (oci->frame_type == KEY_FRAME),
+ oci->refresh_golden_frame,
+ oci->base_qindex,
+ oci->filter_level,
+ flags,
+ oci->mb_cols, oci->mb_rows);
+ vp9_blit_text(message, oci->post_proc_buffer.y_buffer,
+ oci->post_proc_buffer.y_stride);
+ }
+
+ if (flags & VP9D_DEBUG_TXT_MBLK_MODES) {
+ int i, j;
+ uint8_t *y_ptr;
+ YV12_BUFFER_CONFIG *post = &oci->post_proc_buffer;
+ int mb_rows = post->y_height >> 4;
+ int mb_cols = post->y_width >> 4;
+ int mb_index = 0;
+ MODE_INFO *mi = oci->mi;
+
+ y_ptr = post->y_buffer + 4 * post->y_stride + 4;
+
+ /* vp9_filter each macro block */
+ for (i = 0; i < mb_rows; i++) {
+ for (j = 0; j < mb_cols; j++) {
+ char zz[4];
+
+ sprintf(zz, "%c", mi[mb_index].mbmi.mode + 'a');
+
+ vp9_blit_text(zz, y_ptr, post->y_stride);
+ mb_index++;
+ y_ptr += 16;
+ }
+
+ mb_index++; /* border */
+ y_ptr += post->y_stride * 16 - post->y_width;
+
+ }
+ }
+
+ if (flags & VP9D_DEBUG_TXT_DC_DIFF) {
+ int i, j;
+ uint8_t *y_ptr;
+ YV12_BUFFER_CONFIG *post = &oci->post_proc_buffer;
+ int mb_rows = post->y_height >> 4;
+ int mb_cols = post->y_width >> 4;
+ int mb_index = 0;
+ MODE_INFO *mi = oci->mi;
+
+ y_ptr = post->y_buffer + 4 * post->y_stride + 4;
+
+ /* vp9_filter each macro block */
+ for (i = 0; i < mb_rows; i++) {
+ for (j = 0; j < mb_cols; j++) {
+ char zz[4];
+ int dc_diff = !(mi[mb_index].mbmi.mode != I4X4_PRED &&
+ mi[mb_index].mbmi.mode != SPLITMV &&
+ mi[mb_index].mbmi.mb_skip_coeff);
+
+ if (oci->frame_type == KEY_FRAME)
+ sprintf(zz, "a");
+ else
+ sprintf(zz, "%c", dc_diff + '0');
+
+ vp9_blit_text(zz, y_ptr, post->y_stride);
+ mb_index++;
+ y_ptr += 16;
+ }
+
+ mb_index++; /* border */
+ y_ptr += post->y_stride * 16 - post->y_width;
+
+ }
+ }
+
+ if (flags & VP9D_DEBUG_TXT_RATE_INFO) {
+ char message[512];
+ snprintf(message, sizeof(message),
+ "Bitrate: %10.2f frame_rate: %10.2f ",
+ oci->bitrate, oci->framerate);
+ vp9_blit_text(message, oci->post_proc_buffer.y_buffer,
+ oci->post_proc_buffer.y_stride);
+ }
+
+ /* Draw motion vectors */
+ if ((flags & VP9D_DEBUG_DRAW_MV) && ppflags->display_mv_flag) {
+ YV12_BUFFER_CONFIG *post = &oci->post_proc_buffer;
+ int width = post->y_width;
+ int height = post->y_height;
+ uint8_t *y_buffer = oci->post_proc_buffer.y_buffer;
+ int y_stride = oci->post_proc_buffer.y_stride;
+ MODE_INFO *mi = oci->mi;
+ int x0, y0;
+
+ for (y0 = 0; y0 < height; y0 += 16) {
+ for (x0 = 0; x0 < width; x0 += 16) {
+ int x1, y1;
+
+ if (!(ppflags->display_mv_flag & (1 << mi->mbmi.mode))) {
+ mi++;
+ continue;
+ }
+
+ if (mi->mbmi.mode == SPLITMV) {
+ switch (mi->mbmi.partitioning) {
+ case PARTITIONING_16X8 : { /* mv_top_bottom */
+ union b_mode_info *bmi = &mi->bmi[0];
+ MV *mv = &bmi->mv.as_mv;
+
+ x1 = x0 + 8 + (mv->col >> 3);
+ y1 = y0 + 4 + (mv->row >> 3);
+
+ constrain_line(x0 + 8, &x1, y0 + 4, &y1, width, height);
+ vp9_blit_line(x0 + 8, x1, y0 + 4, y1, y_buffer, y_stride);
+
+ bmi = &mi->bmi[8];
+
+ x1 = x0 + 8 + (mv->col >> 3);
+ y1 = y0 + 12 + (mv->row >> 3);
+
+ constrain_line(x0 + 8, &x1, y0 + 12, &y1, width, height);
+ vp9_blit_line(x0 + 8, x1, y0 + 12, y1, y_buffer, y_stride);
+
+ break;
+ }
+ case PARTITIONING_8X16 : { /* mv_left_right */
+ union b_mode_info *bmi = &mi->bmi[0];
+ MV *mv = &bmi->mv.as_mv;
+
+ x1 = x0 + 4 + (mv->col >> 3);
+ y1 = y0 + 8 + (mv->row >> 3);
+
+ constrain_line(x0 + 4, &x1, y0 + 8, &y1, width, height);
+ vp9_blit_line(x0 + 4, x1, y0 + 8, y1, y_buffer, y_stride);
+
+ bmi = &mi->bmi[2];
+
+ x1 = x0 + 12 + (mv->col >> 3);
+ y1 = y0 + 8 + (mv->row >> 3);
+
+ constrain_line(x0 + 12, &x1, y0 + 8, &y1, width, height);
+ vp9_blit_line(x0 + 12, x1, y0 + 8, y1, y_buffer, y_stride);
+
+ break;
+ }
+ case PARTITIONING_8X8 : { /* mv_quarters */
+ union b_mode_info *bmi = &mi->bmi[0];
+ MV *mv = &bmi->mv.as_mv;
+
+ x1 = x0 + 4 + (mv->col >> 3);
+ y1 = y0 + 4 + (mv->row >> 3);
+
+ constrain_line(x0 + 4, &x1, y0 + 4, &y1, width, height);
+ vp9_blit_line(x0 + 4, x1, y0 + 4, y1, y_buffer, y_stride);
+
+ bmi = &mi->bmi[2];
+
+ x1 = x0 + 12 + (mv->col >> 3);
+ y1 = y0 + 4 + (mv->row >> 3);
+
+ constrain_line(x0 + 12, &x1, y0 + 4, &y1, width, height);
+ vp9_blit_line(x0 + 12, x1, y0 + 4, y1, y_buffer, y_stride);
+
+ bmi = &mi->bmi[8];
+
+ x1 = x0 + 4 + (mv->col >> 3);
+ y1 = y0 + 12 + (mv->row >> 3);
+
+ constrain_line(x0 + 4, &x1, y0 + 12, &y1, width, height);
+ vp9_blit_line(x0 + 4, x1, y0 + 12, y1, y_buffer, y_stride);
+
+ bmi = &mi->bmi[10];
+
+ x1 = x0 + 12 + (mv->col >> 3);
+ y1 = y0 + 12 + (mv->row >> 3);
+
+ constrain_line(x0 + 12, &x1, y0 + 12, &y1, width, height);
+ vp9_blit_line(x0 + 12, x1, y0 + 12, y1, y_buffer, y_stride);
+ break;
+ }
+ case PARTITIONING_4X4:
+ default : {
+ union b_mode_info *bmi = mi->bmi;
+ int bx0, by0;
+
+ for (by0 = y0; by0 < (y0 + 16); by0 += 4) {
+ for (bx0 = x0; bx0 < (x0 + 16); bx0 += 4) {
+ MV *mv = &bmi->mv.as_mv;
+
+ x1 = bx0 + 2 + (mv->col >> 3);
+ y1 = by0 + 2 + (mv->row >> 3);
+
+ constrain_line(bx0 + 2, &x1, by0 + 2, &y1, width, height);
+ vp9_blit_line(bx0 + 2, x1, by0 + 2, y1, y_buffer, y_stride);
+
+ bmi++;
+ }
+ }
+ }
+ }
+ } else if (mi->mbmi.mode >= NEARESTMV) {
+ MV *mv = &mi->mbmi.mv.as_mv;
+ const int lx0 = x0 + 8;
+ const int ly0 = y0 + 8;
+
+ x1 = lx0 + (mv->col >> 3);
+ y1 = ly0 + (mv->row >> 3);
+
+ if (x1 != lx0 && y1 != ly0) {
+ constrain_line(lx0, &x1, ly0 - 1, &y1, width, height);
+ vp9_blit_line(lx0, x1, ly0 - 1, y1, y_buffer, y_stride);
+
+ constrain_line(lx0, &x1, ly0 + 1, &y1, width, height);
+ vp9_blit_line(lx0, x1, ly0 + 1, y1, y_buffer, y_stride);
+ } else
+ vp9_blit_line(lx0, x1, ly0, y1, y_buffer, y_stride);
+ }
+
+ mi++;
+ }
+ mi++;
+ }
+ }
+
+ /* Color in block modes */
+ if ((flags & VP9D_DEBUG_CLR_BLK_MODES)
+ && (ppflags->display_mb_modes_flag || ppflags->display_b_modes_flag)) {
+ int y, x;
+ YV12_BUFFER_CONFIG *post = &oci->post_proc_buffer;
+ int width = post->y_width;
+ int height = post->y_height;
+ uint8_t *y_ptr = oci->post_proc_buffer.y_buffer;
+ uint8_t *u_ptr = oci->post_proc_buffer.u_buffer;
+ uint8_t *v_ptr = oci->post_proc_buffer.v_buffer;
+ int y_stride = oci->post_proc_buffer.y_stride;
+ MODE_INFO *mi = oci->mi;
+
+ for (y = 0; y < height; y += 16) {
+ for (x = 0; x < width; x += 16) {
+ int Y = 0, U = 0, V = 0;
+
+ if (mi->mbmi.mode == I4X4_PRED &&
+ ((ppflags->display_mb_modes_flag & I4X4_PRED) ||
+ ppflags->display_b_modes_flag)) {
+ int by, bx;
+ uint8_t *yl, *ul, *vl;
+ union b_mode_info *bmi = mi->bmi;
+
+ yl = y_ptr + x;
+ ul = u_ptr + (x >> 1);
+ vl = v_ptr + (x >> 1);
+
+ for (by = 0; by < 16; by += 4) {
+ for (bx = 0; bx < 16; bx += 4) {
+ if ((ppflags->display_b_modes_flag & (1 << mi->mbmi.mode))
+ || (ppflags->display_mb_modes_flag & I4X4_PRED)) {
+ Y = B_PREDICTION_MODE_colors[bmi->as_mode.first][0];
+ U = B_PREDICTION_MODE_colors[bmi->as_mode.first][1];
+ V = B_PREDICTION_MODE_colors[bmi->as_mode.first][2];
+
+ vp9_blend_b(yl + bx, ul + (bx >> 1), vl + (bx >> 1), Y, U, V,
+ 0xc000, y_stride);
+ }
+ bmi++;
+ }
+
+ yl += y_stride * 4;
+ ul += y_stride * 1;
+ vl += y_stride * 1;
+ }
+ } else if (ppflags->display_mb_modes_flag & (1 << mi->mbmi.mode)) {
+ Y = MB_PREDICTION_MODE_colors[mi->mbmi.mode][0];
+ U = MB_PREDICTION_MODE_colors[mi->mbmi.mode][1];
+ V = MB_PREDICTION_MODE_colors[mi->mbmi.mode][2];
+
+ vp9_blend_mb_inner(y_ptr + x, u_ptr + (x >> 1), v_ptr + (x >> 1),
+ Y, U, V, 0xc000, y_stride);
+ }
+
+ mi++;
+ }
+ y_ptr += y_stride * 16;
+ u_ptr += y_stride * 4;
+ v_ptr += y_stride * 4;
+
+ mi++;
+ }
+ }
+
+ /* Color in frame reference blocks */
+ if ((flags & VP9D_DEBUG_CLR_FRM_REF_BLKS) &&
+ ppflags->display_ref_frame_flag) {
+ int y, x;
+ YV12_BUFFER_CONFIG *post = &oci->post_proc_buffer;
+ int width = post->y_width;
+ int height = post->y_height;
+ uint8_t *y_ptr = oci->post_proc_buffer.y_buffer;
+ uint8_t *u_ptr = oci->post_proc_buffer.u_buffer;
+ uint8_t *v_ptr = oci->post_proc_buffer.v_buffer;
+ int y_stride = oci->post_proc_buffer.y_stride;
+ MODE_INFO *mi = oci->mi;
+
+ for (y = 0; y < height; y += 16) {
+ for (x = 0; x < width; x += 16) {
+ int Y = 0, U = 0, V = 0;
+
+ if (ppflags->display_ref_frame_flag & (1 << mi->mbmi.ref_frame)) {
+ Y = MV_REFERENCE_FRAME_colors[mi->mbmi.ref_frame][0];
+ U = MV_REFERENCE_FRAME_colors[mi->mbmi.ref_frame][1];
+ V = MV_REFERENCE_FRAME_colors[mi->mbmi.ref_frame][2];
+
+ vp9_blend_mb_outer(y_ptr + x, u_ptr + (x >> 1), v_ptr + (x >> 1),
+ Y, U, V, 0xc000, y_stride);
+ }
+
+ mi++;
+ }
+ y_ptr += y_stride * 16;
+ u_ptr += y_stride * 4;
+ v_ptr += y_stride * 4;
+
+ mi++;
+ }
+ }
+#endif
+
+ *dest = oci->post_proc_buffer;
+
+ /* handle problem with extending borders */
+ dest->y_width = oci->width;
+ dest->y_height = oci->height;
+ dest->uv_height = dest->y_height / 2;
+
+ return 0;
+}
diff --git a/libvpx/vp9/common/vp9_postproc.h b/libvpx/vp9/common/vp9_postproc.h
new file mode 100644
index 0000000..2c0d333
--- /dev/null
+++ b/libvpx/vp9/common/vp9_postproc.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_COMMON_VP9_POSTPROC_H_
+#define VP9_COMMON_VP9_POSTPROC_H_
+
+#include "vpx_ports/mem.h"
+
+struct postproc_state {
+ int last_q;
+ int last_noise;
+ char noise[3072];
+ DECLARE_ALIGNED(16, char, blackclamp[16]);
+ DECLARE_ALIGNED(16, char, whiteclamp[16]);
+ DECLARE_ALIGNED(16, char, bothclamp[16]);
+};
+
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_ppflags.h"
+
+int vp9_post_proc_frame(struct VP9Common *oci, YV12_BUFFER_CONFIG *dest,
+ vp9_ppflags_t *flags);
+
+void vp9_denoise(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, int q);
+
+void vp9_deblock(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, int q);
+
+#endif // VP9_COMMON_VP9_POSTPROC_H_
diff --git a/libvpx/vp9/common/vp9_ppflags.h b/libvpx/vp9/common/vp9_ppflags.h
new file mode 100644
index 0000000..561c930
--- /dev/null
+++ b/libvpx/vp9/common/vp9_ppflags.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_PPFLAGS_H_
+#define VP9_COMMON_VP9_PPFLAGS_H_
+
+enum {
+ VP9D_NOFILTERING = 0,
+ VP9D_DEBLOCK = 1 << 0,
+ VP9D_DEMACROBLOCK = 1 << 1,
+ VP9D_ADDNOISE = 1 << 2,
+ VP9D_DEBUG_TXT_FRAME_INFO = 1 << 3,
+ VP9D_DEBUG_TXT_MBLK_MODES = 1 << 4,
+ VP9D_DEBUG_TXT_DC_DIFF = 1 << 5,
+ VP9D_DEBUG_TXT_RATE_INFO = 1 << 6,
+ VP9D_DEBUG_DRAW_MV = 1 << 7,
+ VP9D_DEBUG_CLR_BLK_MODES = 1 << 8,
+ VP9D_DEBUG_CLR_FRM_REF_BLKS = 1 << 9
+};
+
+typedef struct {
+ int post_proc_flag;
+ int deblocking_level;
+ int noise_level;
+ int display_ref_frame_flag;
+ int display_mb_modes_flag;
+ int display_b_modes_flag;
+ int display_mv_flag;
+} vp9_ppflags_t;
+
+#endif // VP9_COMMON_VP9_PPFLAGS_H_
diff --git a/libvpx/vpx_scale/scale_mode.h b/libvpx/vp9/common/vp9_pragmas.h
index 5581385..f079161 100644
--- a/libvpx/vpx_scale/scale_mode.h
+++ b/libvpx/vp9/common/vp9_pragmas.h
@@ -8,21 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#ifndef VP9_COMMON_VP9_PRAGMAS_H_
+#define VP9_COMMON_VP9_PRAGMAS_H_
-/****************************************************************************
-*
-*****************************************************************************
-*/
-
-#ifndef SCALE_MODE_H
-#define SCALE_MODE_H
-
-typedef enum {
- MAINTAIN_ASPECT_RATIO = 0x0,
- SCALE_TO_FIT = 0x1,
- CENTER = 0x2,
- OTHER = 0x3
-} SCALE_MODE;
-
+#ifdef __INTEL_COMPILER
+#pragma warning(disable:997 1011 170)
+#endif
+#ifdef _MSC_VER
+#pragma warning(disable:4799)
#endif
+
+#endif // VP9_COMMON_VP9_PRAGMAS_H_
diff --git a/libvpx/vp9/common/vp9_pred_common.c b/libvpx/vp9/common/vp9_pred_common.c
new file mode 100644
index 0000000..17da4f2
--- /dev/null
+++ b/libvpx/vp9/common/vp9_pred_common.c
@@ -0,0 +1,520 @@
+
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_treecoder.h"
+
+// TBD prediction functions for various bitstream signals
+
+// Returns a context number for the given MB prediction signal
+unsigned char vp9_get_pred_context(const VP9_COMMON *const cm,
+ const MACROBLOCKD *const xd,
+ PRED_ID pred_id) {
+ int pred_context;
+ const MODE_INFO *const mi = xd->mode_info_context;
+ const MODE_INFO *const above_mi = mi - cm->mode_info_stride;
+ const MODE_INFO *const left_mi = mi - 1;
+ const int left_in_image = xd->left_available && left_mi->mbmi.mb_in_image;
+ const int above_in_image = xd->up_available && above_mi->mbmi.mb_in_image;
+ // Note:
+ // The mode info data structure has a one element border above and to the
+  // left of the entries corresponding to real macroblocks.
+ // The prediction flags in these dummy entries are initialised to 0.
+ switch (pred_id) {
+ case PRED_SEG_ID:
+ pred_context = above_mi->mbmi.seg_id_predicted;
+ if (xd->left_available)
+ pred_context += left_mi->mbmi.seg_id_predicted;
+ break;
+
+ case PRED_MBSKIP:
+ pred_context = above_mi->mbmi.mb_skip_coeff;
+ if (xd->left_available)
+ pred_context += left_mi->mbmi.mb_skip_coeff;
+ break;
+
+ case PRED_SWITCHABLE_INTERP: {
+ // left
+ const int left_mv_pred = is_inter_mode(left_mi->mbmi.mode);
+ const int left_interp = left_in_image && left_mv_pred ?
+ vp9_switchable_interp_map[left_mi->mbmi.interp_filter] :
+ VP9_SWITCHABLE_FILTERS;
+
+ // above
+ const int above_mv_pred = is_inter_mode(above_mi->mbmi.mode);
+ const int above_interp = above_in_image && above_mv_pred ?
+ vp9_switchable_interp_map[above_mi->mbmi.interp_filter] :
+ VP9_SWITCHABLE_FILTERS;
+
+ assert(left_interp != -1);
+ assert(above_interp != -1);
+
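+      // If both neighbours used the same filter it becomes the context; if
+      // only one neighbour has a known filter, use that one; otherwise fall
+      // back to the catch-all VP9_SWITCHABLE_FILTERS context.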
+ if (left_interp == above_interp)
+ pred_context = left_interp;
+ else if (left_interp == VP9_SWITCHABLE_FILTERS &&
+ above_interp != VP9_SWITCHABLE_FILTERS)
+ pred_context = above_interp;
+ else if (left_interp != VP9_SWITCHABLE_FILTERS &&
+ above_interp == VP9_SWITCHABLE_FILTERS)
+ pred_context = left_interp;
+ else
+ pred_context = VP9_SWITCHABLE_FILTERS;
+
+ break;
+ }
+
+ case PRED_INTRA_INTER: {
+ if (above_in_image && left_in_image) { // both edges available
+ if (left_mi->mbmi.ref_frame[0] == INTRA_FRAME &&
+ above_mi->mbmi.ref_frame[0] == INTRA_FRAME) { // intra/intra (3)
+ pred_context = 3;
+ } else { // intra/inter (1) or inter/inter (0)
+ pred_context = left_mi->mbmi.ref_frame[0] == INTRA_FRAME ||
+ above_mi->mbmi.ref_frame[0] == INTRA_FRAME;
+ }
+ } else if (above_in_image || left_in_image) { // one edge available
+ const MODE_INFO *edge = above_in_image ? above_mi : left_mi;
+
+ // inter: 0, intra: 2
+ pred_context = 2 * (edge->mbmi.ref_frame[0] == INTRA_FRAME);
+ } else {
+ pred_context = 0;
+ }
+ assert(pred_context >= 0 && pred_context < INTRA_INTER_CONTEXTS);
+ break;
+ }
+
+ case PRED_COMP_INTER_INTER: {
+ if (above_in_image && left_in_image) { // both edges available
+ if (above_mi->mbmi.ref_frame[1] <= INTRA_FRAME &&
+ left_mi->mbmi.ref_frame[1] <= INTRA_FRAME) {
+ // neither edge uses comp pred (0/1)
+ pred_context = ((above_mi->mbmi.ref_frame[0] == cm->comp_fixed_ref) ^
+ (left_mi->mbmi.ref_frame[0] == cm->comp_fixed_ref));
+ } else if (above_mi->mbmi.ref_frame[1] <= INTRA_FRAME) {
+ // one of two edges uses comp pred (2/3)
+ pred_context = 2 +
+ (above_mi->mbmi.ref_frame[0] == cm->comp_fixed_ref ||
+ above_mi->mbmi.ref_frame[0] == INTRA_FRAME);
+ } else if (left_mi->mbmi.ref_frame[1] <= INTRA_FRAME) {
+ // one of two edges uses comp pred (2/3)
+ pred_context = 2 +
+ (left_mi->mbmi.ref_frame[0] == cm->comp_fixed_ref ||
+ left_mi->mbmi.ref_frame[0] == INTRA_FRAME);
+ } else { // both edges use comp pred (4)
+ pred_context = 4;
+ }
+ } else if (above_in_image || left_in_image) { // one edge available
+ const MODE_INFO *edge = above_in_image ? above_mi : left_mi;
+
+ if (edge->mbmi.ref_frame[1] <= INTRA_FRAME) {
+ // edge does not use comp pred (0/1)
+ pred_context = edge->mbmi.ref_frame[0] == cm->comp_fixed_ref;
+ } else { // edge uses comp pred (3)
+ pred_context = 3;
+ }
+ } else { // no edges available (1)
+ pred_context = 1;
+ }
+ assert(pred_context >= 0 && pred_context < COMP_INTER_CONTEXTS);
+ break;
+ }
+
+ case PRED_COMP_REF_P: {
+ const int fix_ref_idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
+ const int var_ref_idx = !fix_ref_idx;
+
+ if (above_in_image && left_in_image) { // both edges available
+ if (above_mi->mbmi.ref_frame[0] == INTRA_FRAME &&
+ left_mi->mbmi.ref_frame[0] == INTRA_FRAME) { // intra/intra (2)
+ pred_context = 2;
+ } else if (above_mi->mbmi.ref_frame[0] == INTRA_FRAME ||
+ left_mi->mbmi.ref_frame[0] == INTRA_FRAME) { // intra/inter
+ const MODE_INFO *edge = above_mi->mbmi.ref_frame[0] == INTRA_FRAME ?
+ left_mi : above_mi;
+
+ if (edge->mbmi.ref_frame[1] <= INTRA_FRAME) { // single pred (1/3)
+          pred_context = 1 +
+              2 * (edge->mbmi.ref_frame[0] != cm->comp_var_ref[1]);
+ } else { // comp pred (1/3)
+          pred_context = 1 +
+              2 * (edge->mbmi.ref_frame[var_ref_idx] != cm->comp_var_ref[1]);
+ }
+ } else { // inter/inter
+ int l_sg = left_mi->mbmi.ref_frame[1] <= INTRA_FRAME;
+ int a_sg = above_mi->mbmi.ref_frame[1] <= INTRA_FRAME;
+ MV_REFERENCE_FRAME vrfa = a_sg ? above_mi->mbmi.ref_frame[0] :
+ above_mi->mbmi.ref_frame[var_ref_idx];
+ MV_REFERENCE_FRAME vrfl = l_sg ? left_mi->mbmi.ref_frame[0] :
+ left_mi->mbmi.ref_frame[var_ref_idx];
+
+ if (vrfa == vrfl && cm->comp_var_ref[1] == vrfa) {
+ pred_context = 0;
+ } else if (l_sg && a_sg) { // single/single
+ if ((vrfa == cm->comp_fixed_ref && vrfl == cm->comp_var_ref[0]) ||
+ (vrfl == cm->comp_fixed_ref && vrfa == cm->comp_var_ref[0])) {
+ pred_context = 4;
+ } else if (vrfa == vrfl) {
+ pred_context = 3;
+ } else {
+ pred_context = 1;
+ }
+ } else if (l_sg || a_sg) { // single/comp
+ MV_REFERENCE_FRAME vrfc = l_sg ? vrfa : vrfl;
+ MV_REFERENCE_FRAME rfs = a_sg ? vrfa : vrfl;
+
+ if (vrfc == cm->comp_var_ref[1] && rfs != cm->comp_var_ref[1]) {
+ pred_context = 1;
+ } else if (rfs == cm->comp_var_ref[1] &&
+ vrfc != cm->comp_var_ref[1]) {
+ pred_context = 2;
+ } else {
+ pred_context = 4;
+ }
+ } else if (vrfa == vrfl) { // comp/comp
+ pred_context = 4;
+ } else {
+ pred_context = 2;
+ }
+ }
+ } else if (above_in_image || left_in_image) { // one edge available
+ const MODE_INFO *edge = above_in_image ? above_mi : left_mi;
+
+ if (edge->mbmi.ref_frame[0] == INTRA_FRAME) {
+ pred_context = 2;
+ } else if (edge->mbmi.ref_frame[1] > INTRA_FRAME) {
+        pred_context =
+            4 * (edge->mbmi.ref_frame[var_ref_idx] != cm->comp_var_ref[1]);
+ } else {
+        pred_context = 3 * (edge->mbmi.ref_frame[0] != cm->comp_var_ref[1]);
+ }
+ } else { // no edges available (2)
+ pred_context = 2;
+ }
+ assert(pred_context >= 0 && pred_context < REF_CONTEXTS);
+ break;
+ }
+
+ case PRED_SINGLE_REF_P1: {
+ if (above_in_image && left_in_image) { // both edges available
+ if (above_mi->mbmi.ref_frame[0] == INTRA_FRAME &&
+ left_mi->mbmi.ref_frame[0] == INTRA_FRAME) {
+ pred_context = 2;
+ } else if (above_mi->mbmi.ref_frame[0] == INTRA_FRAME ||
+ left_mi->mbmi.ref_frame[0] == INTRA_FRAME) {
+ const MODE_INFO *edge = above_mi->mbmi.ref_frame[0] == INTRA_FRAME ?
+ left_mi : above_mi;
+
+ if (edge->mbmi.ref_frame[1] <= INTRA_FRAME) {
+ pred_context = 4 * (edge->mbmi.ref_frame[0] == LAST_FRAME);
+ } else {
+ pred_context = 1 + (edge->mbmi.ref_frame[0] == LAST_FRAME ||
+ edge->mbmi.ref_frame[1] == LAST_FRAME);
+ }
+ } else if (above_mi->mbmi.ref_frame[1] <= INTRA_FRAME &&
+ left_mi->mbmi.ref_frame[1] <= INTRA_FRAME) {
+ pred_context = 2 * (above_mi->mbmi.ref_frame[0] == LAST_FRAME) +
+ 2 * (left_mi->mbmi.ref_frame[0] == LAST_FRAME);
+ } else if (above_mi->mbmi.ref_frame[1] > INTRA_FRAME &&
+ left_mi->mbmi.ref_frame[1] > INTRA_FRAME) {
+ pred_context = 1 + (above_mi->mbmi.ref_frame[0] == LAST_FRAME ||
+ above_mi->mbmi.ref_frame[1] == LAST_FRAME ||
+ left_mi->mbmi.ref_frame[0] == LAST_FRAME ||
+ left_mi->mbmi.ref_frame[1] == LAST_FRAME);
+ } else {
+ MV_REFERENCE_FRAME rfs = above_mi->mbmi.ref_frame[1] <= INTRA_FRAME ?
+ above_mi->mbmi.ref_frame[0] : left_mi->mbmi.ref_frame[0];
+ MV_REFERENCE_FRAME crf1 = above_mi->mbmi.ref_frame[1] > INTRA_FRAME ?
+ above_mi->mbmi.ref_frame[0] : left_mi->mbmi.ref_frame[0];
+ MV_REFERENCE_FRAME crf2 = above_mi->mbmi.ref_frame[1] > INTRA_FRAME ?
+ above_mi->mbmi.ref_frame[1] : left_mi->mbmi.ref_frame[1];
+
+ if (rfs == LAST_FRAME) {
+ pred_context = 3 + (crf1 == LAST_FRAME || crf2 == LAST_FRAME);
+ } else {
+ pred_context = crf1 == LAST_FRAME || crf2 == LAST_FRAME;
+ }
+ }
+ } else if (above_in_image || left_in_image) { // one edge available
+ const MODE_INFO *edge = above_in_image ? above_mi : left_mi;
+
+ if (edge->mbmi.ref_frame[0] == INTRA_FRAME) {
+ pred_context = 2;
+ } else if (edge->mbmi.ref_frame[1] <= INTRA_FRAME) {
+ pred_context = 4 * (edge->mbmi.ref_frame[0] == LAST_FRAME);
+ } else {
+ pred_context = 1 + (edge->mbmi.ref_frame[0] == LAST_FRAME ||
+ edge->mbmi.ref_frame[1] == LAST_FRAME);
+ }
+ } else { // no edges available (2)
+ pred_context = 2;
+ }
+ assert(pred_context >= 0 && pred_context < REF_CONTEXTS);
+ break;
+ }
+
+ case PRED_SINGLE_REF_P2: {
+ if (above_in_image && left_in_image) { // both edges available
+ if (above_mi->mbmi.ref_frame[0] == INTRA_FRAME &&
+ left_mi->mbmi.ref_frame[0] == INTRA_FRAME) {
+ pred_context = 2;
+ } else if (above_mi->mbmi.ref_frame[0] == INTRA_FRAME ||
+ left_mi->mbmi.ref_frame[0] == INTRA_FRAME) {
+ const MODE_INFO *edge = above_mi->mbmi.ref_frame[0] == INTRA_FRAME ?
+ left_mi : above_mi;
+
+ if (edge->mbmi.ref_frame[1] <= INTRA_FRAME) {
+ if (edge->mbmi.ref_frame[0] == LAST_FRAME) {
+ pred_context = 3;
+ } else {
+ pred_context = 4 * (edge->mbmi.ref_frame[0] == GOLDEN_FRAME);
+ }
+ } else {
+ pred_context = 1 + 2 * (edge->mbmi.ref_frame[0] == GOLDEN_FRAME ||
+ edge->mbmi.ref_frame[1] == GOLDEN_FRAME);
+ }
+ } else if (above_mi->mbmi.ref_frame[1] <= INTRA_FRAME &&
+ left_mi->mbmi.ref_frame[1] <= INTRA_FRAME) {
+ if (above_mi->mbmi.ref_frame[0] == LAST_FRAME &&
+ left_mi->mbmi.ref_frame[0] == LAST_FRAME) {
+ pred_context = 3;
+ } else if (above_mi->mbmi.ref_frame[0] == LAST_FRAME ||
+ left_mi->mbmi.ref_frame[0] == LAST_FRAME) {
+ const MODE_INFO *edge = above_mi->mbmi.ref_frame[0] == LAST_FRAME ?
+ left_mi : above_mi;
+
+ pred_context = 4 * (edge->mbmi.ref_frame[0] == GOLDEN_FRAME);
+ } else {
+ pred_context = 2 * (above_mi->mbmi.ref_frame[0] == GOLDEN_FRAME) +
+ 2 * (left_mi->mbmi.ref_frame[0] == GOLDEN_FRAME);
+ }
+ } else if (above_mi->mbmi.ref_frame[1] > INTRA_FRAME &&
+ left_mi->mbmi.ref_frame[1] > INTRA_FRAME) {
+ if (above_mi->mbmi.ref_frame[0] == left_mi->mbmi.ref_frame[0] &&
+ above_mi->mbmi.ref_frame[1] == left_mi->mbmi.ref_frame[1]) {
+ pred_context = 3 * (above_mi->mbmi.ref_frame[0] == GOLDEN_FRAME ||
+ above_mi->mbmi.ref_frame[1] == GOLDEN_FRAME ||
+ left_mi->mbmi.ref_frame[0] == GOLDEN_FRAME ||
+ left_mi->mbmi.ref_frame[1] == GOLDEN_FRAME);
+ } else {
+ pred_context = 2;
+ }
+ } else {
+ MV_REFERENCE_FRAME rfs = above_mi->mbmi.ref_frame[1] <= INTRA_FRAME ?
+ above_mi->mbmi.ref_frame[0] : left_mi->mbmi.ref_frame[0];
+ MV_REFERENCE_FRAME crf1 = above_mi->mbmi.ref_frame[1] > INTRA_FRAME ?
+ above_mi->mbmi.ref_frame[0] : left_mi->mbmi.ref_frame[0];
+ MV_REFERENCE_FRAME crf2 = above_mi->mbmi.ref_frame[1] > INTRA_FRAME ?
+ above_mi->mbmi.ref_frame[1] : left_mi->mbmi.ref_frame[1];
+
+ if (rfs == GOLDEN_FRAME) {
+ pred_context = 3 + (crf1 == GOLDEN_FRAME || crf2 == GOLDEN_FRAME);
+ } else if (rfs == ALTREF_FRAME) {
+ pred_context = crf1 == GOLDEN_FRAME || crf2 == GOLDEN_FRAME;
+ } else {
+ pred_context =
+ 1 + 2 * (crf1 == GOLDEN_FRAME || crf2 == GOLDEN_FRAME);
+ }
+ }
+ } else if (above_in_image || left_in_image) { // one edge available
+ const MODE_INFO *edge = above_in_image ? above_mi : left_mi;
+
+ if (edge->mbmi.ref_frame[0] == INTRA_FRAME ||
+ (edge->mbmi.ref_frame[0] == LAST_FRAME &&
+ edge->mbmi.ref_frame[1] <= INTRA_FRAME)) {
+ pred_context = 2;
+ } else if (edge->mbmi.ref_frame[1] <= INTRA_FRAME) {
+ pred_context = 4 * (edge->mbmi.ref_frame[0] == GOLDEN_FRAME);
+ } else {
+ pred_context = 3 * (edge->mbmi.ref_frame[0] == GOLDEN_FRAME ||
+ edge->mbmi.ref_frame[1] == GOLDEN_FRAME);
+ }
+ } else { // no edges available (2)
+ pred_context = 2;
+ }
+ assert(pred_context >= 0 && pred_context < REF_CONTEXTS);
+ break;
+ }
+
+ case PRED_TX_SIZE: {
+ int above_context, left_context;
+ int max_tx_size;
+ if (mi->mbmi.sb_type < BLOCK_SIZE_SB8X8)
+ max_tx_size = TX_4X4;
+ else if (mi->mbmi.sb_type < BLOCK_SIZE_MB16X16)
+ max_tx_size = TX_8X8;
+ else if (mi->mbmi.sb_type < BLOCK_SIZE_SB32X32)
+ max_tx_size = TX_16X16;
+ else
+ max_tx_size = TX_32X32;
+ above_context = left_context = max_tx_size;
+ if (above_in_image) {
+ above_context = (above_mi->mbmi.mb_skip_coeff ?
+ max_tx_size : above_mi->mbmi.txfm_size);
+ }
+ if (left_in_image) {
+ left_context = (left_mi->mbmi.mb_skip_coeff ?
+ max_tx_size : left_mi->mbmi.txfm_size);
+ }
+ if (!left_in_image) {
+ left_context = above_context;
+ }
+ if (!above_in_image) {
+ above_context = left_context;
+ }
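+      // The resulting context is 1 when the neighbours' combined tx sizes
+      // exceed the largest transform size allowed for this block, 0 otherwise.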
+ pred_context = (above_context + left_context > max_tx_size);
+ break;
+ }
+
+ default:
+ assert(0);
+ pred_context = 0; // *** add error trap code.
+ break;
+ }
+
+ return pred_context;
+}
+
+// This function returns a context probability for coding a given
+// prediction signal
+vp9_prob vp9_get_pred_prob(const VP9_COMMON *const cm,
+ const MACROBLOCKD *const xd,
+ PRED_ID pred_id) {
+ const int pred_context = vp9_get_pred_context(cm, xd, pred_id);
+
+ switch (pred_id) {
+ case PRED_SEG_ID:
+ return cm->segment_pred_probs[pred_context];
+ case PRED_MBSKIP:
+ return cm->fc.mbskip_probs[pred_context];
+ case PRED_INTRA_INTER:
+ return cm->fc.intra_inter_prob[pred_context];
+ case PRED_COMP_INTER_INTER:
+ return cm->fc.comp_inter_prob[pred_context];
+ case PRED_COMP_REF_P:
+ return cm->fc.comp_ref_prob[pred_context];
+ case PRED_SINGLE_REF_P1:
+ return cm->fc.single_ref_prob[pred_context][0];
+ case PRED_SINGLE_REF_P2:
+ return cm->fc.single_ref_prob[pred_context][1];
+ default:
+ assert(0);
+ return 128; // *** add error trap code.
+ }
+}
+
+// This function returns a context probability ptr for coding a given
+// prediction signal
+const vp9_prob *vp9_get_pred_probs(const VP9_COMMON *const cm,
+ const MACROBLOCKD *const xd,
+ PRED_ID pred_id) {
+ const MODE_INFO *const mi = xd->mode_info_context;
+ const int pred_context = vp9_get_pred_context(cm, xd, pred_id);
+
+ switch (pred_id) {
+ case PRED_SWITCHABLE_INTERP:
+ return &cm->fc.switchable_interp_prob[pred_context][0];
+
+ case PRED_TX_SIZE:
+ if (mi->mbmi.sb_type < BLOCK_SIZE_MB16X16)
+ return cm->fc.tx_probs_8x8p[pred_context];
+ else if (mi->mbmi.sb_type < BLOCK_SIZE_SB32X32)
+ return cm->fc.tx_probs_16x16p[pred_context];
+ else
+ return cm->fc.tx_probs_32x32p[pred_context];
+
+ default:
+ assert(0);
+ return NULL; // *** add error trap code.
+ }
+}
+
+// This function returns the status of the given prediction signal,
+// i.e. whether the predicted value for the given signal was correct.
+unsigned char vp9_get_pred_flag(const MACROBLOCKD *const xd,
+ PRED_ID pred_id) {
+ switch (pred_id) {
+ case PRED_SEG_ID:
+ return xd->mode_info_context->mbmi.seg_id_predicted;
+ case PRED_MBSKIP:
+ return xd->mode_info_context->mbmi.mb_skip_coeff;
+ default:
+ assert(0);
+ return 0; // *** add error trap code.
+ }
+}
+
+// This function sets the status of the given prediction signal,
+// i.e. records whether the predicted value for the given signal was correct.
+void vp9_set_pred_flag(MACROBLOCKD *const xd,
+ PRED_ID pred_id,
+ unsigned char pred_flag) {
+ const int mis = xd->mode_info_stride;
+ BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
+ const int bh = 1 << mi_height_log2(bsize);
+ const int bw = 1 << mi_width_log2(bsize);
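+  // Clip the block extent to the visible image: mb_to_right_edge and
+  // mb_to_bottom_edge become negative when the block extends past the frame
+  // border, shrinking x_mis/y_mis accordingly.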
+#define sub(a, b) ((b) < 0 ? (a) + (b) : (a))
+ const int x_mis = sub(bw, xd->mb_to_right_edge >> (3 + LOG2_MI_SIZE));
+ const int y_mis = sub(bh, xd->mb_to_bottom_edge >> (3 + LOG2_MI_SIZE));
+#undef sub
+ int x, y;
+
+ switch (pred_id) {
+ case PRED_SEG_ID:
+ for (y = 0; y < y_mis; y++) {
+ for (x = 0; x < x_mis; x++) {
+ xd->mode_info_context[y * mis + x].mbmi.seg_id_predicted = pred_flag;
+ }
+ }
+ break;
+
+ case PRED_MBSKIP:
+ for (y = 0; y < y_mis; y++) {
+ for (x = 0; x < x_mis; x++) {
+ xd->mode_info_context[y * mis + x].mbmi.mb_skip_coeff = pred_flag;
+ }
+ }
+ break;
+
+ default:
+ assert(0);
+ // *** add error trap code.
+ break;
+ }
+}
+
+
+// The following contain the guts of the prediction code used to
+// predict various bitstream signals.
+
+// Macroblock segment id prediction function
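+// Returns the smallest segment id found in the previous frame's segment map
+// over the mi units covered by the block, keeping the prediction
+// well-defined when the block spans more than one segment.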
+int vp9_get_pred_mi_segid(VP9_COMMON *cm, BLOCK_SIZE_TYPE sb_type,
+ int mi_row, int mi_col) {
+ const int mi_index = mi_row * cm->mi_cols + mi_col;
+ const int bw = 1 << mi_width_log2(sb_type);
+ const int bh = 1 << mi_height_log2(sb_type);
+ const int ymis = MIN(cm->mi_rows - mi_row, bh);
+ const int xmis = MIN(cm->mi_cols - mi_col, bw);
+ int segment_id = INT_MAX;
+ int x, y;
+
+ for (y = 0; y < ymis; y++) {
+ for (x = 0; x < xmis; x++) {
+ const int index = mi_index + (y * cm->mi_cols + x);
+ segment_id = MIN(segment_id, cm->last_frame_seg_map[index]);
+ }
+ }
+ return segment_id;
+}
diff --git a/libvpx/vp9/common/vp9_pred_common.h b/libvpx/vp9/common/vp9_pred_common.h
new file mode 100644
index 0000000..b728724
--- /dev/null
+++ b/libvpx/vp9/common/vp9_pred_common.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_PRED_COMMON_H_
+#define VP9_COMMON_VP9_PRED_COMMON_H_
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_onyxc_int.h"
+
+// Predicted items
+typedef enum {
+ PRED_SEG_ID = 0, // Segment identifier
+ PRED_MBSKIP = 1,
+ PRED_SWITCHABLE_INTERP = 2,
+ PRED_INTRA_INTER = 3,
+ PRED_COMP_INTER_INTER = 4,
+ PRED_SINGLE_REF_P1 = 5,
+ PRED_SINGLE_REF_P2 = 6,
+ PRED_COMP_REF_P = 7,
+ PRED_TX_SIZE = 8
+} PRED_ID;
+
+unsigned char vp9_get_pred_context(const VP9_COMMON *const cm,
+ const MACROBLOCKD *const xd,
+ PRED_ID pred_id);
+
+vp9_prob vp9_get_pred_prob(const VP9_COMMON *const cm,
+ const MACROBLOCKD *const xd,
+ PRED_ID pred_id);
+
+const vp9_prob *vp9_get_pred_probs(const VP9_COMMON *const cm,
+ const MACROBLOCKD *const xd,
+ PRED_ID pred_id);
+
+unsigned char vp9_get_pred_flag(const MACROBLOCKD *const xd,
+ PRED_ID pred_id);
+
+void vp9_set_pred_flag(MACROBLOCKD *const xd,
+ PRED_ID pred_id,
+ unsigned char pred_flag);
+
+
+int vp9_get_pred_mi_segid(VP9_COMMON *cm, BLOCK_SIZE_TYPE sb_type,
+ int mi_row, int mi_col);
+
+#endif // VP9_COMMON_VP9_PRED_COMMON_H_
diff --git a/libvpx/vp9/common/vp9_quant_common.c b/libvpx/vp9/common/vp9_quant_common.c
new file mode 100644
index 0000000..295c8e7
--- /dev/null
+++ b/libvpx/vp9/common/vp9_quant_common.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/common/vp9_seg_common.h"
+
+static int16_t dc_qlookup[QINDEX_RANGE];
+static int16_t ac_qlookup[QINDEX_RANGE];
+
+#define ACDC_MIN 8
+
+// TODO(dkovalev): move to common and reuse
+static double poly3(double a, double b, double c, double d, double x) {
+ return a*x*x*x + b*x*x + c*x + d;
+}
+
+void vp9_init_quant_tables(void) {
+ int i, val = 4;
+
+  // A "real" q of 1.0 forces lossless mode.
+  // In practice, non-lossless q values between 1.0 and 2.0 (represented here
+  // by integer values from 5-7) give poor rd results (lower psnr and often a
+  // larger size than the lossless encode). To block out those "not very
+  // useful" values we increment the ac and dc q lookup values by 4 after
+  // position 0.
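+  // Each later ac entry grows by roughly 2% (factor 1.01975), giving an
+  // approximately geometric table; the dc entry is then derived from the ac
+  // value by the cubic poly3() and floored at ACDC_MIN.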
+ ac_qlookup[0] = val;
+ dc_qlookup[0] = val;
+ val += 4;
+
+ for (i = 1; i < QINDEX_RANGE; i++) {
+ const int ac_val = val;
+
+ val = (int)(val * 1.01975);
+ if (val == ac_val)
+ ++val;
+
+ ac_qlookup[i] = (int16_t)ac_val;
+ dc_qlookup[i] = (int16_t)MAX(ACDC_MIN, poly3(0.000000305, -0.00065, 0.9,
+ 0.5, ac_val));
+ }
+}
+
+int16_t vp9_dc_quant(int qindex, int delta) {
+ return dc_qlookup[clamp(qindex + delta, 0, MAXQ)];
+}
+
+int16_t vp9_ac_quant(int qindex, int delta) {
+ return ac_qlookup[clamp(qindex + delta, 0, MAXQ)];
+}
+
+
+int vp9_get_qindex(MACROBLOCKD *xd, int segment_id, int base_qindex) {
+ if (vp9_segfeature_active(xd, segment_id, SEG_LVL_ALT_Q)) {
+ const int data = vp9_get_segdata(xd, segment_id, SEG_LVL_ALT_Q);
+ return xd->mb_segment_abs_delta == SEGMENT_ABSDATA ?
+ data : // Abs value
+ clamp(base_qindex + data, 0, MAXQ); // Delta value
+ } else {
+ return base_qindex;
+ }
+}
+
diff --git a/libvpx/vp9/common/vp9_quant_common.h b/libvpx/vp9/common/vp9_quant_common.h
new file mode 100644
index 0000000..ded9426
--- /dev/null
+++ b/libvpx/vp9/common/vp9_quant_common.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_QUANT_COMMON_H_
+#define VP9_COMMON_VP9_QUANT_COMMON_H_
+
+#include "vp9/common/vp9_blockd.h"
+
+#define MINQ 0
+#define MAXQ 255
+#define QINDEX_RANGE (MAXQ - MINQ + 1)
+#define QINDEX_BITS 8
+
+void vp9_init_quant_tables(void);
+
+int16_t vp9_dc_quant(int qindex, int delta);
+int16_t vp9_ac_quant(int qindex, int delta);
+
+int vp9_get_qindex(MACROBLOCKD *mb, int segment_id, int base_qindex);
+
+#endif // VP9_COMMON_VP9_QUANT_COMMON_H_
diff --git a/libvpx/vp9/common/vp9_reconinter.c b/libvpx/vp9/common/vp9_reconinter.c
new file mode 100644
index 0000000..b28d333
--- /dev/null
+++ b/libvpx/vp9/common/vp9_reconinter.c
@@ -0,0 +1,528 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_filter.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/common/vp9_reconintra.h"
+
+static int scale_value_x_with_scaling(int val,
+ const struct scale_factors *scale) {
+ return (val * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT);
+}
+
+static int scale_value_y_with_scaling(int val,
+ const struct scale_factors *scale) {
+ return (val * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT);
+}
+
+static int unscaled_value(int val, const struct scale_factors *scale) {
+ (void) scale;
+ return val;
+}
+
+static int_mv32 mv_q3_to_q4_with_scaling(const int_mv *src_mv,
+ const struct scale_factors *scale) {
+ // returns mv * scale + offset
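+  // The incoming MV is in q3 (1/8-pel) units; the << 1 below converts it to
+  // q4 (1/16-pel) before the fixed-point scale and offset are applied.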
+ int_mv32 result;
+ const int32_t mv_row_q4 = src_mv->as_mv.row << 1;
+ const int32_t mv_col_q4 = src_mv->as_mv.col << 1;
+
+ result.as_mv.row = (mv_row_q4 * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT)
+ + scale->y_offset_q4;
+ result.as_mv.col = (mv_col_q4 * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT)
+ + scale->x_offset_q4;
+ return result;
+}
+
+static int_mv32 mv_q3_to_q4_without_scaling(const int_mv *src_mv,
+ const struct scale_factors *scale) {
+  // no scaling: just convert the MV from q3 (1/8 pel) to q4 (1/16 pel)
+  int_mv32 result;
+  (void) scale;
+
+ result.as_mv.row = src_mv->as_mv.row << 1;
+ result.as_mv.col = src_mv->as_mv.col << 1;
+ return result;
+}
+
+static int32_t mv_component_q4_with_scaling(int mv_q4, int scale_fp,
+ int offset_q4) {
+ int32_t scaled_mv;
+ // returns the scaled and offset value of the mv component.
+ scaled_mv = (mv_q4 * scale_fp >> VP9_REF_SCALE_SHIFT) + offset_q4;
+
+ return scaled_mv;
+}
+
+static int32_t mv_component_q4_without_scaling(int mv_q4, int scale_fp,
+ int offset_q4) {
+ // returns the scaled and offset value of the mv component.
+ (void)scale_fp;
+ (void)offset_q4;
+ return mv_q4;
+}
+
+static void set_offsets_with_scaling(struct scale_factors *scale,
+ int row, int col) {
+ const int x_q4 = 16 * col;
+ const int y_q4 = 16 * row;
+
+ scale->x_offset_q4 = (x_q4 * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT) & 0xf;
+ scale->y_offset_q4 = (y_q4 * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT) & 0xf;
+}
+
+static void set_offsets_without_scaling(struct scale_factors *scale,
+ int row, int col) {
+ scale->x_offset_q4 = 0;
+ scale->y_offset_q4 = 0;
+}
+
+static int get_fixed_point_scale_factor(int other_size, int this_size) {
+ // Calculate scaling factor once for each reference frame
+ // and use fixed point scaling factors in decoding and encoding routines.
+ // Hardware implementations can calculate scale factor in device driver
+ // and use multiplication and shifting on hardware instead of division.
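+  // For example (assuming VP9_REF_SCALE_SHIFT is 14): a 320-wide reference
+  // used for a 640-wide frame gives (320 << 14) / 640 = 8192, i.e. 0.5 in
+  // Q14 fixed point.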
+ return (other_size << VP9_REF_SCALE_SHIFT) / this_size;
+}
+
+void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
+ int other_w, int other_h,
+ int this_w, int this_h) {
+ scale->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w);
+ scale->x_offset_q4 = 0; // calculated per-mb
+ scale->x_step_q4 = (16 * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT);
+
+ scale->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h);
+ scale->y_offset_q4 = 0; // calculated per-mb
+ scale->y_step_q4 = (16 * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT);
+
+ if ((other_w == this_w) && (other_h == this_h)) {
+ scale->scale_value_x = unscaled_value;
+ scale->scale_value_y = unscaled_value;
+ scale->set_scaled_offsets = set_offsets_without_scaling;
+ scale->scale_mv_q3_to_q4 = mv_q3_to_q4_without_scaling;
+ scale->scale_mv_component_q4 = mv_component_q4_without_scaling;
+ } else {
+ scale->scale_value_x = scale_value_x_with_scaling;
+ scale->scale_value_y = scale_value_y_with_scaling;
+ scale->set_scaled_offsets = set_offsets_with_scaling;
+ scale->scale_mv_q3_to_q4 = mv_q3_to_q4_with_scaling;
+ scale->scale_mv_component_q4 = mv_component_q4_with_scaling;
+ }
+
+ // TODO(agrange): Investigate the best choice of functions to use here
+ // for EIGHTTAP_SMOOTH. Since it is not interpolating, need to choose what
+ // to do at full-pel offsets. The current selection, where the filter is
+ // applied in one direction only, and not at all for 0,0, seems to give the
+ // best quality, but it may be worth trying an additional mode that does
+ // do the filtering on full-pel.
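+  // The predict table is indexed as [x_subpel != 0][y_subpel != 0][avg]; a
+  // step of 16 (q4) on an axis means no scaling on that axis, so each cell
+  // installs the cheapest convolve variant that still filters every axis
+  // that can produce a fractional offset.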
+ if (scale->x_step_q4 == 16) {
+ if (scale->y_step_q4 == 16) {
+ // No scaling in either direction.
+ scale->predict[0][0][0] = vp9_convolve_copy;
+ scale->predict[0][0][1] = vp9_convolve_avg;
+ scale->predict[0][1][0] = vp9_convolve8_vert;
+ scale->predict[0][1][1] = vp9_convolve8_avg_vert;
+ scale->predict[1][0][0] = vp9_convolve8_horiz;
+ scale->predict[1][0][1] = vp9_convolve8_avg_horiz;
+ } else {
+ // No scaling in x direction. Must always scale in the y direction.
+ scale->predict[0][0][0] = vp9_convolve8_vert;
+ scale->predict[0][0][1] = vp9_convolve8_avg_vert;
+ scale->predict[0][1][0] = vp9_convolve8_vert;
+ scale->predict[0][1][1] = vp9_convolve8_avg_vert;
+ scale->predict[1][0][0] = vp9_convolve8;
+ scale->predict[1][0][1] = vp9_convolve8_avg;
+ }
+ } else {
+ if (scale->y_step_q4 == 16) {
+ // No scaling in the y direction. Must always scale in the x direction.
+ scale->predict[0][0][0] = vp9_convolve8_horiz;
+ scale->predict[0][0][1] = vp9_convolve8_avg_horiz;
+ scale->predict[0][1][0] = vp9_convolve8;
+ scale->predict[0][1][1] = vp9_convolve8_avg;
+ scale->predict[1][0][0] = vp9_convolve8_horiz;
+ scale->predict[1][0][1] = vp9_convolve8_avg_horiz;
+ } else {
+ // Must always scale in both directions.
+ scale->predict[0][0][0] = vp9_convolve8;
+ scale->predict[0][0][1] = vp9_convolve8_avg;
+ scale->predict[0][1][0] = vp9_convolve8;
+ scale->predict[0][1][1] = vp9_convolve8_avg;
+ scale->predict[1][0][0] = vp9_convolve8;
+ scale->predict[1][0][1] = vp9_convolve8_avg;
+ }
+ }
+ // 2D subpel motion always gets filtered in both directions
+ scale->predict[1][1][0] = vp9_convolve8;
+ scale->predict[1][1][1] = vp9_convolve8_avg;
+}
+
+void vp9_setup_interp_filters(MACROBLOCKD *xd,
+ INTERPOLATIONFILTERTYPE mcomp_filter_type,
+ VP9_COMMON *cm) {
+ if (xd->mode_info_context) {
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+
+ set_scale_factors(xd,
+ mbmi->ref_frame[0] - 1,
+ mbmi->ref_frame[1] - 1,
+ cm->active_ref_scale);
+ }
+
+ switch (mcomp_filter_type) {
+ case EIGHTTAP:
+ case SWITCHABLE:
+ xd->subpix.filter_x = xd->subpix.filter_y = vp9_sub_pel_filters_8;
+ break;
+ case EIGHTTAP_SMOOTH:
+ xd->subpix.filter_x = xd->subpix.filter_y = vp9_sub_pel_filters_8lp;
+ break;
+ case EIGHTTAP_SHARP:
+ xd->subpix.filter_x = xd->subpix.filter_y = vp9_sub_pel_filters_8s;
+ break;
+ case BILINEAR:
+ xd->subpix.filter_x = xd->subpix.filter_y = vp9_bilinear_filters;
+ break;
+ }
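+  // The filter tables are expected to be 256-byte aligned (low 8 address
+  // bits zero), presumably so optimized code can index subpel phases without
+  // extra pointer arithmetic.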
+ assert(((intptr_t)xd->subpix.filter_x & 0xff) == 0);
+}
+
+void vp9_copy_mem16x16_c(const uint8_t *src,
+ int src_stride,
+ uint8_t *dst,
+ int dst_stride) {
+ int r;
+
+ for (r = 0; r < 16; r++) {
+#if !(CONFIG_FAST_UNALIGNED)
+ dst[0] = src[0];
+ dst[1] = src[1];
+ dst[2] = src[2];
+ dst[3] = src[3];
+ dst[4] = src[4];
+ dst[5] = src[5];
+ dst[6] = src[6];
+ dst[7] = src[7];
+ dst[8] = src[8];
+ dst[9] = src[9];
+ dst[10] = src[10];
+ dst[11] = src[11];
+ dst[12] = src[12];
+ dst[13] = src[13];
+ dst[14] = src[14];
+ dst[15] = src[15];
+
+#else
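+    /* CONFIG_FAST_UNALIGNED: copy each 16-pixel row as four 32-bit words,
+       assuming the target tolerates unaligned 32-bit loads and stores. */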
+ ((uint32_t *)dst)[0] = ((const uint32_t *)src)[0];
+ ((uint32_t *)dst)[1] = ((const uint32_t *)src)[1];
+ ((uint32_t *)dst)[2] = ((const uint32_t *)src)[2];
+ ((uint32_t *)dst)[3] = ((const uint32_t *)src)[3];
+
+#endif
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+void vp9_copy_mem8x8_c(const uint8_t *src,
+ int src_stride,
+ uint8_t *dst,
+ int dst_stride) {
+ int r;
+
+ for (r = 0; r < 8; r++) {
+#if !(CONFIG_FAST_UNALIGNED)
+ dst[0] = src[0];
+ dst[1] = src[1];
+ dst[2] = src[2];
+ dst[3] = src[3];
+ dst[4] = src[4];
+ dst[5] = src[5];
+ dst[6] = src[6];
+ dst[7] = src[7];
+#else
+ ((uint32_t *)dst)[0] = ((const uint32_t *)src)[0];
+ ((uint32_t *)dst)[1] = ((const uint32_t *)src)[1];
+#endif
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+void vp9_copy_mem8x4_c(const uint8_t *src,
+ int src_stride,
+ uint8_t *dst,
+ int dst_stride) {
+ int r;
+
+ for (r = 0; r < 4; r++) {
+#if !(CONFIG_FAST_UNALIGNED)
+ dst[0] = src[0];
+ dst[1] = src[1];
+ dst[2] = src[2];
+ dst[3] = src[3];
+ dst[4] = src[4];
+ dst[5] = src[5];
+ dst[6] = src[6];
+ dst[7] = src[7];
+#else
+ ((uint32_t *)dst)[0] = ((const uint32_t *)src)[0];
+ ((uint32_t *)dst)[1] = ((const uint32_t *)src)[1];
+#endif
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int_mv *mv_q3,
+ const struct scale_factors *scale,
+ int w, int h, int weight,
+ const struct subpix_fn_table *subpix) {
+ int_mv32 mv = scale->scale_mv_q3_to_q4(mv_q3, scale);
+ src += (mv.as_mv.row >> 4) * src_stride + (mv.as_mv.col >> 4);
+ scale->predict[!!(mv.as_mv.col & 15)][!!(mv.as_mv.row & 15)][weight](
+ src, src_stride, dst, dst_stride,
+ subpix->filter_x[mv.as_mv.col & 15], scale->x_step_q4,
+ subpix->filter_y[mv.as_mv.row & 15], scale->y_step_q4,
+ w, h);
+}
+
+void vp9_build_inter_predictor_q4(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int_mv *mv_q4,
+ const struct scale_factors *scale,
+ int w, int h, int weight,
+ const struct subpix_fn_table *subpix) {
+ const int scaled_mv_row_q4 = scale->scale_mv_component_q4(mv_q4->as_mv.row,
+ scale->y_scale_fp,
+ scale->y_offset_q4);
+ const int scaled_mv_col_q4 = scale->scale_mv_component_q4(mv_q4->as_mv.col,
+ scale->x_scale_fp,
+ scale->x_offset_q4);
+ const int subpel_x = scaled_mv_col_q4 & 15;
+ const int subpel_y = scaled_mv_row_q4 & 15;
+
+ src += (scaled_mv_row_q4 >> 4) * src_stride + (scaled_mv_col_q4 >> 4);
+ scale->predict[!!subpel_x][!!subpel_y][weight](
+ src, src_stride, dst, dst_stride,
+ subpix->filter_x[subpel_x], scale->x_step_q4,
+ subpix->filter_y[subpel_y], scale->y_step_q4,
+ w, h);
+}
+
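+// Averages four q4 MV components: divide the sum by 4, rounding to nearest
+// (ties away from zero). Used below to derive a single chroma MV from the
+// four luma sub-block MVs.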
+static INLINE int round_mv_comp_q4(int value) {
+ return (value < 0 ? value - 2 : value + 2) / 4;
+}
+
+static int mi_mv_pred_row_q4(MACROBLOCKD *mb, int idx) {
+ const int temp = mb->mode_info_context->bmi[0].as_mv[idx].as_mv.row +
+ mb->mode_info_context->bmi[1].as_mv[idx].as_mv.row +
+ mb->mode_info_context->bmi[2].as_mv[idx].as_mv.row +
+ mb->mode_info_context->bmi[3].as_mv[idx].as_mv.row;
+ return round_mv_comp_q4(temp);
+}
+
+static int mi_mv_pred_col_q4(MACROBLOCKD *mb, int idx) {
+ const int temp = mb->mode_info_context->bmi[0].as_mv[idx].as_mv.col +
+ mb->mode_info_context->bmi[1].as_mv[idx].as_mv.col +
+ mb->mode_info_context->bmi[2].as_mv[idx].as_mv.col +
+ mb->mode_info_context->bmi[3].as_mv[idx].as_mv.col;
+ return round_mv_comp_q4(temp);
+}
+
+// TODO(jkoleszar): yet another mv clamping function :-(
+MV clamp_mv_to_umv_border_sb(const MV *src_mv,
+ int bwl, int bhl, int ss_x, int ss_y,
+ int mb_to_left_edge, int mb_to_top_edge,
+ int mb_to_right_edge, int mb_to_bottom_edge) {
+ /* If the MV points so far into the UMV border that no visible pixels
+ * are used for reconstruction, the subpel part of the MV can be
+ * discarded and the MV limited to 16 pixels with equivalent results.
+ */
+ const int spel_left = (VP9_INTERP_EXTEND + (4 << bwl)) << 4;
+ const int spel_right = spel_left - (1 << 4);
+ const int spel_top = (VP9_INTERP_EXTEND + (4 << bhl)) << 4;
+ const int spel_bottom = spel_top - (1 << 4);
+ MV clamped_mv;
+
+ assert(ss_x <= 1);
+ assert(ss_y <= 1);
+ clamped_mv.col = clamp(src_mv->col << (1 - ss_x),
+ (mb_to_left_edge << (1 - ss_x)) - spel_left,
+ (mb_to_right_edge << (1 - ss_x)) + spel_right);
+ clamped_mv.row = clamp(src_mv->row << (1 - ss_y),
+ (mb_to_top_edge << (1 - ss_y)) - spel_top,
+ (mb_to_bottom_edge << (1 - ss_y)) + spel_bottom);
+ return clamped_mv;
+}
+
+struct build_inter_predictors_args {
+ MACROBLOCKD *xd;
+ int x;
+ int y;
+ uint8_t* dst[MAX_MB_PLANE];
+ int dst_stride[MAX_MB_PLANE];
+ uint8_t* pre[2][MAX_MB_PLANE];
+ int pre_stride[2][MAX_MB_PLANE];
+};
+static void build_inter_predictors(int plane, int block,
+ BLOCK_SIZE_TYPE bsize,
+ int pred_w, int pred_h,
+ void *argv) {
+ const struct build_inter_predictors_args* const arg = argv;
+ MACROBLOCKD * const xd = arg->xd;
+ const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
+ const int bhl = b_height_log2(bsize) - xd->plane[plane].subsampling_y;
+ const int bh = 4 << bhl, bw = 4 << bwl;
+ const int x = 4 * (block & ((1 << bwl) - 1)), y = 4 * (block >> bwl);
+ const int use_second_ref = xd->mode_info_context->mbmi.ref_frame[1] > 0;
+ int which_mv;
+
+ assert(x < bw);
+ assert(y < bh);
+ assert(xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8 ||
+ 4 << pred_w == bw);
+ assert(xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8 ||
+ 4 << pred_h == bh);
+
+ for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
+ // source
+ const uint8_t * const base_pre = arg->pre[which_mv][plane];
+ const int pre_stride = arg->pre_stride[which_mv][plane];
+ const uint8_t *const pre = base_pre +
+ scaled_buffer_offset(x, y, pre_stride, &xd->scale_factor[which_mv]);
+ struct scale_factors * const scale =
+ plane == 0 ? &xd->scale_factor[which_mv] : &xd->scale_factor_uv[which_mv];
+
+ // dest
+ uint8_t *const dst = arg->dst[plane] + arg->dst_stride[plane] * y + x;
+
+ // motion vector
+ const MV *mv;
+ MV split_chroma_mv;
+ int_mv clamped_mv;
+
+ if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
+ if (plane == 0) {
+ mv = &xd->mode_info_context->bmi[block].as_mv[which_mv].as_mv;
+ } else {
+ // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the
+ // same MV (the average of the 4 luma MVs) but we could do something
+ // smarter for non-4:2:0. Just punt for now, pending the changes to get
+ // rid of SPLITMV mode entirely.
+ split_chroma_mv.row = mi_mv_pred_row_q4(xd, which_mv);
+ split_chroma_mv.col = mi_mv_pred_col_q4(xd, which_mv);
+ mv = &split_chroma_mv;
+ }
+ } else {
+ mv = &xd->mode_info_context->mbmi.mv[which_mv].as_mv;
+ }
+
+ /* TODO(jkoleszar): This clamping is done in the incorrect place for the
+ * scaling case. It needs to be done on the scaled MV, not the pre-scaling
+ * MV. Note however that it performs the subsampling aware scaling so
+ * that the result is always q4.
+ */
+ clamped_mv.as_mv = clamp_mv_to_umv_border_sb(mv, bwl, bhl,
+ xd->plane[plane].subsampling_x,
+ xd->plane[plane].subsampling_y,
+ xd->mb_to_left_edge,
+ xd->mb_to_top_edge,
+ xd->mb_to_right_edge,
+ xd->mb_to_bottom_edge);
+ scale->set_scaled_offsets(scale, arg->y + y, arg->x + x);
+
+ vp9_build_inter_predictor_q4(pre, pre_stride,
+ dst, arg->dst_stride[plane],
+ &clamped_mv, &xd->scale_factor[which_mv],
+ 4 << pred_w, 4 << pred_h, which_mv,
+ &xd->subpix);
+ }
+}
+void vp9_build_inter_predictors_sby(MACROBLOCKD *xd,
+ int mi_row,
+ int mi_col,
+ BLOCK_SIZE_TYPE bsize) {
+ struct build_inter_predictors_args args = {
+ xd, mi_col * MI_SIZE, mi_row * MI_SIZE,
+ {xd->plane[0].dst.buf, NULL, NULL}, {xd->plane[0].dst.stride, 0, 0},
+ {{xd->plane[0].pre[0].buf, NULL, NULL},
+ {xd->plane[0].pre[1].buf, NULL, NULL}},
+ {{xd->plane[0].pre[0].stride, 0, 0}, {xd->plane[0].pre[1].stride, 0, 0}},
+ };
+
+ foreach_predicted_block_in_plane(xd, bsize, 0, build_inter_predictors, &args);
+}
+void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd,
+ int mi_row,
+ int mi_col,
+ BLOCK_SIZE_TYPE bsize) {
+ struct build_inter_predictors_args args = {
+ xd, mi_col * MI_SIZE, mi_row * MI_SIZE,
+#if CONFIG_ALPHA
+ {NULL, xd->plane[1].dst.buf, xd->plane[2].dst.buf,
+ xd->plane[3].dst.buf},
+ {0, xd->plane[1].dst.stride, xd->plane[1].dst.stride,
+ xd->plane[3].dst.stride},
+ {{NULL, xd->plane[1].pre[0].buf, xd->plane[2].pre[0].buf,
+ xd->plane[3].pre[0].buf},
+ {NULL, xd->plane[1].pre[1].buf, xd->plane[2].pre[1].buf,
+ xd->plane[3].pre[1].buf}},
+ {{0, xd->plane[1].pre[0].stride, xd->plane[1].pre[0].stride,
+ xd->plane[3].pre[0].stride},
+ {0, xd->plane[1].pre[1].stride, xd->plane[1].pre[1].stride,
+ xd->plane[3].pre[1].stride}},
+#else
+ {NULL, xd->plane[1].dst.buf, xd->plane[2].dst.buf},
+ {0, xd->plane[1].dst.stride, xd->plane[1].dst.stride},
+ {{NULL, xd->plane[1].pre[0].buf, xd->plane[2].pre[0].buf},
+ {NULL, xd->plane[1].pre[1].buf, xd->plane[2].pre[1].buf}},
+ {{0, xd->plane[1].pre[0].stride, xd->plane[1].pre[0].stride},
+ {0, xd->plane[1].pre[1].stride, xd->plane[1].pre[1].stride}},
+#endif
+ };
+ foreach_predicted_block_uv(xd, bsize, build_inter_predictors, &args);
+}
+void vp9_build_inter_predictors_sb(MACROBLOCKD *xd,
+ int mi_row, int mi_col,
+ BLOCK_SIZE_TYPE bsize) {
+ vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
+ vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, bsize);
+}
+
+/*encoder only*/
+void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd,
+ int mb_row, int mb_col) {
+ vp9_build_inter_predictors_sbuv(xd, mb_row, mb_col,
+ BLOCK_SIZE_MB16X16);
+}
+
+// TODO(dkovalev): find a better place for this function
+void vp9_setup_scale_factors(VP9_COMMON *cm, int i) {
+ const int ref = cm->active_ref_idx[i];
+ struct scale_factors *const sf = &cm->active_ref_scale[i];
+ if (ref >= NUM_YV12_BUFFERS) {
+ memset(sf, 0, sizeof(*sf));
+ } else {
+ YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[ref];
+ vp9_setup_scale_factors_for_frame(sf,
+ fb->y_crop_width, fb->y_crop_height,
+ cm->width, cm->height);
+ }
+}
+
diff --git a/libvpx/vp9/common/vp9_reconinter.h b/libvpx/vp9/common/vp9_reconinter.h
new file mode 100644
index 0000000..4e52185
--- /dev/null
+++ b/libvpx/vp9/common/vp9_reconinter.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_RECONINTER_H_
+#define VP9_COMMON_VP9_RECONINTER_H_
+
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_onyxc_int.h"
+
+struct subpix_fn_table;
+void vp9_build_inter_predictors_sby(MACROBLOCKD *xd,
+ int mb_row,
+ int mb_col,
+ BLOCK_SIZE_TYPE bsize);
+
+void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd,
+ int mb_row,
+ int mb_col,
+ BLOCK_SIZE_TYPE bsize);
+
+void vp9_build_inter_predictors_sb(MACROBLOCKD *mb,
+ int mb_row, int mb_col,
+ BLOCK_SIZE_TYPE bsize);
+
+void vp9_setup_interp_filters(MACROBLOCKD *xd,
+ INTERPOLATIONFILTERTYPE filter,
+ VP9_COMMON *cm);
+
+void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
+ int other_w, int other_h,
+ int this_w, int this_h);
+
+void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int_mv *mv_q3,
+ const struct scale_factors *scale,
+ int w, int h, int do_avg,
+ const struct subpix_fn_table *subpix);
+
+void vp9_build_inter_predictor_q4(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int_mv *mv_q4,
+ const struct scale_factors *scale,
+ int w, int h, int do_avg,
+ const struct subpix_fn_table *subpix);
+
+static int scaled_buffer_offset(int x_offset, int y_offset, int stride,
+ const struct scale_factors *scale) {
+ const int x = scale ? scale->scale_value_x(x_offset, scale) : x_offset;
+ const int y = scale ? scale->scale_value_y(y_offset, scale) : y_offset;
+ return y * stride + x;
+}
+
+static void setup_pred_plane(struct buf_2d *dst,
+ uint8_t *src, int stride,
+ int mi_row, int mi_col,
+ const struct scale_factors *scale,
+ int subsampling_x, int subsampling_y) {
+ const int x = (MI_SIZE * mi_col) >> subsampling_x;
+ const int y = (MI_SIZE * mi_row) >> subsampling_y;
+ dst->buf = src + scaled_buffer_offset(x, y, stride, scale);
+ dst->stride = stride;
+}
+
+// TODO(jkoleszar): audit all uses of this that don't set mb_row, mb_col
+static void setup_dst_planes(MACROBLOCKD *xd,
+ const YV12_BUFFER_CONFIG *src,
+ int mi_row, int mi_col) {
+ uint8_t *buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
+ src->alpha_buffer};
+ int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
+ src->alpha_stride};
+ int i;
+
+ for (i = 0; i < MAX_MB_PLANE; ++i) {
+ struct macroblockd_plane *pd = &xd->plane[i];
+ setup_pred_plane(&pd->dst, buffers[i], strides[i], mi_row, mi_col, NULL,
+ pd->subsampling_x, pd->subsampling_y);
+ }
+}
+
+static void setup_pre_planes(MACROBLOCKD *xd,
+ const YV12_BUFFER_CONFIG *src0,
+ const YV12_BUFFER_CONFIG *src1,
+ int mi_row, int mi_col,
+ const struct scale_factors *scale,
+ const struct scale_factors *scale_uv) {
+ const YV12_BUFFER_CONFIG *srcs[2] = {src0, src1};
+ int i, j;
+
+ for (i = 0; i < 2; ++i) {
+ const YV12_BUFFER_CONFIG *src = srcs[i];
+ if (src) {
+ uint8_t* buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
+ src->alpha_buffer};
+ int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
+ src->alpha_stride};
+
+ for (j = 0; j < MAX_MB_PLANE; ++j) {
+ struct macroblockd_plane *pd = &xd->plane[j];
+ const struct scale_factors *sf = j ? scale_uv : scale;
+ setup_pred_plane(&pd->pre[i],
+ buffers[j], strides[j],
+ mi_row, mi_col, sf ? &sf[i] : NULL,
+ pd->subsampling_x, pd->subsampling_y);
+ }
+ }
+ }
+}
+
+static void set_scale_factors(MACROBLOCKD *xd,
+ int ref0, int ref1,
+ struct scale_factors scale_factor[MAX_REF_FRAMES]) {
+ xd->scale_factor[0] = scale_factor[ref0 >= 0 ? ref0 : 0];
+ xd->scale_factor[1] = scale_factor[ref1 >= 0 ? ref1 : 0];
+ xd->scale_factor_uv[0] = xd->scale_factor[0];
+ xd->scale_factor_uv[1] = xd->scale_factor[1];
+}
+
+void vp9_setup_scale_factors(VP9_COMMON *cm, int i);
+
+#endif // VP9_COMMON_VP9_RECONINTER_H_
diff --git a/libvpx/vp9/common/vp9_reconintra.c b/libvpx/vp9/common/vp9_reconintra.c
new file mode 100644
index 0000000..85dfe51
--- /dev/null
+++ b/libvpx/vp9/common/vp9_reconintra.c
@@ -0,0 +1,357 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include "./vpx_config.h"
+#include "vp9_rtcd.h"
+#include "vp9/common/vp9_reconintra.h"
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vpx_mem/vpx_mem.h"
+
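+// Directional intra predictors. The dNN names give the approximate
+// prediction angle in degrees; d45, for example, propagates the above row
+// diagonally, filtering pixel (r, c) from yabove_row[r + c .. r + c + 2].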
+static void d27_predictor(uint8_t *ypred_ptr, int y_stride,
+ int bw, int bh,
+ uint8_t *yabove_row, uint8_t *yleft_col) {
+ int r, c;
+ // first column
+ for (r = 0; r < bh - 1; ++r) {
+ ypred_ptr[r * y_stride] = ROUND_POWER_OF_TWO(yleft_col[r] +
+ yleft_col[r + 1], 1);
+ }
+ ypred_ptr[(bh - 1) * y_stride] = yleft_col[bh-1];
+ ypred_ptr++;
+ // second column
+ for (r = 0; r < bh - 2; ++r) {
+ ypred_ptr[r * y_stride] = ROUND_POWER_OF_TWO(yleft_col[r] +
+ yleft_col[r + 1] * 2 +
+ yleft_col[r + 2], 2);
+ }
+ ypred_ptr[(bh - 2) * y_stride] = ROUND_POWER_OF_TWO(yleft_col[bh - 2] +
+ yleft_col[bh - 1] * 3,
+ 2);
+ ypred_ptr[(bh - 1) * y_stride] = yleft_col[bh-1];
+ ypred_ptr++;
+
+ // rest of last row
+ for (c = 0; c < bw - 2; ++c) {
+ ypred_ptr[(bh - 1) * y_stride + c] = yleft_col[bh-1];
+ }
+
+ for (r = bh - 2; r >= 0; --r) {
+ for (c = 0; c < bw - 2; ++c) {
+ ypred_ptr[r * y_stride + c] = ypred_ptr[(r + 1) * y_stride + c - 2];
+ }
+ }
+}
+
+static void d63_predictor(uint8_t *ypred_ptr, int y_stride,
+ int bw, int bh,
+ uint8_t *yabove_row, uint8_t *yleft_col) {
+ int r, c;
+ for (r = 0; r < bh; ++r) {
+ for (c = 0; c < bw; ++c) {
+ if (r & 1) {
+ ypred_ptr[c] = ROUND_POWER_OF_TWO(yabove_row[r/2 + c] +
+ yabove_row[r/2 + c + 1] * 2 +
+ yabove_row[r/2 + c + 2], 2);
+ } else {
+        ypred_ptr[c] = ROUND_POWER_OF_TWO(yabove_row[r/2 + c] +
+                                          yabove_row[r/2 + c + 1], 1);
+ }
+ }
+ ypred_ptr += y_stride;
+ }
+}
+
+static void d45_predictor(uint8_t *ypred_ptr, int y_stride,
+ int bw, int bh,
+ uint8_t *yabove_row, uint8_t *yleft_col) {
+ int r, c;
+ for (r = 0; r < bh; ++r) {
+ for (c = 0; c < bw; ++c) {
+ if (r + c + 2 < bw * 2)
+ ypred_ptr[c] = ROUND_POWER_OF_TWO(yabove_row[r + c] +
+ yabove_row[r + c + 1] * 2 +
+ yabove_row[r + c + 2], 2);
+ else
+ ypred_ptr[c] = yabove_row[bw * 2 - 1];
+ }
+ ypred_ptr += y_stride;
+ }
+}
+
+static void d117_predictor(uint8_t *ypred_ptr, int y_stride,
+ int bw, int bh,
+ uint8_t *yabove_row, uint8_t *yleft_col) {
+ int r, c;
+ // first row
+ for (c = 0; c < bw; c++)
+ ypred_ptr[c] = ROUND_POWER_OF_TWO(yabove_row[c - 1] + yabove_row[c], 1);
+ ypred_ptr += y_stride;
+
+ // second row
+ ypred_ptr[0] = ROUND_POWER_OF_TWO(yleft_col[0] +
+ yabove_row[-1] * 2 +
+ yabove_row[0], 2);
+ for (c = 1; c < bw; c++)
+ ypred_ptr[c] = ROUND_POWER_OF_TWO(yabove_row[c - 2] +
+ yabove_row[c - 1] * 2 +
+ yabove_row[c], 2);
+ ypred_ptr += y_stride;
+
+ // the rest of first col
+ ypred_ptr[0] = ROUND_POWER_OF_TWO(yabove_row[-1] +
+ yleft_col[0] * 2 +
+ yleft_col[1], 2);
+ for (r = 3; r < bh; ++r)
+ ypred_ptr[(r-2) * y_stride] = ROUND_POWER_OF_TWO(yleft_col[r - 3] +
+ yleft_col[r - 2] * 2 +
+ yleft_col[r - 1], 2);
+ // the rest of the block
+ for (r = 2; r < bh; ++r) {
+ for (c = 1; c < bw; c++)
+ ypred_ptr[c] = ypred_ptr[-2 * y_stride + c - 1];
+ ypred_ptr += y_stride;
+ }
+}
+
+
+static void d135_predictor(uint8_t *ypred_ptr, int y_stride,
+ int bw, int bh,
+ uint8_t *yabove_row, uint8_t *yleft_col) {
+ int r, c;
+ ypred_ptr[0] = ROUND_POWER_OF_TWO(yleft_col[0] +
+ yabove_row[-1] * 2 +
+ yabove_row[0], 2);
+ for (c = 1; c < bw; c++)
+ ypred_ptr[c] = ROUND_POWER_OF_TWO(yabove_row[c - 2] +
+ yabove_row[c - 1] * 2 +
+ yabove_row[c], 2);
+
+ ypred_ptr[y_stride] = ROUND_POWER_OF_TWO(yabove_row[-1] +
+ yleft_col[0] * 2 +
+ yleft_col[1], 2);
+ for (r = 2; r < bh; ++r)
+ ypred_ptr[r * y_stride] = ROUND_POWER_OF_TWO(yleft_col[r - 2] +
+ yleft_col[r - 1] * 2 +
+ yleft_col[r], 2);
+
+ ypred_ptr += y_stride;
+ for (r = 1; r < bh; ++r) {
+ for (c = 1; c < bw; c++)
+ ypred_ptr[c] = ypred_ptr[-y_stride + c - 1];
+ ypred_ptr += y_stride;
+ }
+}
+
+static void d153_predictor(uint8_t *ypred_ptr,
+ int y_stride,
+ int bw, int bh,
+ uint8_t *yabove_row,
+ uint8_t *yleft_col) {
+ int r, c;
+ ypred_ptr[0] = ROUND_POWER_OF_TWO(yabove_row[-1] + yleft_col[0], 1);
+ for (r = 1; r < bh; r++)
+ ypred_ptr[r * y_stride] =
+ ROUND_POWER_OF_TWO(yleft_col[r - 1] + yleft_col[r], 1);
+ ypred_ptr++;
+
+ ypred_ptr[0] = ROUND_POWER_OF_TWO(yleft_col[0] +
+ yabove_row[-1] * 2 +
+ yabove_row[0], 2);
+ ypred_ptr[y_stride] = ROUND_POWER_OF_TWO(yabove_row[-1] +
+ yleft_col[0] * 2 +
+ yleft_col[1], 2);
+ for (r = 2; r < bh; r++)
+ ypred_ptr[r * y_stride] = ROUND_POWER_OF_TWO(yleft_col[r - 2] +
+ yleft_col[r - 1] * 2 +
+ yleft_col[r], 2);
+ ypred_ptr++;
+
+ for (c = 0; c < bw - 2; c++)
+ ypred_ptr[c] = ROUND_POWER_OF_TWO(yabove_row[c - 1] +
+ yabove_row[c] * 2 +
+ yabove_row[c + 1], 2);
+ ypred_ptr += y_stride;
+ for (r = 1; r < bh; ++r) {
+ for (c = 0; c < bw - 2; c++)
+ ypred_ptr[c] = ypred_ptr[-y_stride + c - 2];
+ ypred_ptr += y_stride;
+ }
+}
+
+void vp9_build_intra_predictors(uint8_t *src, int src_stride,
+ uint8_t *ypred_ptr,
+ int y_stride, int mode,
+ int bw, int bh,
+ int up_available, int left_available,
+ int right_available) {
+ int r, c, i;
+ uint8_t yleft_col[64], yabove_data[129], ytop_left;
+ uint8_t *yabove_row = yabove_data + 1;
+
+ // 127 127 127 .. 127 127 127 127 127 127
+ // 129 A B .. Y Z
+ // 129 C D .. W X
+ // 129 E F .. U V
+ // 129 G H .. S T T T T T
+ // ..
+
+ assert(bw == bh);
+
+ if (left_available) {
+ for (i = 0; i < bh; i++)
+ yleft_col[i] = src[i * src_stride - 1];
+ } else {
+ vpx_memset(yleft_col, 129, bh);
+ }
+
+ if (up_available) {
+ uint8_t *yabove_ptr = src - src_stride;
+ vpx_memcpy(yabove_row, yabove_ptr, bw);
+ if (bw == 4 && right_available)
+ vpx_memcpy(yabove_row + bw, yabove_ptr + bw, bw);
+ else
+      vpx_memset(yabove_row + bw, yabove_row[bw - 1], bw);
+ ytop_left = left_available ? yabove_ptr[-1] : 129;
+ } else {
+ vpx_memset(yabove_row, 127, bw * 2);
+ ytop_left = 127;
+ }
+ yabove_row[-1] = ytop_left;
+
+ switch (mode) {
+ case DC_PRED: {
+ int i;
+ int expected_dc = 128;
+ int average = 0;
+ int count = 0;
+
+ if (up_available || left_available) {
+ if (up_available) {
+ for (i = 0; i < bw; i++)
+ average += yabove_row[i];
+ count += bw;
+ }
+ if (left_available) {
+ for (i = 0; i < bh; i++)
+ average += yleft_col[i];
+ count += bh;
+ }
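+        // Round-to-nearest average of all available border pixels.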
+ expected_dc = (average + (count >> 1)) / count;
+ }
+ for (r = 0; r < bh; r++) {
+ vpx_memset(ypred_ptr, expected_dc, bw);
+ ypred_ptr += y_stride;
+ }
+ }
+ break;
+ case V_PRED:
+ for (r = 0; r < bh; r++) {
+ vpx_memcpy(ypred_ptr, yabove_row, bw);
+ ypred_ptr += y_stride;
+ }
+ break;
+ case H_PRED:
+ for (r = 0; r < bh; r++) {
+ vpx_memset(ypred_ptr, yleft_col[r], bw);
+ ypred_ptr += y_stride;
+ }
+ break;
+ case TM_PRED:
+ for (r = 0; r < bh; r++) {
+ for (c = 0; c < bw; c++)
+ ypred_ptr[c] = clip_pixel(yleft_col[r] + yabove_row[c] - ytop_left);
+ ypred_ptr += y_stride;
+ }
+ break;
+ case D45_PRED:
+ d45_predictor(ypred_ptr, y_stride, bw, bh, yabove_row, yleft_col);
+ break;
+ case D135_PRED:
+ d135_predictor(ypred_ptr, y_stride, bw, bh, yabove_row, yleft_col);
+ break;
+ case D117_PRED:
+ d117_predictor(ypred_ptr, y_stride, bw, bh, yabove_row, yleft_col);
+ break;
+ case D153_PRED:
+ d153_predictor(ypred_ptr, y_stride, bw, bh, yabove_row, yleft_col);
+ break;
+ case D27_PRED:
+ d27_predictor(ypred_ptr, y_stride, bw, bh, yabove_row, yleft_col);
+ break;
+ case D63_PRED:
+ d63_predictor(ypred_ptr, y_stride, bw, bh, yabove_row, yleft_col);
+ break;
+ default:
+ break;
+ }
+}
+
+void vp9_build_intra_predictors_sby_s(MACROBLOCKD *xd,
+ BLOCK_SIZE_TYPE bsize) {
+ const struct macroblockd_plane* const pd = &xd->plane[0];
+ const int bw = plane_block_width(bsize, pd);
+ const int bh = plane_block_height(bsize, pd);
+ vp9_build_intra_predictors(pd->dst.buf, pd->dst.stride,
+ pd->dst.buf, pd->dst.stride,
+ xd->mode_info_context->mbmi.mode,
+ bw, bh, xd->up_available, xd->left_available,
+ 0 /*xd->right_available*/);
+}
+
+void vp9_build_intra_predictors_sbuv_s(MACROBLOCKD *xd,
+ BLOCK_SIZE_TYPE bsize) {
+ const int bwl = b_width_log2(bsize), bw = 2 << bwl;
+ const int bhl = b_height_log2(bsize), bh = 2 << bhl;
+
+ vp9_build_intra_predictors(xd->plane[1].dst.buf, xd->plane[1].dst.stride,
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride,
+ xd->mode_info_context->mbmi.uv_mode,
+ bw, bh, xd->up_available,
+ xd->left_available, 0 /*xd->right_available*/);
+ vp9_build_intra_predictors(xd->plane[2].dst.buf, xd->plane[1].dst.stride,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
+ xd->mode_info_context->mbmi.uv_mode,
+ bw, bh, xd->up_available,
+ xd->left_available, 0 /*xd->right_available*/);
+}
+
+void vp9_predict_intra_block(MACROBLOCKD *xd,
+ int block_idx,
+ int bwl_in,
+ TX_SIZE tx_size,
+ int mode,
+ uint8_t *predictor, int pre_stride) {
+ const int bwl = bwl_in - tx_size;
+ const int wmask = (1 << bwl) - 1;
+ const int have_top = (block_idx >> bwl) || xd->up_available;
+ const int have_left = (block_idx & wmask) || xd->left_available;
+ const int have_right = ((block_idx & wmask) != wmask);
+ const int txfm_block_size = 4 << tx_size;
+
+ assert(bwl >= 0);
+ vp9_build_intra_predictors(predictor, pre_stride,
+ predictor, pre_stride,
+ mode,
+ txfm_block_size,
+ txfm_block_size,
+ have_top, have_left,
+ have_right);
+}
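+
+ // Worked example (illustrative): for a 16x16 block split into 4x4
+ // transforms, bwl_in = 2 and tx_size = TX_4X4, so bwl = 2 and
+ // wmask = 3. Raster block_idx = 5 sits at row 1 (5 >> 2), col 1
+ // (5 & 3), hence have_top and have_left are true regardless of the
+ // block edges, have_right = ((5 & 3) != 3) is true, and the
+ // predictor covers a 4 << TX_4X4 = 4 pixel square.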
+
+void vp9_intra4x4_predict(MACROBLOCKD *xd,
+ int block_idx,
+ BLOCK_SIZE_TYPE bsize,
+ int mode,
+ uint8_t *predictor, int pre_stride) {
+ vp9_predict_intra_block(xd, block_idx, b_width_log2(bsize), TX_4X4,
+ mode, predictor, pre_stride);
+}
diff --git a/libvpx/vp9/common/vp9_reconintra.h b/libvpx/vp9/common/vp9_reconintra.h
new file mode 100644
index 0000000..f5f5f42
--- /dev/null
+++ b/libvpx/vp9/common/vp9_reconintra.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_RECONINTRA_H_
+#define VP9_COMMON_VP9_RECONINTRA_H_
+
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_blockd.h"
+
+MB_PREDICTION_MODE vp9_find_dominant_direction(uint8_t *ptr,
+ int stride, int n,
+ int tx, int ty);
+
+MB_PREDICTION_MODE vp9_find_bpred_context(MACROBLOCKD *xd, int block,
+ uint8_t *ptr, int stride);
+
+void vp9_predict_intra_block(MACROBLOCKD *xd,
+ int block_idx,
+ int bwl_in,
+ TX_SIZE tx_size,
+ int mode,
+ uint8_t *predictor, int pre_stride);
+#endif // VP9_COMMON_VP9_RECONINTRA_H_
diff --git a/libvpx/vp9/common/vp9_rtcd.c b/libvpx/vp9/common/vp9_rtcd.c
new file mode 100644
index 0000000..72613ae
--- /dev/null
+++ b/libvpx/vp9/common/vp9_rtcd.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "vpx_config.h"
+#define RTCD_C
+#include "vp9_rtcd.h"
+#include "vpx_ports/vpx_once.h"
+
+void vpx_scale_rtcd(void);
+
+void vp9_rtcd(void) {
+ vpx_scale_rtcd();
+ once(setup_rtcd_internal);
+}
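+
+// Note (assumption based on vpx_ports/vpx_once.h): once() runs
+// setup_rtcd_internal() exactly one time even if vp9_rtcd() is called
+// concurrently from several threads, so the dispatch table is never
+// initialized twice.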
diff --git a/libvpx/vp9/common/vp9_rtcd_defs.sh b/libvpx/vp9/common/vp9_rtcd_defs.sh
new file mode 100644
index 0000000..a405aab
--- /dev/null
+++ b/libvpx/vp9/common/vp9_rtcd_defs.sh
@@ -0,0 +1,611 @@
+vp9_common_forward_decls() {
+cat <<EOF
+/*
+ * VP9
+ */
+
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_enums.h"
+
+struct loop_filter_info;
+struct macroblockd;
+
+/* Encoder forward decls */
+struct macroblock;
+struct vp9_variance_vtable;
+
+#define DEC_MVCOSTS int *mvjcost, int *mvcost[2]
+union int_mv;
+struct yv12_buffer_config;
+EOF
+}
+forward_decls vp9_common_forward_decls
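+
+# Note: "prototype" and "specialize" are helpers provided by
+# build/make/rtcd.sh. As a rough sketch (not the exact generated
+# code), a hypothetical pair such as
+#
+#   prototype void vp9_foo "uint8_t *dst, int stride"
+#   specialize vp9_foo sse2
+#
+# declares vp9_foo_c and vp9_foo_sse2 and emits a vp9_foo dispatch
+# hook that setup_rtcd_internal() points at the best variant the
+# running CPU supports.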
+
+#
+# Dequant
+#
+
+prototype void vp9_idct_add_16x16 "int16_t *input, uint8_t *dest, int stride, int eob"
+specialize vp9_idct_add_16x16
+
+prototype void vp9_idct_add_8x8 "int16_t *input, uint8_t *dest, int stride, int eob"
+specialize vp9_idct_add_8x8
+
+prototype void vp9_idct_add "int16_t *input, uint8_t *dest, int stride, int eob"
+specialize vp9_idct_add
+
+
+prototype void vp9_idct_add_32x32 "int16_t *q, uint8_t *dst, int stride, int eob"
+specialize vp9_idct_add_32x32
+
+#
+# RECON
+#
+prototype void vp9_copy_mem16x16 "const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch"
+specialize vp9_copy_mem16x16 mmx sse2 dspr2
+vp9_copy_mem16x16_dspr2=vp9_copy_mem16x16_dspr2
+
+prototype void vp9_copy_mem8x8 "const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch"
+specialize vp9_copy_mem8x8 mmx dspr2
+vp9_copy_mem8x8_dspr2=vp9_copy_mem8x8_dspr2
+
+prototype void vp9_copy_mem8x4 "const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch"
+specialize vp9_copy_mem8x4 mmx
+
+prototype void vp9_build_intra_predictors "uint8_t *src, int src_stride, uint8_t *pred, int y_stride, int mode, int bw, int bh, int up_available, int left_available, int right_available"
+specialize vp9_build_intra_predictors
+
+prototype void vp9_build_intra_predictors_sby_s "struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize"
+specialize vp9_build_intra_predictors_sby_s
+
+prototype void vp9_build_intra_predictors_sbuv_s "struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize"
+specialize vp9_build_intra_predictors_sbuv_s
+
+prototype void vp9_intra4x4_predict "struct macroblockd *xd, int block, enum BLOCK_SIZE_TYPE bsize, int b_mode, uint8_t *predictor, int pre_stride"
+specialize vp9_intra4x4_predict
+
+if [ "$CONFIG_VP9_DECODER" = "yes" ]; then
+prototype void vp9_add_constant_residual_8x8 "const int16_t diff, uint8_t *dest, int stride"
+specialize vp9_add_constant_residual_8x8 sse2
+
+prototype void vp9_add_constant_residual_16x16 "const int16_t diff, uint8_t *dest, int stride"
+specialize vp9_add_constant_residual_16x16 sse2
+
+prototype void vp9_add_constant_residual_32x32 "const int16_t diff, uint8_t *dest, int stride"
+specialize vp9_add_constant_residual_32x32 sse2
+fi
+
+#
+# Loopfilter
+#
+prototype void vp9_mb_lpf_vertical_edge_w "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh"
+specialize vp9_mb_lpf_vertical_edge_w sse2
+
+prototype void vp9_mbloop_filter_vertical_edge "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"
+specialize vp9_mbloop_filter_vertical_edge sse2
+
+prototype void vp9_loop_filter_vertical_edge "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"
+specialize vp9_loop_filter_vertical_edge mmx
+
+prototype void vp9_mb_lpf_horizontal_edge_w "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh"
+specialize vp9_mb_lpf_horizontal_edge_w sse2
+
+prototype void vp9_mbloop_filter_horizontal_edge "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"
+specialize vp9_mbloop_filter_horizontal_edge sse2
+
+prototype void vp9_loop_filter_horizontal_edge "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"
+specialize vp9_loop_filter_horizontal_edge mmx
+
+#
+# post proc
+#
+if [ "$CONFIG_POSTPROC" = "yes" ]; then
+prototype void vp9_mbpost_proc_down "uint8_t *dst, int pitch, int rows, int cols, int flimit"
+specialize vp9_mbpost_proc_down mmx sse2
+vp9_mbpost_proc_down_sse2=vp9_mbpost_proc_down_xmm
+
+prototype void vp9_mbpost_proc_across_ip "uint8_t *src, int pitch, int rows, int cols, int flimit"
+specialize vp9_mbpost_proc_across_ip sse2
+vp9_mbpost_proc_across_ip_sse2=vp9_mbpost_proc_across_ip_xmm
+
+prototype void vp9_post_proc_down_and_across "const uint8_t *src_ptr, uint8_t *dst_ptr, int src_pixels_per_line, int dst_pixels_per_line, int rows, int cols, int flimit"
+specialize vp9_post_proc_down_and_across mmx sse2
+vp9_post_proc_down_and_across_sse2=vp9_post_proc_down_and_across_xmm
+
+prototype void vp9_plane_add_noise "uint8_t *Start, char *noise, char blackclamp[16], char whiteclamp[16], char bothclamp[16], unsigned int Width, unsigned int Height, int Pitch"
+specialize vp9_plane_add_noise mmx sse2
+vp9_plane_add_noise_sse2=vp9_plane_add_noise_wmt
+fi
+
+prototype void vp9_blend_mb_inner "uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride"
+specialize vp9_blend_mb_inner
+
+prototype void vp9_blend_mb_outer "uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride"
+specialize vp9_blend_mb_outer
+
+prototype void vp9_blend_b "uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride"
+specialize vp9_blend_b
+
+#
+# Sub Pixel Filters
+#
+prototype void vp9_convolve8 "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
+specialize vp9_convolve8 ssse3
+
+prototype void vp9_convolve8_horiz "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
+specialize vp9_convolve8_horiz ssse3
+
+prototype void vp9_convolve8_vert "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
+specialize vp9_convolve8_vert ssse3
+
+prototype void vp9_convolve8_avg "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
+specialize vp9_convolve8_avg ssse3
+
+prototype void vp9_convolve8_avg_horiz "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
+specialize vp9_convolve8_avg_horiz ssse3
+
+prototype void vp9_convolve8_avg_vert "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
+specialize vp9_convolve8_avg_vert ssse3
+
+#
+# dct
+#
+prototype void vp9_short_idct4x4_1_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct4x4_1_add
+
+prototype void vp9_short_idct4x4_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct4x4_add sse2
+
+prototype void vp9_short_idct8x8_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct8x8_add sse2
+
+prototype void vp9_short_idct10_8x8_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct10_8x8_add sse2
+
+prototype void vp9_short_idct1_8x8 "int16_t *input, int16_t *output"
+specialize vp9_short_idct1_8x8
+
+prototype void vp9_short_idct16x16_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct16x16_add sse2
+
+prototype void vp9_short_idct10_16x16_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct10_16x16_add sse2
+
+prototype void vp9_short_idct1_16x16 "int16_t *input, int16_t *output"
+specialize vp9_short_idct1_16x16
+
+prototype void vp9_short_idct32x32_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct32x32_add sse2
+
+prototype void vp9_short_idct1_32x32 "int16_t *input, int16_t *output"
+specialize vp9_short_idct1_32x32
+
+prototype void vp9_short_idct10_32x32_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct10_32x32_add
+
+prototype void vp9_short_iht4x4_add "int16_t *input, uint8_t *dest, int dest_stride, int tx_type"
+specialize vp9_short_iht4x4_add
+
+prototype void vp9_short_iht8x8_add "int16_t *input, uint8_t *dest, int dest_stride, int tx_type"
+specialize vp9_short_iht8x8_add
+
+prototype void vp9_short_iht16x16_add "int16_t *input, uint8_t *output, int pitch, int tx_type"
+specialize vp9_short_iht16x16_add
+
+prototype void vp9_idct4_1d "int16_t *input, int16_t *output"
+specialize vp9_idct4_1d sse2
+# dct and add
+
+prototype void vp9_dc_only_idct_add "int input_dc, uint8_t *pred_ptr, uint8_t *dst_ptr, int pitch, int stride"
+specialize vp9_dc_only_idct_add sse2
+
+prototype void vp9_short_iwalsh4x4_1_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_iwalsh4x4_1_add
+
+prototype void vp9_short_iwalsh4x4_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_iwalsh4x4_add
+
+prototype unsigned int vp9_sad32x3 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, int max_sad"
+specialize vp9_sad32x3
+
+prototype unsigned int vp9_sad3x32 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, int max_sad"
+specialize vp9_sad3x32
+
+#
+# Encoder functions below this point.
+#
+if [ "$CONFIG_VP9_ENCODER" = "yes" ]; then
+
+
+# variance
+[ $arch = "x86_64" ] && mmx_x86_64=mmx && sse2_x86_64=sse2
+
+prototype unsigned int vp9_variance32x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance32x16 sse2
+
+prototype unsigned int vp9_variance16x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance16x32 sse2
+
+prototype unsigned int vp9_variance64x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance64x32 sse2
+
+prototype unsigned int vp9_variance32x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance32x64 sse2
+
+prototype unsigned int vp9_variance32x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance32x32 sse2
+
+prototype unsigned int vp9_variance64x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance64x64 sse2
+
+prototype unsigned int vp9_variance16x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance16x16 mmx sse2
+
+prototype unsigned int vp9_variance16x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance16x8 mmx sse2
+
+prototype unsigned int vp9_variance8x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance8x16 mmx sse2
+
+prototype unsigned int vp9_variance8x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance8x8 mmx sse2
+
+prototype void vp9_get_sse_sum_8x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum"
+specialize vp9_get_sse_sum_8x8 sse2
+vp9_get_sse_sum_8x8_sse2=vp9_get8x8var_sse2
+
+prototype unsigned int vp9_variance8x4 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance8x4 sse2
+
+prototype unsigned int vp9_variance4x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance4x8 sse2
+
+prototype unsigned int vp9_variance4x4 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance4x4 mmx sse2
+
+prototype unsigned int vp9_sub_pixel_variance64x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance64x64 sse2
+
+prototype unsigned int vp9_sub_pixel_avg_variance64x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance64x64
+
+prototype unsigned int vp9_sub_pixel_variance32x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance32x64
+
+prototype unsigned int vp9_sub_pixel_avg_variance32x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance32x64
+
+prototype unsigned int vp9_sub_pixel_variance64x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance64x32
+
+prototype unsigned int vp9_sub_pixel_avg_variance64x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance64x32
+
+prototype unsigned int vp9_sub_pixel_variance32x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance32x16
+
+prototype unsigned int vp9_sub_pixel_avg_variance32x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance32x16
+
+prototype unsigned int vp9_sub_pixel_variance16x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance16x32
+
+prototype unsigned int vp9_sub_pixel_avg_variance16x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance16x32
+
+prototype unsigned int vp9_sub_pixel_variance32x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance32x32 sse2
+
+prototype unsigned int vp9_sub_pixel_avg_variance32x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance32x32
+
+prototype unsigned int vp9_sub_pixel_variance16x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance16x16 sse2 mmx ssse3
+
+prototype unsigned int vp9_sub_pixel_avg_variance16x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance16x16
+
+prototype unsigned int vp9_sub_pixel_variance8x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance8x16 sse2 mmx
+vp9_sub_pixel_variance8x16_sse2=vp9_sub_pixel_variance8x16_wmt
+
+prototype unsigned int vp9_sub_pixel_avg_variance8x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance8x16
+
+prototype unsigned int vp9_sub_pixel_variance16x8 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance16x8 sse2 mmx ssse3
+vp9_sub_pixel_variance16x8_sse2=vp9_sub_pixel_variance16x8_wmt
+
+prototype unsigned int vp9_sub_pixel_avg_variance16x8 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance16x8
+
+prototype unsigned int vp9_sub_pixel_variance8x8 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance8x8 sse2 mmx
+vp9_sub_pixel_variance8x8_sse2=vp9_sub_pixel_variance8x8_wmt
+
+prototype unsigned int vp9_sub_pixel_avg_variance8x8 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance8x8
+
+# TODO(jingning): need to convert 8x4/4x8 functions into mmx/sse form
+prototype unsigned int vp9_sub_pixel_variance8x4 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance8x4
+
+prototype unsigned int vp9_sub_pixel_avg_variance8x4 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance8x4
+
+prototype unsigned int vp9_sub_pixel_variance4x8 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance4x8
+
+prototype unsigned int vp9_sub_pixel_avg_variance4x8 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance4x8
+
+prototype unsigned int vp9_sub_pixel_variance4x4 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_variance4x4 sse2 mmx
+vp9_sub_pixel_variance4x4_sse2=vp9_sub_pixel_variance4x4_wmt
+
+prototype unsigned int vp9_sub_pixel_avg_variance4x4 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred"
+specialize vp9_sub_pixel_avg_variance4x4
+
+prototype unsigned int vp9_sad64x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad64x64 sse2
+
+prototype unsigned int vp9_sad32x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad32x64 sse2
+
+prototype unsigned int vp9_sad64x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad64x32 sse2
+
+prototype unsigned int vp9_sad32x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad32x16 sse2
+
+prototype unsigned int vp9_sad16x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad16x32 sse2
+
+prototype unsigned int vp9_sad32x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad32x32 sse2
+
+prototype unsigned int vp9_sad16x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad16x16 mmx sse2
+
+prototype unsigned int vp9_sad16x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad16x8 mmx sse2
+
+prototype unsigned int vp9_sad8x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad8x16 mmx sse2
+
+prototype unsigned int vp9_sad8x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad8x8 mmx sse2
+
+# TODO(jingning): need to convert these functions into mmx/sse2 form
+prototype unsigned int vp9_sad8x4 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad8x4 sse2
+
+prototype unsigned int vp9_sad4x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad4x8 sse
+
+prototype unsigned int vp9_sad4x4 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
+specialize vp9_sad4x4 mmx sse
+
+prototype unsigned int vp9_variance_halfpixvar16x16_h "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar16x16_h mmx sse2
+vp9_variance_halfpixvar16x16_h_sse2=vp9_variance_halfpixvar16x16_h_wmt
+
+prototype unsigned int vp9_variance_halfpixvar16x16_v "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar16x16_v mmx sse2
+vp9_variance_halfpixvar16x16_v_sse2=vp9_variance_halfpixvar16x16_v_wmt
+
+prototype unsigned int vp9_variance_halfpixvar16x16_hv "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar16x16_hv mmx sse2
+vp9_variance_halfpixvar16x16_hv_sse2=vp9_variance_halfpixvar16x16_hv_wmt
+
+prototype unsigned int vp9_variance_halfpixvar64x64_h "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar64x64_h
+
+prototype unsigned int vp9_variance_halfpixvar64x64_v "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar64x64_v
+
+prototype unsigned int vp9_variance_halfpixvar64x64_hv "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar64x64_hv
+
+prototype unsigned int vp9_variance_halfpixvar32x32_h "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar32x32_h
+
+prototype unsigned int vp9_variance_halfpixvar32x32_v "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar32x32_v
+
+prototype unsigned int vp9_variance_halfpixvar32x32_hv "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_variance_halfpixvar32x32_hv
+
+prototype void vp9_sad64x64x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad64x64x3
+
+prototype void vp9_sad32x32x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad32x32x3
+
+prototype void vp9_sad16x16x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad16x16x3 sse3 ssse3
+
+prototype void vp9_sad16x8x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad16x8x3 sse3 ssse3
+
+prototype void vp9_sad8x16x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad8x16x3 sse3
+
+prototype void vp9_sad8x8x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad8x8x3 sse3
+
+prototype void vp9_sad4x4x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp9_sad4x4x3 sse3
+
+prototype void vp9_sad64x64x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"
+specialize vp9_sad64x64x8
+
+prototype void vp9_sad32x32x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"
+specialize vp9_sad32x32x8
+
+prototype void vp9_sad16x16x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"
+specialize vp9_sad16x16x8 sse4
+
+prototype void vp9_sad16x8x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"
+specialize vp9_sad16x8x8 sse4
+
+prototype void vp9_sad8x16x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"
+specialize vp9_sad8x16x8 sse4
+
+prototype void vp9_sad8x8x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"
+specialize vp9_sad8x8x8 sse4
+
+prototype void vp9_sad8x4x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"
+specialize vp9_sad8x4x8
+
+prototype void vp9_sad4x8x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"
+specialize vp9_sad4x8x8
+
+prototype void vp9_sad4x4x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array"
+specialize vp9_sad4x4x8 sse4
+
+prototype void vp9_sad64x64x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad64x64x4d sse2
+
+prototype void vp9_sad32x64x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad32x64x4d sse2
+
+prototype void vp9_sad64x32x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad64x32x4d sse2
+
+prototype void vp9_sad32x16x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad32x16x4d sse2
+
+prototype void vp9_sad16x32x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad16x32x4d sse2
+
+prototype void vp9_sad32x32x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad32x32x4d sse2
+
+prototype void vp9_sad16x16x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad16x16x4d sse2
+
+prototype void vp9_sad16x8x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad16x8x4d sse2
+
+prototype void vp9_sad8x16x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad8x16x4d sse2
+
+prototype void vp9_sad8x8x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad8x8x4d sse2
+
+# TODO(jingning): need to convert these 4x8/8x4 functions into sse2 form
+prototype void vp9_sad8x4x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad8x4x4d sse2
+
+prototype void vp9_sad4x8x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad4x8x4d sse
+
+prototype void vp9_sad4x4x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
+specialize vp9_sad4x4x4d sse
+
+prototype unsigned int vp9_sub_pixel_mse16x16 "const uint8_t *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, const uint8_t *dst_ptr, int dst_pixels_per_line, unsigned int *sse"
+specialize vp9_sub_pixel_mse16x16 sse2 mmx
+
+prototype unsigned int vp9_mse16x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse"
+specialize vp9_mse16x16 mmx sse2
+vp9_mse16x16_sse2=vp9_mse16x16_wmt
+
+prototype unsigned int vp9_mse8x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse"
+specialize vp9_mse8x16
+
+prototype unsigned int vp9_mse16x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse"
+specialize vp9_mse16x8
+
+prototype unsigned int vp9_mse8x8 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse"
+specialize vp9_mse8x8
+
+prototype unsigned int vp9_sub_pixel_mse64x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_mse64x64
+
+prototype unsigned int vp9_sub_pixel_mse32x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp9_sub_pixel_mse32x32
+
+prototype unsigned int vp9_get_mb_ss "const int16_t *"
+specialize vp9_get_mb_ss mmx sse2
+# ENCODEMB INVOKE
+
+prototype int vp9_block_error "int16_t *coeff, int16_t *dqcoeff, int block_size"
+specialize vp9_block_error mmx sse2
+vp9_block_error_sse2=vp9_block_error_xmm
+
+#
+# Structured Similarity (SSIM)
+#
+if [ "$CONFIG_INTERNAL_STATS" = "yes" ]; then
+ [ $arch = "x86_64" ] && sse2_on_x86_64=sse2
+
+ prototype void vp9_ssim_parms_8x8 "uint8_t *s, int sp, uint8_t *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr"
+ specialize vp9_ssim_parms_8x8 $sse2_on_x86_64
+
+ prototype void vp9_ssim_parms_16x16 "uint8_t *s, int sp, uint8_t *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr"
+ specialize vp9_ssim_parms_16x16 $sse2_on_x86_64
+fi
+
+# fdct functions
+prototype void vp9_short_fht4x4 "int16_t *InputData, int16_t *OutputData, int pitch, int tx_type"
+specialize vp9_short_fht4x4
+
+prototype void vp9_short_fht8x8 "int16_t *InputData, int16_t *OutputData, int pitch, int tx_type"
+specialize vp9_short_fht8x8
+
+prototype void vp9_short_fht16x16 "int16_t *InputData, int16_t *OutputData, int pitch, int tx_type"
+specialize vp9_short_fht16x16
+
+prototype void vp9_short_fdct8x8 "int16_t *InputData, int16_t *OutputData, int pitch"
+specialize vp9_short_fdct8x8 sse2
+
+prototype void vp9_short_fdct4x4 "int16_t *InputData, int16_t *OutputData, int pitch"
+specialize vp9_short_fdct4x4 sse2
+
+prototype void vp9_short_fdct8x4 "int16_t *InputData, int16_t *OutputData, int pitch"
+specialize vp9_short_fdct8x4 sse2
+
+prototype void vp9_short_fdct32x32 "int16_t *InputData, int16_t *OutputData, int pitch"
+specialize vp9_short_fdct32x32
+
+prototype void vp9_short_fdct32x32_rd "int16_t *InputData, int16_t *OutputData, int pitch"
+specialize vp9_short_fdct32x32_rd
+
+prototype void vp9_short_fdct16x16 "int16_t *InputData, int16_t *OutputData, int pitch"
+specialize vp9_short_fdct16x16 sse2
+
+prototype void vp9_short_walsh4x4 "int16_t *InputData, int16_t *OutputData, int pitch"
+specialize vp9_short_walsh4x4
+
+prototype void vp9_short_walsh8x4 "int16_t *InputData, int16_t *OutputData, int pitch"
+specialize vp9_short_walsh8x4
+
+#
+# Motion search
+#
+prototype int vp9_full_search_sad "struct macroblock *x, union int_mv *ref_mv, int sad_per_bit, int distance, struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, union int_mv *center_mv, int n"
+specialize vp9_full_search_sad sse3 sse4_1
+vp9_full_search_sad_sse3=vp9_full_search_sadx3
+vp9_full_search_sad_sse4_1=vp9_full_search_sadx8
+
+prototype int vp9_refining_search_sad "struct macroblock *x, union int_mv *ref_mv, int sad_per_bit, int distance, struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, union int_mv *center_mv"
+specialize vp9_refining_search_sad sse3
+vp9_refining_search_sad_sse3=vp9_refining_search_sadx4
+
+prototype int vp9_diamond_search_sad "struct macroblock *x, union int_mv *ref_mv, union int_mv *best_mv, int search_param, int sad_per_bit, int *num00, struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, union int_mv *center_mv"
+specialize vp9_diamond_search_sad sse3
+vp9_diamond_search_sad_sse3=vp9_diamond_search_sadx4
+
+prototype void vp9_temporal_filter_apply "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_size, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count"
+specialize vp9_temporal_filter_apply sse2
+
+prototype void vp9_yv12_copy_partial_frame "struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc, int fraction"
+specialize vp9_yv12_copy_partial_frame
+
+
+fi
+# end encoder functions
diff --git a/libvpx/vp9/common/vp9_sadmxn.h b/libvpx/vp9/common/vp9_sadmxn.h
new file mode 100644
index 0000000..b2dfd63
--- /dev/null
+++ b/libvpx/vp9/common/vp9_sadmxn.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_SADMXN_H_
+#define VP9_COMMON_VP9_SADMXN_H_
+
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+
+static INLINE unsigned int sad_mx_n_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ int m,
+ int n) {
+ int r, c;
+ unsigned int sad = 0;
+
+ for (r = 0; r < n; r++) {
+ for (c = 0; c < m; c++) {
+ sad += abs(src_ptr[c] - ref_ptr[c]);
+ }
+
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ }
+
+ return sad;
+}
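+
+// Usage sketch (illustrative; the wrapper shown here is hypothetical):
+// the per-size C SAD functions are thin wrappers over this helper,
+// with max_sad assumed unused in the plain C path, e.g.
+//   unsigned int vp9_sad8x8_c(const uint8_t *s, int ss,
+//                             const uint8_t *r, int rs,
+//                             unsigned int max_sad) {
+//     return sad_mx_n_c(s, ss, r, rs, 8, 8);
+//   }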
+
+#endif // VP9_COMMON_VP9_SADMXN_H_
diff --git a/libvpx/vp9/common/vp9_seg_common.c b/libvpx/vp9/common/vp9_seg_common.c
new file mode 100644
index 0000000..df7747c
--- /dev/null
+++ b/libvpx/vp9/common/vp9_seg_common.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_seg_common.h"
+
+static const int seg_feature_data_signed[SEG_LVL_MAX] = { 1, 1, 0, 0 };
+static const int seg_feature_data_max[SEG_LVL_MAX] = { MAXQ, 63, 3, 0 };
+
+// These functions provide access to new segment level features.
+// Eventually these function may be "optimized out" but for the moment,
+// the coding mechanism is still subject to change so these provide a
+// convenient single point of change.
+
+int vp9_segfeature_active(const MACROBLOCKD *xd, int segment_id,
+ SEG_LVL_FEATURES feature_id) {
+ return xd->segmentation_enabled &&
+ (xd->segment_feature_mask[segment_id] & (1 << feature_id));
+}
+
+void vp9_clearall_segfeatures(MACROBLOCKD *xd) {
+ vpx_memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data));
+ vpx_memset(xd->segment_feature_mask, 0, sizeof(xd->segment_feature_mask));
+}
+
+void vp9_enable_segfeature(MACROBLOCKD *xd, int segment_id,
+ SEG_LVL_FEATURES feature_id) {
+ xd->segment_feature_mask[segment_id] |= 1 << feature_id;
+}
+
+void vp9_disable_segfeature(MACROBLOCKD *xd, int segment_id,
+ SEG_LVL_FEATURES feature_id) {
+ xd->segment_feature_mask[segment_id] &= ~(1 << feature_id);
+}
+
+int vp9_seg_feature_data_max(SEG_LVL_FEATURES feature_id) {
+ return seg_feature_data_max[feature_id];
+}
+
+int vp9_is_segfeature_signed(SEG_LVL_FEATURES feature_id) {
+ return seg_feature_data_signed[feature_id];
+}
+
+void vp9_clear_segdata(MACROBLOCKD *xd, int segment_id,
+ SEG_LVL_FEATURES feature_id) {
+ xd->segment_feature_data[segment_id][feature_id] = 0;
+}
+
+void vp9_set_segdata(MACROBLOCKD *xd, int segment_id,
+ SEG_LVL_FEATURES feature_id, int seg_data) {
+ assert(seg_data <= seg_feature_data_max[feature_id]);
+ if (seg_data < 0) {
+ assert(seg_feature_data_signed[feature_id]);
+ assert(-seg_data <= seg_feature_data_max[feature_id]);
+ }
+
+ xd->segment_feature_data[segment_id][feature_id] = seg_data;
+}
+
+int vp9_get_segdata(const MACROBLOCKD *xd, int segment_id,
+ SEG_LVL_FEATURES feature_id) {
+ return xd->segment_feature_data[segment_id][feature_id];
+}
+
+
+const vp9_tree_index vp9_segment_tree[14] = {
+ 2, 4, 6, 8, 10, 12,
+ 0, -1, -2, -3, -4, -5, -6, -7
+};
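+
+// Tree layout note (illustrative): entries are read in pairs, one
+// pair per node. A positive entry is the index of the next node pair;
+// an entry <= 0 is a leaf whose token is the negated value, so the
+// tree above encodes the eight segment ids 0..7 (see tree2tok() in
+// vp9_treecoder.c).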
+
+
+// TBD? Functions to read and write segment data with range / validity checking
diff --git a/libvpx/vp9/common/vp9_seg_common.h b/libvpx/vp9/common/vp9_seg_common.h
new file mode 100644
index 0000000..74ba03c
--- /dev/null
+++ b/libvpx/vp9/common/vp9_seg_common.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_blockd.h"
+
+#ifndef VP9_COMMON_VP9_SEG_COMMON_H_
+#define VP9_COMMON_VP9_SEG_COMMON_H_
+
+int vp9_segfeature_active(const MACROBLOCKD *xd,
+ int segment_id,
+ SEG_LVL_FEATURES feature_id);
+
+void vp9_clearall_segfeatures(MACROBLOCKD *xd);
+
+void vp9_enable_segfeature(MACROBLOCKD *xd,
+ int segment_id,
+ SEG_LVL_FEATURES feature_id);
+
+void vp9_disable_segfeature(MACROBLOCKD *xd,
+ int segment_id,
+ SEG_LVL_FEATURES feature_id);
+
+int vp9_seg_feature_data_max(SEG_LVL_FEATURES feature_id);
+
+int vp9_is_segfeature_signed(SEG_LVL_FEATURES feature_id);
+
+void vp9_clear_segdata(MACROBLOCKD *xd,
+ int segment_id,
+ SEG_LVL_FEATURES feature_id);
+
+void vp9_set_segdata(MACROBLOCKD *xd,
+ int segment_id,
+ SEG_LVL_FEATURES feature_id,
+ int seg_data);
+
+int vp9_get_segdata(const MACROBLOCKD *xd,
+ int segment_id,
+ SEG_LVL_FEATURES feature_id);
+
+extern const vp9_tree_index vp9_segment_tree[14];
+
+#endif // VP9_COMMON_VP9_SEG_COMMON_H_
+
diff --git a/libvpx/vp9/common/vp9_subpelvar.h b/libvpx/vp9/common/vp9_subpelvar.h
new file mode 100644
index 0000000..ad674f1
--- /dev/null
+++ b/libvpx/vp9/common/vp9_subpelvar.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_SUBPELVAR_H_
+#define VP9_COMMON_VP9_SUBPELVAR_H_
+
+#include "vp9/common/vp9_filter.h"
+
+static void variance(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ int w,
+ int h,
+ unsigned int *sse,
+ int *sum) {
+ int i, j;
+ int diff;
+
+ *sum = 0;
+ *sse = 0;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ diff = src_ptr[j] - ref_ptr[j];
+ *sum += diff;
+ *sse += diff * diff;
+ }
+
+ src_ptr += source_stride;
+ ref_ptr += recon_stride;
+ }
+}
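+
+// Note (illustrative): this helper only accumulates the raw sum and
+// sum of squared differences; callers typically derive the variance
+// from the two outputs as  variance = *sse - (*sum * *sum) / (w * h).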
+
+/****************************************************************************
+ *
+ * ROUTINE : var_filter_block2d_bil_first_pass
+ *
+ * INPUTS : uint8_t *src_ptr : Pointer to source block.
+ * uint32_t src_pixels_per_line : Stride of input block.
+ * uint32_t pixel_step : Offset between filter input samples (see notes).
+ * uint32_t output_height : Input block height.
+ * uint32_t output_width : Input block width.
+ * int16_t *vp9_filter : Array of 2 bi-linear filter taps.
+ *
+ * OUTPUTS : uint16_t *output_ptr : Pointer to filtered block.
+ *
+ * RETURNS : void
+ *
+ * FUNCTION : Applies a 1-D 2-tap bi-linear filter to the source block in
+ * either horizontal or vertical direction to produce the
+ * filtered output block. Used to implement first-pass
+ * of 2-D separable filter.
+ *
+ * SPECIAL NOTES : Produces uint16_t output to retain precision for next pass.
+ * Two filter taps should sum to VP9_FILTER_WEIGHT.
+ * pixel_step defines whether the filter is applied
+ * horizontally (pixel_step=1) or vertically (pixel_step=stride).
+ * It defines the offset required to move from one input
+ * to the next.
+ *
+ ****************************************************************************/
+static void var_filter_block2d_bil_first_pass(const uint8_t *src_ptr,
+ uint16_t *output_ptr,
+ unsigned int src_pixels_per_line,
+ int pixel_step,
+ unsigned int output_height,
+ unsigned int output_width,
+ const int16_t *vp9_filter) {
+ unsigned int i, j;
+
+ for (i = 0; i < output_height; i++) {
+ for (j = 0; j < output_width; j++) {
+ // Apply bilinear filter
+ output_ptr[j] = (((int)src_ptr[0] * vp9_filter[0]) +
+ ((int)src_ptr[pixel_step] * vp9_filter[1]) +
+ (VP9_FILTER_WEIGHT / 2)) >> VP9_FILTER_SHIFT;
+ src_ptr++;
+ }
+
+ // Next row...
+ src_ptr += src_pixels_per_line - output_width;
+ output_ptr += output_width;
+ }
+}
+
+/****************************************************************************
+ *
+ * ROUTINE : var_filter_block2d_bil_second_pass
+ *
+ * INPUTS : uint16_t *src_ptr : Pointer to source block.
+ * uint32_t src_pixels_per_line : Stride of input block.
+ * uint32_t pixel_step : Offset between filter input samples (see notes).
+ * uint32_t output_height : Input block height.
+ * uint32_t output_width : Input block width.
+ * int16_t *vp9_filter : Array of 2 bi-linear filter taps.
+ *
+ * OUTPUTS : uint8_t *output_ptr : Pointer to filtered block.
+ *
+ * RETURNS : void
+ *
+ * FUNCTION : Applies a 1-D 2-tap bi-linear filter to the source block in
+ * either horizontal or vertical direction to produce the
+ * filtered output block. Used to implement second-pass
+ * of 2-D separable filter.
+ *
+ * SPECIAL NOTES : Requires 16-bit input as produced by var_filter_block2d_bil_first_pass.
+ * Two filter taps should sum to VP9_FILTER_WEIGHT.
+ * pixel_step defines whether the filter is applied
+ * horizontally (pixel_step=1) or vertically (pixel_step=stride).
+ * It defines the offset required to move from one input
+ * to the next.
+ *
+ ****************************************************************************/
+static void var_filter_block2d_bil_second_pass(const uint16_t *src_ptr,
+ uint8_t *output_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned int pixel_step,
+ unsigned int output_height,
+ unsigned int output_width,
+ const int16_t *vp9_filter) {
+ unsigned int i, j;
+ int temp;
+
+ for (i = 0; i < output_height; i++) {
+ for (j = 0; j < output_width; j++) {
+ // Apply filter
+ temp = ((int)src_ptr[0] * vp9_filter[0]) +
+ ((int)src_ptr[pixel_step] * vp9_filter[1]) +
+ (VP9_FILTER_WEIGHT / 2);
+ output_ptr[j] = (uint8_t)(temp >> VP9_FILTER_SHIFT);
+ src_ptr++;
+ }
+
+ // Next row...
+ src_ptr += src_pixels_per_line - output_width;
+ output_ptr += output_width;
+ }
+}
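+
+// Typical two-pass flow (illustrative sketch; buffer names are
+// hypothetical): a sub-pixel variance routine filters (h + 1) rows
+// horizontally into a uint16_t scratch buffer with pixel_step = 1,
+// then filters that buffer vertically with pixel_step = output_width,
+// e.g. for an 8x8 block:
+//   uint16_t fdata[9 * 8];
+//   uint8_t temp[8 * 8];
+//   var_filter_block2d_bil_first_pass(src, fdata, stride, 1, 9, 8, hfilter);
+//   var_filter_block2d_bil_second_pass(fdata, temp, 8, 8, 8, 8, vfilter);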
+
+#endif // VP9_COMMON_VP9_SUBPELVAR_H_
diff --git a/libvpx/vp9/common/vp9_systemdependent.h b/libvpx/vp9/common/vp9_systemdependent.h
new file mode 100644
index 0000000..1b9147e
--- /dev/null
+++ b/libvpx/vp9/common/vp9_systemdependent.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_SYSTEMDEPENDENT_H_
+#define VP9_COMMON_VP9_SYSTEMDEPENDENT_H_
+
+#ifdef _MSC_VER
+#include <math.h>
+#endif
+
+#include "./vpx_config.h"
+#if ARCH_X86 || ARCH_X86_64
+void vpx_reset_mmx_state(void);
+#define vp9_clear_system_state() vpx_reset_mmx_state()
+#else
+#define vp9_clear_system_state()
+#endif
+
+#ifdef _MSC_VER
+// round is not defined in MSVC
+static int round(double x) {
+ if (x < 0)
+ return (int)ceil(x - 0.5);
+ else
+ return (int)floor(x + 0.5);
+}
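+// e.g. round(2.5) == 3 and round(-2.5) == -3: halves round away from zero.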
+#endif
+
+struct VP9Common;
+void vp9_machine_specific_config(struct VP9Common *);
+
+#endif // VP9_COMMON_VP9_SYSTEMDEPENDENT_H_
diff --git a/libvpx/vp9/common/vp9_tapify.py b/libvpx/vp9/common/vp9_tapify.py
new file mode 100644
index 0000000..99529cf
--- /dev/null
+++ b/libvpx/vp9/common/vp9_tapify.py
@@ -0,0 +1,106 @@
+"""
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+"""
+#!/usr/bin/env python
+import sys, math, numpy
+scale = 2**16
+def dist(p1,p2):
+ x1,y1 = p1
+ x2,y2 = p2
+ if x1==x2 and y1==y2 :
+ return 1.0
+ return 1/ math.sqrt((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2))
+
+def gettaps(p):
+ def l(b):
+ return int(math.floor(b))
+ def h(b):
+ return int(math.ceil(b))
+ def t(b,p,s):
+ return int((scale*dist(b,p)+s/2)/s)
+ r,c = p
+ ul=[l(r),l(c)]
+ ur=[l(r),h(c)]
+ ll=[h(r),l(c)]
+ lr=[h(r),h(c)]
+ sum = dist(ul,p)+dist(ur,p)+dist(ll,p)+dist(lr,p)
+ t4 = scale - t(ul,p,sum) - t(ur,p,sum) - t(ll,p,sum)
+ return [[ul,t(ul,p,sum)],[ur,t(ur,p,sum)],
+ [ll,t(ll,p,sum)],[lr,t4]]
+
+def print_mb_taps(angle,blocksize):
+ theta = angle / 57.2957795  # degrees to radians
+ affine = [[math.cos(theta),-math.sin(theta)],
+ [math.sin(theta),math.cos(theta)]]
+ radius = (float(blocksize)-1)/2
+ print " // angle of",angle,"degrees"
+ for y in range(blocksize) :
+ for x in range(blocksize) :
+ r,c = numpy.dot(affine,[y-radius, x-radius])
+ tps = gettaps([r+radius,c+radius])
+ for t in tps :
+ p,t = t
+ tr,tc = p
+ print " %2d, %2d, %5d, " % (tr,tc,t,),
+ print " // %2d,%2d " % (y,x)
+
+i=float(sys.argv[1])
+while i <= float(sys.argv[2]) :
+ print_mb_taps(i,float(sys.argv[4]))
+ i=i+float(sys.argv[3])
+"""
+
+taps = []
+pt=dict()
+ptr=dict()
+for y in range(16) :
+ for x in range(16) :
+ r,c = numpy.dot(affine,[y-7.5, x-7.5])
+ tps = gettaps([r+7.5,c+7.5])
+ j=0
+ for tp in tps :
+ p,i = tp
+ r,c = p
+ pt[y,x,j]= [p,i]
+ try:
+ ptr[r,j,c].append([y,x])
+ except:
+ ptr[r,j,c]=[[y,x]]
+ j = j+1
+
+for key in sorted(pt.keys()) :
+ print key,pt[key]
+
+lr = -99
+lj = -99
+lc = 0
+
+shuf=""
+mask=""
+for r,j,c in sorted(ptr.keys()) :
+ for y,x in ptr[r,j,c] :
+ if lr != r or lj != j :
+ print "shuf_"+str(lr)+"_"+str(lj)+"_"+shuf.ljust(16,"0"), lc
+ shuf=""
+ lc = 0
+ for i in range(lc,c-1) :
+ shuf = shuf +"0"
+ shuf = shuf + hex(x)[2]
+ lc =c
+ break
+ lr = r
+ lj = j
+# print r,j,c,ptr[r,j,c]
+# print
+
+for r,j,c in sorted(ptr.keys()) :
+ for y,x in ptr[r,j,c] :
+ print r,j,c,y,x
+ break
+"""
diff --git a/libvpx/vp9/common/vp9_textblit.c b/libvpx/vp9/common/vp9_textblit.c
new file mode 100644
index 0000000..60e95e0
--- /dev/null
+++ b/libvpx/vp9/common/vp9_textblit.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+
+#include "vp9/common/vp9_textblit.h"
+
+static const int font[] = {
+ 0x0, 0x5C00, 0x8020, 0xAFABEA, 0xD7EC0, 0x1111111, 0x1855740, 0x18000,
+ 0x45C0, 0x74400, 0x51140, 0x23880, 0xC4000, 0x21080, 0x80000, 0x111110,
+ 0xE9D72E, 0x87E40, 0x12AD732, 0xAAD62A, 0x4F94C4, 0x4D6B7, 0x456AA,
+ 0x3E8423, 0xAAD6AA, 0xAAD6A2, 0x2800, 0x2A00, 0x8A880, 0x52940, 0x22A20,
+ 0x15422, 0x6AD62E, 0x1E4A53E, 0xAAD6BF, 0x8C62E, 0xE8C63F, 0x118D6BF,
+ 0x1094BF, 0xCAC62E, 0x1F2109F, 0x118FE31, 0xF8C628, 0x8A89F, 0x108421F,
+ 0x1F1105F, 0x1F4105F, 0xE8C62E, 0x2294BF, 0x164C62E, 0x12694BF, 0x8AD6A2,
+ 0x10FC21, 0x1F8421F, 0x744107, 0xF8220F, 0x1151151, 0x117041, 0x119D731,
+ 0x47E0, 0x1041041, 0xFC400, 0x10440, 0x1084210, 0x820
+};
+
+static void plot(int x, int y, unsigned char *image, int pitch) {
+ image[x + y * pitch] ^= 255;
+}
+
+void vp9_blit_text(const char *msg, unsigned char *address, const int pitch) {
+ int letter_bitmap;
+ unsigned char *output_pos = address;
+ int colpos = 0;
+
+ while (msg[colpos] != 0) {
+ char letter = msg[colpos];
+ int fontcol, fontrow;
+
+ if (letter <= 'Z' && letter >= ' ')
+ letter_bitmap = font[letter - ' '];
+ else if (letter <= 'z' && letter >= 'a')
+ letter_bitmap = font[letter - 'a' + 'A' - ' '];
+ else
+ letter_bitmap = font[0];
+
+ for (fontcol = 6; fontcol >= 0; fontcol--)
+ for (fontrow = 0; fontrow < 5; fontrow++)
+ output_pos[fontrow * pitch + fontcol] =
+ ((letter_bitmap >> (fontcol * 5)) & (1 << fontrow) ? 255 : 0);
+
+ output_pos += 7;
+ colpos++;
+ }
+}
+
+
+
+/* Bresenham line algorithm */
+void vp9_blit_line(int x0, int x1, int y0, int y1, unsigned char *image,
+ int pitch) {
+ int steep = abs(y1 - y0) > abs(x1 - x0);
+ int deltax, deltay;
+ int error, ystep, y, x;
+
+ if (steep) {
+ int t;
+ t = x0;
+ x0 = y0;
+ y0 = t;
+
+ t = x1;
+ x1 = y1;
+ y1 = t;
+ }
+
+ if (x0 > x1) {
+ int t;
+ t = x0;
+ x0 = x1;
+ x1 = t;
+
+ t = y0;
+ y0 = y1;
+ y1 = t;
+ }
+
+ deltax = x1 - x0;
+ deltay = abs(y1 - y0);
+ error = deltax / 2;
+
+ y = y0;
+
+ if (y0 < y1)
+ ystep = 1;
+ else
+ ystep = -1;
+
+ if (steep) {
+ for (x = x0; x <= x1; x++) {
+ plot(y, x, image, pitch);
+
+ error = error - deltay;
+ if (error < 0) {
+ y = y + ystep;
+ error = error + deltax;
+ }
+ }
+ } else {
+ for (x = x0; x <= x1; x++) {
+ plot(x, y, image, pitch);
+
+ error = error - deltay;
+ if (error < 0) {
+ y = y + ystep;
+ error = error + deltax;
+ }
+ }
+ }
+}
diff --git a/libvpx/vp9/common/vp9_textblit.h b/libvpx/vp9/common/vp9_textblit.h
new file mode 100644
index 0000000..c968628
--- /dev/null
+++ b/libvpx/vp9/common/vp9_textblit.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_TEXTBLIT_H_
+#define VP9_COMMON_VP9_TEXTBLIT_H_
+
+void vp9_blit_text(const char *msg, unsigned char *address, int pitch);
+
+void vp9_blit_line(int x0, int x1, int y0, int y1, unsigned char *image,
+ int pitch);
+
+#endif // VP9_COMMON_VP9_TEXTBLIT_H_
diff --git a/libvpx/vp9/common/vp9_tile_common.c b/libvpx/vp9/common/vp9_tile_common.c
new file mode 100644
index 0000000..95296ad
--- /dev/null
+++ b/libvpx/vp9/common/vp9_tile_common.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/vp9_tile_common.h"
+
+#define MIN_TILE_WIDTH 256
+#define MAX_TILE_WIDTH 4096
+#define MIN_TILE_WIDTH_SBS (MIN_TILE_WIDTH >> 6)
+#define MAX_TILE_WIDTH_SBS (MAX_TILE_WIDTH >> 6)
+
+static void vp9_get_tile_offsets(VP9_COMMON *cm, int *min_tile_off,
+ int *max_tile_off, int tile_idx,
+ int log2_n_tiles, int n_mis) {
+ const int n_sbs = (n_mis + 7) >> 3;
+ const int sb_off1 = (tile_idx * n_sbs) >> log2_n_tiles;
+ const int sb_off2 = ((tile_idx + 1) * n_sbs) >> log2_n_tiles;
+
+ *min_tile_off = MIN(sb_off1 << 3, n_mis);
+ *max_tile_off = MIN(sb_off2 << 3, n_mis);
+}
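+
+// Worked example (illustrative): with log2_n_tiles = 1 and
+// n_mis = 17, n_sbs = (17 + 7) >> 3 = 3 superblocks, so tile 0 spans
+// mi units [0, 8) and tile 1 spans mi units [8, 17).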
+
+void vp9_get_tile_col_offsets(VP9_COMMON *cm, int tile_col_idx) {
+ cm->cur_tile_col_idx = tile_col_idx;
+ vp9_get_tile_offsets(cm, &cm->cur_tile_mi_col_start,
+ &cm->cur_tile_mi_col_end, tile_col_idx,
+ cm->log2_tile_columns, cm->mi_cols);
+}
+
+void vp9_get_tile_row_offsets(VP9_COMMON *cm, int tile_row_idx) {
+ cm->cur_tile_row_idx = tile_row_idx;
+ vp9_get_tile_offsets(cm, &cm->cur_tile_mi_row_start,
+ &cm->cur_tile_mi_row_end, tile_row_idx,
+ cm->log2_tile_rows, cm->mi_rows);
+}
+
+
+void vp9_get_tile_n_bits(VP9_COMMON *cm, int *min_log2_n_tiles_ptr,
+ int *delta_log2_n_tiles) {
+ const int sb_cols = (cm->mb_cols + 3) >> 2;
+ int min_log2_n_tiles, max_log2_n_tiles;
+
+ for (max_log2_n_tiles = 0;
+ (sb_cols >> max_log2_n_tiles) >= MIN_TILE_WIDTH_SBS;
+ max_log2_n_tiles++) {}
+ max_log2_n_tiles--;
+ if (max_log2_n_tiles < 0)
+ max_log2_n_tiles = 0;
+
+ for (min_log2_n_tiles = 0;
+ (MAX_TILE_WIDTH_SBS << min_log2_n_tiles) < sb_cols;
+ min_log2_n_tiles++) {}
+
+ assert(max_log2_n_tiles >= min_log2_n_tiles);
+ *min_log2_n_tiles_ptr = min_log2_n_tiles;
+ *delta_log2_n_tiles = max_log2_n_tiles - min_log2_n_tiles;
+}
diff --git a/libvpx/vp9/common/vp9_tile_common.h b/libvpx/vp9/common/vp9_tile_common.h
new file mode 100644
index 0000000..7ea3772
--- /dev/null
+++ b/libvpx/vp9/common/vp9_tile_common.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_TILE_COMMON_H_
+#define VP9_COMMON_VP9_TILE_COMMON_H_
+
+#include "vp9/common/vp9_onyxc_int.h"
+
+void vp9_get_tile_col_offsets(VP9_COMMON *cm, int tile_col_idx);
+
+void vp9_get_tile_row_offsets(VP9_COMMON *cm, int tile_row_idx);
+
+void vp9_get_tile_n_bits(VP9_COMMON *cm, int *min_log2_n_tiles,
+ int *delta_log2_n_tiles);
+
+#endif // VP9_COMMON_VP9_TILE_COMMON_H_
diff --git a/libvpx/vp9/common/vp9_treecoder.c b/libvpx/vp9/common/vp9_treecoder.c
new file mode 100644
index 0000000..531fa75
--- /dev/null
+++ b/libvpx/vp9/common/vp9_treecoder.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vpx_config.h"
+
+#if defined(CONFIG_DEBUG) && CONFIG_DEBUG
+#include <assert.h>
+#endif
+
+#include "vp9/common/vp9_treecoder.h"
+
+static void tree2tok(struct vp9_token *const p, vp9_tree t,
+ int i, int v, int l) {
+ v += v;
+ ++l;
+
+ do {
+ const vp9_tree_index j = t[i++];
+
+ if (j <= 0) {
+ p[-j].value = v;
+ p[-j].len = l;
+ } else
+ tree2tok(p, t, j, v, l);
+ } while (++v & 1);
+}
+
+void vp9_tokens_from_tree(struct vp9_token *p, vp9_tree t) {
+ tree2tok(p, t, 0, 0, 0);
+}
+
+void vp9_tokens_from_tree_offset(struct vp9_token *p, vp9_tree t,
+ int offset) {
+ tree2tok(p - offset, t, 0, 0, 0);
+}
+
+static unsigned int convert_distribution(unsigned int i,
+ vp9_tree tree,
+ vp9_prob probs[],
+ unsigned int branch_ct[][2],
+ const unsigned int num_events[],
+ unsigned int tok0_offset) {
+ unsigned int left, right;
+
+ if (tree[i] <= 0) {
+ left = num_events[-tree[i] - tok0_offset];
+ } else {
+ left = convert_distribution(tree[i], tree, probs, branch_ct,
+ num_events, tok0_offset);
+ }
+ if (tree[i + 1] <= 0)
+ right = num_events[-tree[i + 1] - tok0_offset];
+ else
+ right = convert_distribution(tree[i + 1], tree, probs, branch_ct,
+ num_events, tok0_offset);
+
+  probs[i >> 1] = get_binary_prob(left, right);
+  branch_ct[i >> 1][0] = left;
+  branch_ct[i >> 1][1] = right;
+ return left + right;
+}
+
+void vp9_tree_probs_from_distribution(vp9_tree tree,
+                                      vp9_prob probs[ /* n - 1 */ ],
+                                      unsigned int branch_ct[ /* n - 1 */ ][2],
+                                      const unsigned int num_events[ /* n */ ],
+                                      unsigned int tok0_offset) {
+ convert_distribution(0, tree, probs, branch_ct, num_events, tok0_offset);
+}
diff --git a/libvpx/vp9/common/vp9_treecoder.h b/libvpx/vp9/common/vp9_treecoder.h
new file mode 100644
index 0000000..ebcd411
--- /dev/null
+++ b/libvpx/vp9/common/vp9_treecoder.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_TREECODER_H_
+#define VP9_COMMON_VP9_TREECODER_H_
+
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_common.h"
+
+typedef uint8_t vp9_prob;
+
+#define vp9_prob_half ((vp9_prob) 128)
+
+typedef int8_t vp9_tree_index;
+
+#define vp9_complement(x) (255 - (x))
+
+/* We build coding trees compactly in arrays.
+ Each node of the tree is a pair of vp9_tree_indices.
+ Array index often references a corresponding probability table.
+ Index <= 0 means done encoding/decoding and value = -Index,
+ Index > 0 means need another bit, specification at index.
+ Nonnegative indices are always even; processing begins at node 0. */
+
+typedef const vp9_tree_index vp9_tree[], *vp9_tree_p;
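+
+/* Example (illustrative): a 3-symbol tree over tokens A=0, B=1, C=2,
+     static const vp9_tree_index example_tree[4] = { -0, 2, -1, -2 };
+   a 0 bit at node 0 yields A; a 1 bit moves to index 2, where a 0 bit
+   yields B and a 1 bit yields C, i.e. codes "0", "10" and "11". */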
+
+struct vp9_token {
+ int value;
+ int len;
+};
+
+/* Construct encoding array from tree. */
+
+void vp9_tokens_from_tree(struct vp9_token*, vp9_tree);
+void vp9_tokens_from_tree_offset(struct vp9_token*, vp9_tree, int offset);
+
+/* Convert array of token occurrence counts into a table of probabilities
+ for the associated binary encoding tree. Also writes count of branches
+   taken for each node on the tree; this facilitates decisions as to
+ probability updates. */
+
+void vp9_tree_probs_from_distribution(vp9_tree tree,
+ vp9_prob probs[ /* n - 1 */ ],
+ unsigned int branch_ct[ /* n - 1 */ ][2],
+ const unsigned int num_events[ /* n */ ],
+ unsigned int tok0_offset);
+
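+/* Example (illustrative): with the 3-symbol tree above and counts
+   num_events[] = { 6, 3, 1 }, the root gets probs[0] =
+   get_binary_prob(6, 3 + 1) = 154 and the inner node gets
+   probs[1] = get_binary_prob(3, 1) = 192. */
+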
+static INLINE vp9_prob clip_prob(int p) {
+ return (p > 255) ? 255u : (p < 1) ? 1u : p;
+}
+
+// int64 is not needed for normal frame-level calculations.
+// However, when outputting entropy stats accumulated over many frames or
+// even clips, we can overflow int math.
+#ifdef ENTROPY_STATS
+static INLINE vp9_prob get_prob(int num, int den) {
+ return (den == 0) ? 128u : clip_prob(((int64_t)num * 256 + (den >> 1)) / den);
+}
+#else
+static INLINE vp9_prob get_prob(int num, int den) {
+ return (den == 0) ? 128u : clip_prob((num * 256 + (den >> 1)) / den);
+}
+#endif
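+
+// E.g. get_prob(3, 10) = clip_prob((3 * 256 + 5) / 10) = 77.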
+
+static INLINE vp9_prob get_binary_prob(int n0, int n1) {
+ return get_prob(n0, n0 + n1);
+}
+
+/* this function assumes prob1 and prob2 are already within [1,255] range */
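+/* E.g. weighted_prob(100, 200, 64) = (100 * 192 + 200 * 64 + 128) >> 8
+   = 125, a 3/4 : 1/4 blend of the two probabilities. */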
+static INLINE vp9_prob weighted_prob(int prob1, int prob2, int factor) {
+ return ROUND_POWER_OF_TWO(prob1 * (256 - factor) + prob2 * factor, 8);
+}
+
+#endif // VP9_COMMON_VP9_TREECODER_H_
diff --git a/libvpx/vp9/common/x86/vp9_asm_stubs.c b/libvpx/vp9/common/x86/vp9_asm_stubs.c
new file mode 100644
index 0000000..2b66834
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_asm_stubs.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "./vpx_config.h"
+#include "./vp9_rtcd.h"
+#include "vpx_ports/mem.h"
+///////////////////////////////////////////////////////////////////////////
+// coefficient table for the mmx bilinear filtering and var calculation   //
+// in one pass                                                            //
+///////////////////////////////////////////////////////////////////////////
+DECLARE_ALIGNED(16, const short, vp9_bilinear_filters_mmx[16][8]) = {
+ { 128, 128, 128, 128, 0, 0, 0, 0 },
+ { 120, 120, 120, 120, 8, 8, 8, 8 },
+ { 112, 112, 112, 112, 16, 16, 16, 16 },
+ { 104, 104, 104, 104, 24, 24, 24, 24 },
+ { 96, 96, 96, 96, 32, 32, 32, 32 },
+ { 88, 88, 88, 88, 40, 40, 40, 40 },
+ { 80, 80, 80, 80, 48, 48, 48, 48 },
+ { 72, 72, 72, 72, 56, 56, 56, 56 },
+ { 64, 64, 64, 64, 64, 64, 64, 64 },
+ { 56, 56, 56, 56, 72, 72, 72, 72 },
+ { 48, 48, 48, 48, 80, 80, 80, 80 },
+ { 40, 40, 40, 40, 88, 88, 88, 88 },
+ { 32, 32, 32, 32, 96, 96, 96, 96 },
+ { 24, 24, 24, 24, 104, 104, 104, 104 },
+ { 16, 16, 16, 16, 112, 112, 112, 112 },
+ { 8, 8, 8, 8, 120, 120, 120, 120 }
+};
+
+#if HAVE_SSSE3
+void vp9_filter_block1d16_v8_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d16_h8_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d8_v8_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d8_h8_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d4_v8_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d4_h8_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d16_v8_avg_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d16_h8_avg_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d8_v8_avg_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d8_h8_avg_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d4_v8_avg_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
+void vp9_filter_block1d4_h8_avg_ssse3(const unsigned char *src_ptr,
+ const unsigned int src_pitch,
+ unsigned char *output_ptr,
+ unsigned int out_pitch,
+ unsigned int output_height,
+ const short *filter);
+
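+/* These wrappers carve the block into 16-, 8- and 4-wide column strips for
+   the SSSE3 kernels, then hand any remaining columns - or any unsupported
+   case (a stepped filter with step_q4 != 16, or a filter whose centre tap
+   is 128, in practice the integer-position copy filter) - to the C
+   fallback. */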
+void vp9_convolve8_horiz_ssse3(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ if (x_step_q4 == 16 && filter_x[3] != 128) {
+ while (w >= 16) {
+ vp9_filter_block1d16_h8_ssse3(src, src_stride,
+ dst, dst_stride,
+ h, filter_x);
+ src += 16;
+ dst += 16;
+ w -= 16;
+ }
+ while (w >= 8) {
+ vp9_filter_block1d8_h8_ssse3(src, src_stride,
+ dst, dst_stride,
+ h, filter_x);
+ src += 8;
+ dst += 8;
+ w -= 8;
+ }
+ while (w >= 4) {
+ vp9_filter_block1d4_h8_ssse3(src, src_stride,
+ dst, dst_stride,
+ h, filter_x);
+ src += 4;
+ dst += 4;
+ w -= 4;
+ }
+ }
+ if (w) {
+ vp9_convolve8_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h);
+ }
+}
+
+void vp9_convolve8_vert_ssse3(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ if (y_step_q4 == 16 && filter_y[3] != 128) {
+ while (w >= 16) {
+ vp9_filter_block1d16_v8_ssse3(src - src_stride * 3, src_stride,
+ dst, dst_stride,
+ h, filter_y);
+ src += 16;
+ dst += 16;
+ w -= 16;
+ }
+ while (w >= 8) {
+ vp9_filter_block1d8_v8_ssse3(src - src_stride * 3, src_stride,
+ dst, dst_stride,
+ h, filter_y);
+ src += 8;
+ dst += 8;
+ w -= 8;
+ }
+ while (w >= 4) {
+ vp9_filter_block1d4_v8_ssse3(src - src_stride * 3, src_stride,
+ dst, dst_stride,
+ h, filter_y);
+ src += 4;
+ dst += 4;
+ w -= 4;
+ }
+ }
+ if (w) {
+ vp9_convolve8_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h);
+ }
+}
+
+void vp9_convolve8_avg_horiz_ssse3(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ if (x_step_q4 == 16 && filter_x[3] != 128) {
+ while (w >= 16) {
+ vp9_filter_block1d16_h8_avg_ssse3(src, src_stride,
+ dst, dst_stride,
+ h, filter_x);
+ src += 16;
+ dst += 16;
+ w -= 16;
+ }
+ while (w >= 8) {
+ vp9_filter_block1d8_h8_avg_ssse3(src, src_stride,
+ dst, dst_stride,
+ h, filter_x);
+ src += 8;
+ dst += 8;
+ w -= 8;
+ }
+ while (w >= 4) {
+ vp9_filter_block1d4_h8_avg_ssse3(src, src_stride,
+ dst, dst_stride,
+ h, filter_x);
+ src += 4;
+ dst += 4;
+ w -= 4;
+ }
+ }
+ if (w) {
+ vp9_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h);
+ }
+}
+
+void vp9_convolve8_avg_vert_ssse3(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ if (y_step_q4 == 16 && filter_y[3] != 128) {
+ while (w >= 16) {
+ vp9_filter_block1d16_v8_avg_ssse3(src - src_stride * 3, src_stride,
+ dst, dst_stride,
+ h, filter_y);
+ src += 16;
+ dst += 16;
+ w -= 16;
+ }
+ while (w >= 8) {
+ vp9_filter_block1d8_v8_avg_ssse3(src - src_stride * 3, src_stride,
+ dst, dst_stride,
+ h, filter_y);
+ src += 8;
+ dst += 8;
+ w -= 8;
+ }
+ while (w >= 4) {
+ vp9_filter_block1d4_v8_avg_ssse3(src - src_stride * 3, src_stride,
+ dst, dst_stride,
+ h, filter_y);
+ src += 4;
+ dst += 4;
+ w -= 4;
+ }
+ }
+ if (w) {
+ vp9_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h);
+ }
+}
+
+void vp9_convolve8_ssse3(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 64*71);
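+  // Intermediate buffer: 64 columns by 64 + 7 rows. The horizontal pass
+  // also filters the 3 rows above and 4 rows below the block, which the
+  // 8-tap vertical pass reads.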
+
+ assert(w <= 64);
+ assert(h <= 64);
+ if (x_step_q4 == 16 && y_step_q4 == 16) {
+ vp9_convolve8_horiz_ssse3(src - 3 * src_stride, src_stride, fdata2, 64,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h + 7);
+ vp9_convolve8_vert_ssse3(fdata2 + 3 * 64, 64, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4, w, h);
+ } else {
+ vp9_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4, w, h);
+ }
+}
+
+void vp9_convolve8_avg_ssse3(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 64*71);
+
+ assert(w <= 64);
+ assert(h <= 64);
+ if (x_step_q4 == 16 && y_step_q4 == 16) {
+ vp9_convolve8_horiz_ssse3(src - 3 * src_stride, src_stride, fdata2, 64,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h + 7);
+ vp9_convolve8_avg_vert_ssse3(fdata2 + 3 * 64, 64, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h);
+ } else {
+ vp9_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4, w, h);
+ }
+}
+#endif
diff --git a/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c b/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c
new file mode 100644
index 0000000..599dcff
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c
@@ -0,0 +1,1985 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <emmintrin.h> // SSE2
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_idct.h"
+
+// In order to improve performance, clip absolute diff values to [0, 255],
+// which keeps the additions/subtractions in 8 bits.
+void vp9_dc_only_idct_add_sse2(int input_dc, uint8_t *pred_ptr,
+ uint8_t *dst_ptr, int pitch, int stride) {
+ int a1;
+ int16_t out;
+ uint8_t abs_diff;
+ __m128i p0, p1, p2, p3;
+ unsigned int extended_diff;
+ __m128i diff;
+
+ out = dct_const_round_shift(input_dc * cospi_16_64);
+ out = dct_const_round_shift(out * cospi_16_64);
+ a1 = ROUND_POWER_OF_TWO(out, 4);
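+  // E.g. input_dc = 100 gives out = 71, then out = 50, so
+  // a1 = (50 + 8) >> 4 = 3, roughly input_dc / 32 overall.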
+
+ // Read prediction data.
+  p0 = _mm_cvtsi32_si128(*(const int *)(pred_ptr + 0 * pitch));
+  p1 = _mm_cvtsi32_si128(*(const int *)(pred_ptr + 1 * pitch));
+  p2 = _mm_cvtsi32_si128(*(const int *)(pred_ptr + 2 * pitch));
+  p3 = _mm_cvtsi32_si128(*(const int *)(pred_ptr + 3 * pitch));
+
+ // Unpack prediction data, and store 4x4 array in 1 XMM register.
+ p0 = _mm_unpacklo_epi32(p0, p1);
+ p2 = _mm_unpacklo_epi32(p2, p3);
+ p0 = _mm_unpacklo_epi64(p0, p2);
+
+ // Clip dc value to [0, 255] range. Then, do addition or subtraction
+ // according to its sign.
+ if (a1 >= 0) {
+ abs_diff = (a1 > 255) ? 255 : a1;
+ extended_diff = abs_diff * 0x01010101u;
+ diff = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_diff), 0);
+
+ p1 = _mm_adds_epu8(p0, diff);
+ } else {
+ abs_diff = (a1 < -255) ? 255 : -a1;
+ extended_diff = abs_diff * 0x01010101u;
+ diff = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_diff), 0);
+
+ p1 = _mm_subs_epu8(p0, diff);
+ }
+
+ // Store results to dst.
+ *(int *)dst_ptr = _mm_cvtsi128_si32(p1);
+ dst_ptr += stride;
+
+ p1 = _mm_srli_si128(p1, 4);
+ *(int *)dst_ptr = _mm_cvtsi128_si32(p1);
+ dst_ptr += stride;
+
+ p1 = _mm_srli_si128(p1, 4);
+ *(int *)dst_ptr = _mm_cvtsi128_si32(p1);
+ dst_ptr += stride;
+
+ p1 = _mm_srli_si128(p1, 4);
+ *(int *)dst_ptr = _mm_cvtsi128_si32(p1);
+}
+
+void vp9_short_idct4x4_add_sse2(int16_t *input, uint8_t *dest, int stride) {
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i eight = _mm_set1_epi16(8);
+ const __m128i cst = _mm_setr_epi16((int16_t)cospi_16_64, (int16_t)cospi_16_64,
+ (int16_t)cospi_16_64, (int16_t)-cospi_16_64,
+ (int16_t)cospi_24_64, (int16_t)-cospi_8_64,
+ (int16_t)cospi_8_64, (int16_t)cospi_24_64);
+ const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ __m128i input0, input1, input2, input3;
+
+ // Rows
+ input0 = _mm_loadl_epi64((__m128i *)input);
+ input1 = _mm_loadl_epi64((__m128i *)(input + 4));
+ input2 = _mm_loadl_epi64((__m128i *)(input + 8));
+ input3 = _mm_loadl_epi64((__m128i *)(input + 12));
+
+ // Construct i3, i1, i3, i1, i2, i0, i2, i0
+ input0 = _mm_shufflelo_epi16(input0, 0xd8);
+ input1 = _mm_shufflelo_epi16(input1, 0xd8);
+ input2 = _mm_shufflelo_epi16(input2, 0xd8);
+ input3 = _mm_shufflelo_epi16(input3, 0xd8);
+
+ input0 = _mm_unpacklo_epi32(input0, input0);
+ input1 = _mm_unpacklo_epi32(input1, input1);
+ input2 = _mm_unpacklo_epi32(input2, input2);
+ input3 = _mm_unpacklo_epi32(input3, input3);
+
+ // Stage 1
+ input0 = _mm_madd_epi16(input0, cst);
+ input1 = _mm_madd_epi16(input1, cst);
+ input2 = _mm_madd_epi16(input2, cst);
+ input3 = _mm_madd_epi16(input3, cst);
+
+ input0 = _mm_add_epi32(input0, rounding);
+ input1 = _mm_add_epi32(input1, rounding);
+ input2 = _mm_add_epi32(input2, rounding);
+ input3 = _mm_add_epi32(input3, rounding);
+
+ input0 = _mm_srai_epi32(input0, DCT_CONST_BITS);
+ input1 = _mm_srai_epi32(input1, DCT_CONST_BITS);
+ input2 = _mm_srai_epi32(input2, DCT_CONST_BITS);
+ input3 = _mm_srai_epi32(input3, DCT_CONST_BITS);
+
+ // Stage 2
+ input0 = _mm_packs_epi32(input0, zero);
+ input1 = _mm_packs_epi32(input1, zero);
+ input2 = _mm_packs_epi32(input2, zero);
+ input3 = _mm_packs_epi32(input3, zero);
+
+ // Transpose
+ input1 = _mm_unpacklo_epi16(input0, input1);
+ input3 = _mm_unpacklo_epi16(input2, input3);
+ input0 = _mm_unpacklo_epi32(input1, input3);
+ input1 = _mm_unpackhi_epi32(input1, input3);
+
+  // Swap columns 2 and 3, which gives:
+  // input2: column 1, column 0;  input3: column 2, column 3.
+ input1 = _mm_shuffle_epi32(input1, 0x4e);
+ input2 = _mm_add_epi16(input0, input1);
+ input3 = _mm_sub_epi16(input0, input1);
+
+ // Columns
+ // Construct i3, i1, i3, i1, i2, i0, i2, i0
+ input0 = _mm_shufflelo_epi16(input2, 0xd8);
+ input1 = _mm_shufflehi_epi16(input2, 0xd8);
+ input2 = _mm_shufflehi_epi16(input3, 0xd8);
+ input3 = _mm_shufflelo_epi16(input3, 0xd8);
+
+ input0 = _mm_unpacklo_epi32(input0, input0);
+ input1 = _mm_unpackhi_epi32(input1, input1);
+ input2 = _mm_unpackhi_epi32(input2, input2);
+ input3 = _mm_unpacklo_epi32(input3, input3);
+
+ // Stage 1
+ input0 = _mm_madd_epi16(input0, cst);
+ input1 = _mm_madd_epi16(input1, cst);
+ input2 = _mm_madd_epi16(input2, cst);
+ input3 = _mm_madd_epi16(input3, cst);
+
+ input0 = _mm_add_epi32(input0, rounding);
+ input1 = _mm_add_epi32(input1, rounding);
+ input2 = _mm_add_epi32(input2, rounding);
+ input3 = _mm_add_epi32(input3, rounding);
+
+ input0 = _mm_srai_epi32(input0, DCT_CONST_BITS);
+ input1 = _mm_srai_epi32(input1, DCT_CONST_BITS);
+ input2 = _mm_srai_epi32(input2, DCT_CONST_BITS);
+ input3 = _mm_srai_epi32(input3, DCT_CONST_BITS);
+
+ // Stage 2
+ input0 = _mm_packs_epi32(input0, zero);
+ input1 = _mm_packs_epi32(input1, zero);
+ input2 = _mm_packs_epi32(input2, zero);
+ input3 = _mm_packs_epi32(input3, zero);
+
+ // Transpose
+ input1 = _mm_unpacklo_epi16(input0, input1);
+ input3 = _mm_unpacklo_epi16(input2, input3);
+ input0 = _mm_unpacklo_epi32(input1, input3);
+ input1 = _mm_unpackhi_epi32(input1, input3);
+
+  // Swap columns 2 and 3, which gives:
+  // input2: column 1, column 0;  input3: column 2, column 3.
+ input1 = _mm_shuffle_epi32(input1, 0x4e);
+ input2 = _mm_add_epi16(input0, input1);
+ input3 = _mm_sub_epi16(input0, input1);
+
+ // Final round and shift
+ input2 = _mm_add_epi16(input2, eight);
+ input3 = _mm_add_epi16(input3, eight);
+
+ input2 = _mm_srai_epi16(input2, 4);
+ input3 = _mm_srai_epi16(input3, 4);
+
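+  // Reconstructs one 4-pixel row: adds the residual row in in_x to the
+  // predictor at dest, saturates to [0, 255] on repack, stores it, and
+  // advances dest by stride (both taken from the enclosing scope).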
+#define RECON_AND_STORE4X4(dest, in_x) \
+ { \
+ __m128i d0 = _mm_cvtsi32_si128(*(const int *)(dest)); \
+ d0 = _mm_unpacklo_epi8(d0, zero); \
+ d0 = _mm_add_epi16(in_x, d0); \
+ d0 = _mm_packus_epi16(d0, d0); \
+ *(int *)dest = _mm_cvtsi128_si32(d0); \
+ dest += stride; \
+ }
+
+ input0 = _mm_srli_si128(input2, 8);
+ input1 = _mm_srli_si128(input3, 8);
+
+ RECON_AND_STORE4X4(dest, input2);
+ RECON_AND_STORE4X4(dest, input0);
+ RECON_AND_STORE4X4(dest, input1);
+ RECON_AND_STORE4X4(dest, input3);
+}
+
+void vp9_idct4_1d_sse2(int16_t *input, int16_t *output) {
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i c1 = _mm_setr_epi16((int16_t)cospi_16_64, (int16_t)cospi_16_64,
+ (int16_t)cospi_16_64, (int16_t)-cospi_16_64,
+ (int16_t)cospi_24_64, (int16_t)-cospi_8_64,
+ (int16_t)cospi_8_64, (int16_t)cospi_24_64);
+ const __m128i c2 = _mm_setr_epi16(1, 1, 1, 1, 1, -1, 1, -1);
+
+ const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ __m128i in, temp;
+
+ // Load input data.
+ in = _mm_loadl_epi64((__m128i *)input);
+
+ // Construct i3, i1, i3, i1, i2, i0, i2, i0
+ in = _mm_shufflelo_epi16(in, 0xd8);
+ in = _mm_unpacklo_epi32(in, in);
+
+ // Stage 1
+ in = _mm_madd_epi16(in, c1);
+ in = _mm_add_epi32(in, rounding);
+ in = _mm_srai_epi32(in, DCT_CONST_BITS);
+ in = _mm_packs_epi32(in, zero);
+
+ // Stage 2
+ temp = _mm_shufflelo_epi16(in, 0x9c);
+ in = _mm_shufflelo_epi16(in, 0xc9);
+ in = _mm_unpacklo_epi64(temp, in);
+ in = _mm_madd_epi16(in, c2);
+ in = _mm_packs_epi32(in, zero);
+
+ // Store results
+ _mm_storel_epi64((__m128i *)output, in);
+}
+
+#define TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, \
+ out0, out1, out2, out3, out4, out5, out6, out7) \
+ { \
+ const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \
+ const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \
+ const __m128i tr0_2 = _mm_unpackhi_epi16(in0, in1); \
+ const __m128i tr0_3 = _mm_unpackhi_epi16(in2, in3); \
+ const __m128i tr0_4 = _mm_unpacklo_epi16(in4, in5); \
+ const __m128i tr0_5 = _mm_unpacklo_epi16(in6, in7); \
+ const __m128i tr0_6 = _mm_unpackhi_epi16(in4, in5); \
+ const __m128i tr0_7 = _mm_unpackhi_epi16(in6, in7); \
+ \
+ const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \
+ const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); \
+ const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); \
+ const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); \
+ const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); \
+ const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); \
+ const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); \
+ const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); \
+ \
+ out0 = _mm_unpacklo_epi64(tr1_0, tr1_4); \
+ out1 = _mm_unpackhi_epi64(tr1_0, tr1_4); \
+ out2 = _mm_unpacklo_epi64(tr1_2, tr1_6); \
+ out3 = _mm_unpackhi_epi64(tr1_2, tr1_6); \
+ out4 = _mm_unpacklo_epi64(tr1_1, tr1_5); \
+ out5 = _mm_unpackhi_epi64(tr1_1, tr1_5); \
+ out6 = _mm_unpacklo_epi64(tr1_3, tr1_7); \
+ out7 = _mm_unpackhi_epi64(tr1_3, tr1_7); \
+ }
+
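+// Note: TRANSPOSE_4X8 only transposes the low halves of its inputs and
+// zero-fills out4..out7; it requires a __m128i named 'zero' in scope.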
+#define TRANSPOSE_4X8(in0, in1, in2, in3, in4, in5, in6, in7, \
+ out0, out1, out2, out3, out4, out5, out6, out7) \
+ { \
+ const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \
+ const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \
+ const __m128i tr0_4 = _mm_unpacklo_epi16(in4, in5); \
+ const __m128i tr0_5 = _mm_unpacklo_epi16(in6, in7); \
+ \
+ const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \
+ const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); \
+ const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); \
+ const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); \
+ \
+ out0 = _mm_unpacklo_epi64(tr1_0, tr1_4); \
+ out1 = _mm_unpackhi_epi64(tr1_0, tr1_4); \
+ out2 = _mm_unpacklo_epi64(tr1_2, tr1_6); \
+ out3 = _mm_unpackhi_epi64(tr1_2, tr1_6); \
+ out4 = out5 = out6 = out7 = zero; \
+ }
+
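+// Note: TRANSPOSE_8X4 writes its results back into in0..in3; the out
+// parameters exist for call-site symmetry and are not referenced.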
+#define TRANSPOSE_8X4(in0, in1, in2, in3, out0, out1, out2, out3) \
+ { \
+ const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \
+ const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \
+ const __m128i tr0_2 = _mm_unpackhi_epi16(in0, in1); \
+ const __m128i tr0_3 = _mm_unpackhi_epi16(in2, in3); \
+ \
+ in0 = _mm_unpacklo_epi32(tr0_0, tr0_1); /* i1 i0 */ \
+ in1 = _mm_unpackhi_epi32(tr0_0, tr0_1); /* i3 i2 */ \
+ in2 = _mm_unpacklo_epi32(tr0_2, tr0_3); /* i5 i4 */ \
+ in3 = _mm_unpackhi_epi32(tr0_2, tr0_3); /* i7 i6 */ \
+ }
+
+// Macro for multiplying elements by constants and adding them together.
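+// It expects __m128i temporaries tmp0..tmp7 and the 'rounding' constant to
+// be declared in the enclosing scope.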
+#define MULTIPLICATION_AND_ADD(lo_0, hi_0, lo_1, hi_1, \
+ cst0, cst1, cst2, cst3, res0, res1, res2, res3) \
+ { \
+ tmp0 = _mm_madd_epi16(lo_0, cst0); \
+ tmp1 = _mm_madd_epi16(hi_0, cst0); \
+ tmp2 = _mm_madd_epi16(lo_0, cst1); \
+ tmp3 = _mm_madd_epi16(hi_0, cst1); \
+ tmp4 = _mm_madd_epi16(lo_1, cst2); \
+ tmp5 = _mm_madd_epi16(hi_1, cst2); \
+ tmp6 = _mm_madd_epi16(lo_1, cst3); \
+ tmp7 = _mm_madd_epi16(hi_1, cst3); \
+ \
+ tmp0 = _mm_add_epi32(tmp0, rounding); \
+ tmp1 = _mm_add_epi32(tmp1, rounding); \
+ tmp2 = _mm_add_epi32(tmp2, rounding); \
+ tmp3 = _mm_add_epi32(tmp3, rounding); \
+ tmp4 = _mm_add_epi32(tmp4, rounding); \
+ tmp5 = _mm_add_epi32(tmp5, rounding); \
+ tmp6 = _mm_add_epi32(tmp6, rounding); \
+ tmp7 = _mm_add_epi32(tmp7, rounding); \
+ \
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
+ tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
+ tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
+ tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); \
+ tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); \
+ tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); \
+ tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); \
+ \
+ res0 = _mm_packs_epi32(tmp0, tmp1); \
+ res1 = _mm_packs_epi32(tmp2, tmp3); \
+ res2 = _mm_packs_epi32(tmp4, tmp5); \
+ res3 = _mm_packs_epi32(tmp6, tmp7); \
+ }
+
+#define IDCT8x8_1D \
+ /* Stage1 */ \
+ { \
+ const __m128i lo_17 = _mm_unpacklo_epi16(in1, in7); \
+ const __m128i hi_17 = _mm_unpackhi_epi16(in1, in7); \
+ const __m128i lo_35 = _mm_unpacklo_epi16(in3, in5); \
+ const __m128i hi_35 = _mm_unpackhi_epi16(in3, in5); \
+ \
+ MULTIPLICATION_AND_ADD(lo_17, hi_17, lo_35, hi_35, stg1_0, \
+ stg1_1, stg1_2, stg1_3, stp1_4, \
+ stp1_7, stp1_5, stp1_6) \
+ } \
+ \
+ /* Stage2 */ \
+ { \
+ const __m128i lo_04 = _mm_unpacklo_epi16(in0, in4); \
+ const __m128i hi_04 = _mm_unpackhi_epi16(in0, in4); \
+ const __m128i lo_26 = _mm_unpacklo_epi16(in2, in6); \
+ const __m128i hi_26 = _mm_unpackhi_epi16(in2, in6); \
+ \
+ MULTIPLICATION_AND_ADD(lo_04, hi_04, lo_26, hi_26, stg2_0, \
+ stg2_1, stg2_2, stg2_3, stp2_0, \
+ stp2_1, stp2_2, stp2_3) \
+ \
+ stp2_4 = _mm_adds_epi16(stp1_4, stp1_5); \
+ stp2_5 = _mm_subs_epi16(stp1_4, stp1_5); \
+ stp2_6 = _mm_subs_epi16(stp1_7, stp1_6); \
+ stp2_7 = _mm_adds_epi16(stp1_7, stp1_6); \
+ } \
+ \
+ /* Stage3 */ \
+ { \
+ const __m128i lo_56 = _mm_unpacklo_epi16(stp2_6, stp2_5); \
+ const __m128i hi_56 = _mm_unpackhi_epi16(stp2_6, stp2_5); \
+ \
+ stp1_0 = _mm_adds_epi16(stp2_0, stp2_3); \
+ stp1_1 = _mm_adds_epi16(stp2_1, stp2_2); \
+ stp1_2 = _mm_subs_epi16(stp2_1, stp2_2); \
+ stp1_3 = _mm_subs_epi16(stp2_0, stp2_3); \
+ \
+ tmp0 = _mm_madd_epi16(lo_56, stg2_1); \
+ tmp1 = _mm_madd_epi16(hi_56, stg2_1); \
+ tmp2 = _mm_madd_epi16(lo_56, stg2_0); \
+ tmp3 = _mm_madd_epi16(hi_56, stg2_0); \
+ \
+ tmp0 = _mm_add_epi32(tmp0, rounding); \
+ tmp1 = _mm_add_epi32(tmp1, rounding); \
+ tmp2 = _mm_add_epi32(tmp2, rounding); \
+ tmp3 = _mm_add_epi32(tmp3, rounding); \
+ \
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
+ tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
+ tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
+ \
+ stp1_5 = _mm_packs_epi32(tmp0, tmp1); \
+ stp1_6 = _mm_packs_epi32(tmp2, tmp3); \
+ } \
+ \
+ /* Stage4 */ \
+ in0 = _mm_adds_epi16(stp1_0, stp2_7); \
+ in1 = _mm_adds_epi16(stp1_1, stp1_6); \
+ in2 = _mm_adds_epi16(stp1_2, stp1_5); \
+ in3 = _mm_adds_epi16(stp1_3, stp2_4); \
+ in4 = _mm_subs_epi16(stp1_3, stp2_4); \
+ in5 = _mm_subs_epi16(stp1_2, stp1_5); \
+ in6 = _mm_subs_epi16(stp1_1, stp1_6); \
+ in7 = _mm_subs_epi16(stp1_0, stp2_7);
+
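+// Adds an 8-wide residual row in in_x to the predictor at dest, saturates
+// to [0, 255] on repack, and advances dest by stride; 'zero' and 'stride'
+// come from the enclosing scope.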
+#define RECON_AND_STORE(dest, in_x) \
+ { \
+ __m128i d0 = _mm_loadl_epi64((__m128i *)(dest)); \
+ d0 = _mm_unpacklo_epi8(d0, zero); \
+ in_x = _mm_add_epi16(in_x, d0); \
+ in_x = _mm_packus_epi16(in_x, in_x); \
+ _mm_storel_epi64((__m128i *)(dest), in_x); \
+ dest += stride; \
+ }
+
+void vp9_short_idct8x8_add_sse2(int16_t *input, uint8_t *dest, int stride) {
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ const __m128i final_rounding = _mm_set1_epi16(1<<4);
+ const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
+ const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
+ const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64);
+ const __m128i stg2_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
+ const __m128i stg2_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
+
+ __m128i in0, in1, in2, in3, in4, in5, in6, in7;
+ __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7;
+ __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int i;
+
+ // Load input data.
+ in0 = _mm_load_si128((__m128i *)input);
+ in1 = _mm_load_si128((__m128i *)(input + 8 * 1));
+ in2 = _mm_load_si128((__m128i *)(input + 8 * 2));
+ in3 = _mm_load_si128((__m128i *)(input + 8 * 3));
+ in4 = _mm_load_si128((__m128i *)(input + 8 * 4));
+ in5 = _mm_load_si128((__m128i *)(input + 8 * 5));
+ in6 = _mm_load_si128((__m128i *)(input + 8 * 6));
+ in7 = _mm_load_si128((__m128i *)(input + 8 * 7));
+
+ // 2-D
+ for (i = 0; i < 2; i++) {
+ // 8x8 Transpose is copied from vp9_short_fdct8x8_sse2()
+ TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ in4, in5, in6, in7);
+
+ // 4-stage 1D idct8x8
+ IDCT8x8_1D
+ }
+
+ // Final rounding and shift
+ in0 = _mm_adds_epi16(in0, final_rounding);
+ in1 = _mm_adds_epi16(in1, final_rounding);
+ in2 = _mm_adds_epi16(in2, final_rounding);
+ in3 = _mm_adds_epi16(in3, final_rounding);
+ in4 = _mm_adds_epi16(in4, final_rounding);
+ in5 = _mm_adds_epi16(in5, final_rounding);
+ in6 = _mm_adds_epi16(in6, final_rounding);
+ in7 = _mm_adds_epi16(in7, final_rounding);
+
+ in0 = _mm_srai_epi16(in0, 5);
+ in1 = _mm_srai_epi16(in1, 5);
+ in2 = _mm_srai_epi16(in2, 5);
+ in3 = _mm_srai_epi16(in3, 5);
+ in4 = _mm_srai_epi16(in4, 5);
+ in5 = _mm_srai_epi16(in5, 5);
+ in6 = _mm_srai_epi16(in6, 5);
+ in7 = _mm_srai_epi16(in7, 5);
+
+ RECON_AND_STORE(dest, in0);
+ RECON_AND_STORE(dest, in1);
+ RECON_AND_STORE(dest, in2);
+ RECON_AND_STORE(dest, in3);
+ RECON_AND_STORE(dest, in4);
+ RECON_AND_STORE(dest, in5);
+ RECON_AND_STORE(dest, in6);
+ RECON_AND_STORE(dest, in7);
+}
+
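+// Sparse-coefficient path: at most 10 non-zero coefficients, confined to
+// the top-left 4x4 of the 8x8 block, so only the first four input rows are
+// loaded and row-transformed.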
+void vp9_short_idct10_8x8_add_sse2(int16_t *input, uint8_t *dest, int stride) {
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ const __m128i final_rounding = _mm_set1_epi16(1<<4);
+ const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
+ const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
+ const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64);
+ const __m128i stg2_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
+ const __m128i stg2_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
+ const __m128i stg3_0 = pair_set_epi16(-cospi_16_64, cospi_16_64);
+
+ __m128i in0, in1, in2, in3, in4, in5, in6, in7;
+ __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7;
+ __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+
+ // Rows. Load 4-row input data.
+ in0 = _mm_load_si128((__m128i *)input);
+ in1 = _mm_load_si128((__m128i *)(input + 8 * 1));
+ in2 = _mm_load_si128((__m128i *)(input + 8 * 2));
+ in3 = _mm_load_si128((__m128i *)(input + 8 * 3));
+
+ // 8x4 Transpose
+ TRANSPOSE_8X4(in0, in1, in2, in3, in0, in1, in2, in3)
+
+ // Stage1
+ {
+ const __m128i lo_17 = _mm_unpackhi_epi16(in0, in3);
+ const __m128i lo_35 = _mm_unpackhi_epi16(in1, in2);
+
+ tmp0 = _mm_madd_epi16(lo_17, stg1_0);
+ tmp2 = _mm_madd_epi16(lo_17, stg1_1);
+ tmp4 = _mm_madd_epi16(lo_35, stg1_2);
+ tmp6 = _mm_madd_epi16(lo_35, stg1_3);
+
+ tmp0 = _mm_add_epi32(tmp0, rounding);
+ tmp2 = _mm_add_epi32(tmp2, rounding);
+ tmp4 = _mm_add_epi32(tmp4, rounding);
+ tmp6 = _mm_add_epi32(tmp6, rounding);
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+ tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
+ tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
+
+ stp1_4 = _mm_packs_epi32(tmp0, zero);
+ stp1_7 = _mm_packs_epi32(tmp2, zero);
+ stp1_5 = _mm_packs_epi32(tmp4, zero);
+ stp1_6 = _mm_packs_epi32(tmp6, zero);
+ }
+
+ // Stage2
+ {
+ const __m128i lo_04 = _mm_unpacklo_epi16(in0, in2);
+ const __m128i lo_26 = _mm_unpacklo_epi16(in1, in3);
+
+ tmp0 = _mm_madd_epi16(lo_04, stg2_0);
+ tmp2 = _mm_madd_epi16(lo_04, stg2_1);
+ tmp4 = _mm_madd_epi16(lo_26, stg2_2);
+ tmp6 = _mm_madd_epi16(lo_26, stg2_3);
+
+ tmp0 = _mm_add_epi32(tmp0, rounding);
+ tmp2 = _mm_add_epi32(tmp2, rounding);
+ tmp4 = _mm_add_epi32(tmp4, rounding);
+ tmp6 = _mm_add_epi32(tmp6, rounding);
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+ tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
+ tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
+
+ stp2_0 = _mm_packs_epi32(tmp0, zero);
+ stp2_1 = _mm_packs_epi32(tmp2, zero);
+ stp2_2 = _mm_packs_epi32(tmp4, zero);
+ stp2_3 = _mm_packs_epi32(tmp6, zero);
+
+ stp2_4 = _mm_adds_epi16(stp1_4, stp1_5);
+ stp2_5 = _mm_subs_epi16(stp1_4, stp1_5);
+ stp2_6 = _mm_subs_epi16(stp1_7, stp1_6);
+ stp2_7 = _mm_adds_epi16(stp1_7, stp1_6);
+ }
+
+ // Stage3
+ {
+ const __m128i lo_56 = _mm_unpacklo_epi16(stp2_5, stp2_6);
+ stp1_0 = _mm_adds_epi16(stp2_0, stp2_3);
+ stp1_1 = _mm_adds_epi16(stp2_1, stp2_2);
+ stp1_2 = _mm_subs_epi16(stp2_1, stp2_2);
+ stp1_3 = _mm_subs_epi16(stp2_0, stp2_3);
+
+ tmp0 = _mm_madd_epi16(lo_56, stg3_0);
+ tmp2 = _mm_madd_epi16(lo_56, stg2_0); // stg3_1 = stg2_0
+
+ tmp0 = _mm_add_epi32(tmp0, rounding);
+ tmp2 = _mm_add_epi32(tmp2, rounding);
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+
+ stp1_5 = _mm_packs_epi32(tmp0, zero);
+ stp1_6 = _mm_packs_epi32(tmp2, zero);
+ }
+
+ // Stage4
+ in0 = _mm_adds_epi16(stp1_0, stp2_7);
+ in1 = _mm_adds_epi16(stp1_1, stp1_6);
+ in2 = _mm_adds_epi16(stp1_2, stp1_5);
+ in3 = _mm_adds_epi16(stp1_3, stp2_4);
+ in4 = _mm_subs_epi16(stp1_3, stp2_4);
+ in5 = _mm_subs_epi16(stp1_2, stp1_5);
+ in6 = _mm_subs_epi16(stp1_1, stp1_6);
+ in7 = _mm_subs_epi16(stp1_0, stp2_7);
+
+ // Columns. 4x8 Transpose
+ TRANSPOSE_4X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ in4, in5, in6, in7)
+
+ // 1D idct8x8
+ IDCT8x8_1D
+
+ // Final rounding and shift
+ in0 = _mm_adds_epi16(in0, final_rounding);
+ in1 = _mm_adds_epi16(in1, final_rounding);
+ in2 = _mm_adds_epi16(in2, final_rounding);
+ in3 = _mm_adds_epi16(in3, final_rounding);
+ in4 = _mm_adds_epi16(in4, final_rounding);
+ in5 = _mm_adds_epi16(in5, final_rounding);
+ in6 = _mm_adds_epi16(in6, final_rounding);
+ in7 = _mm_adds_epi16(in7, final_rounding);
+
+ in0 = _mm_srai_epi16(in0, 5);
+ in1 = _mm_srai_epi16(in1, 5);
+ in2 = _mm_srai_epi16(in2, 5);
+ in3 = _mm_srai_epi16(in3, 5);
+ in4 = _mm_srai_epi16(in4, 5);
+ in5 = _mm_srai_epi16(in5, 5);
+ in6 = _mm_srai_epi16(in6, 5);
+ in7 = _mm_srai_epi16(in7, 5);
+
+ RECON_AND_STORE(dest, in0);
+ RECON_AND_STORE(dest, in1);
+ RECON_AND_STORE(dest, in2);
+ RECON_AND_STORE(dest, in3);
+ RECON_AND_STORE(dest, in4);
+ RECON_AND_STORE(dest, in5);
+ RECON_AND_STORE(dest, in6);
+ RECON_AND_STORE(dest, in7);
+}
+
+#define IDCT16x16_1D \
+ /* Stage2 */ \
+ { \
+ const __m128i lo_1_15 = _mm_unpacklo_epi16(in1, in15); \
+ const __m128i hi_1_15 = _mm_unpackhi_epi16(in1, in15); \
+ const __m128i lo_9_7 = _mm_unpacklo_epi16(in9, in7); \
+ const __m128i hi_9_7 = _mm_unpackhi_epi16(in9, in7); \
+ const __m128i lo_5_11 = _mm_unpacklo_epi16(in5, in11); \
+ const __m128i hi_5_11 = _mm_unpackhi_epi16(in5, in11); \
+ const __m128i lo_13_3 = _mm_unpacklo_epi16(in13, in3); \
+ const __m128i hi_13_3 = _mm_unpackhi_epi16(in13, in3); \
+ \
+ MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_9_7, hi_9_7, \
+ stg2_0, stg2_1, stg2_2, stg2_3, \
+ stp2_8, stp2_15, stp2_9, stp2_14) \
+ \
+ MULTIPLICATION_AND_ADD(lo_5_11, hi_5_11, lo_13_3, hi_13_3, \
+ stg2_4, stg2_5, stg2_6, stg2_7, \
+ stp2_10, stp2_13, stp2_11, stp2_12) \
+ } \
+ \
+ /* Stage3 */ \
+ { \
+ const __m128i lo_2_14 = _mm_unpacklo_epi16(in2, in14); \
+ const __m128i hi_2_14 = _mm_unpackhi_epi16(in2, in14); \
+ const __m128i lo_10_6 = _mm_unpacklo_epi16(in10, in6); \
+ const __m128i hi_10_6 = _mm_unpackhi_epi16(in10, in6); \
+ \
+ MULTIPLICATION_AND_ADD(lo_2_14, hi_2_14, lo_10_6, hi_10_6, \
+ stg3_0, stg3_1, stg3_2, stg3_3, \
+ stp1_4, stp1_7, stp1_5, stp1_6) \
+ \
+ stp1_8_0 = _mm_add_epi16(stp2_8, stp2_9); \
+ stp1_9 = _mm_sub_epi16(stp2_8, stp2_9); \
+ stp1_10 = _mm_sub_epi16(stp2_11, stp2_10); \
+ stp1_11 = _mm_add_epi16(stp2_11, stp2_10); \
+ \
+ stp1_12_0 = _mm_add_epi16(stp2_12, stp2_13); \
+ stp1_13 = _mm_sub_epi16(stp2_12, stp2_13); \
+ stp1_14 = _mm_sub_epi16(stp2_15, stp2_14); \
+ stp1_15 = _mm_add_epi16(stp2_15, stp2_14); \
+ } \
+ \
+ /* Stage4 */ \
+ { \
+ const __m128i lo_0_8 = _mm_unpacklo_epi16(in0, in8); \
+ const __m128i hi_0_8 = _mm_unpackhi_epi16(in0, in8); \
+ const __m128i lo_4_12 = _mm_unpacklo_epi16(in4, in12); \
+ const __m128i hi_4_12 = _mm_unpackhi_epi16(in4, in12); \
+ \
+ const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \
+ const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \
+ const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
+ const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
+ \
+ MULTIPLICATION_AND_ADD(lo_0_8, hi_0_8, lo_4_12, hi_4_12, \
+ stg4_0, stg4_1, stg4_2, stg4_3, \
+ stp2_0, stp2_1, stp2_2, stp2_3) \
+ \
+ stp2_4 = _mm_add_epi16(stp1_4, stp1_5); \
+ stp2_5 = _mm_sub_epi16(stp1_4, stp1_5); \
+ stp2_6 = _mm_sub_epi16(stp1_7, stp1_6); \
+ stp2_7 = _mm_add_epi16(stp1_7, stp1_6); \
+ \
+ MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, \
+ stg4_4, stg4_5, stg4_6, stg4_7, \
+ stp2_9, stp2_14, stp2_10, stp2_13) \
+ } \
+ \
+ /* Stage5 */ \
+ { \
+ const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \
+ const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \
+ \
+ stp1_0 = _mm_add_epi16(stp2_0, stp2_3); \
+ stp1_1 = _mm_add_epi16(stp2_1, stp2_2); \
+ stp1_2 = _mm_sub_epi16(stp2_1, stp2_2); \
+ stp1_3 = _mm_sub_epi16(stp2_0, stp2_3); \
+ \
+ tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \
+ tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \
+ tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \
+ tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \
+ \
+ tmp0 = _mm_add_epi32(tmp0, rounding); \
+ tmp1 = _mm_add_epi32(tmp1, rounding); \
+ tmp2 = _mm_add_epi32(tmp2, rounding); \
+ tmp3 = _mm_add_epi32(tmp3, rounding); \
+ \
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
+ tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
+ tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
+ \
+ stp1_5 = _mm_packs_epi32(tmp0, tmp1); \
+ stp1_6 = _mm_packs_epi32(tmp2, tmp3); \
+ \
+ stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11); \
+ stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \
+ stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \
+ stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11); \
+ \
+ stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0); \
+ stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \
+ stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \
+ stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0); \
+ } \
+ \
+ /* Stage6 */ \
+ { \
+ const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \
+ const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \
+ const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \
+ const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \
+ \
+ stp2_0 = _mm_add_epi16(stp1_0, stp2_7); \
+ stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \
+ stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \
+ stp2_3 = _mm_add_epi16(stp1_3, stp2_4); \
+ stp2_4 = _mm_sub_epi16(stp1_3, stp2_4); \
+ stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \
+ stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \
+ stp2_7 = _mm_sub_epi16(stp1_0, stp2_7); \
+ \
+ MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \
+ stg6_0, stg4_0, stg6_0, stg4_0, \
+ stp2_10, stp2_13, stp2_11, stp2_12) \
+ }
+
+void vp9_short_idct16x16_add_sse2(int16_t *input, uint8_t *dest, int stride) {
+ const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ const __m128i final_rounding = _mm_set1_epi16(1<<5);
+ const __m128i zero = _mm_setzero_si128();
+
+ const __m128i stg2_0 = pair_set_epi16(cospi_30_64, -cospi_2_64);
+ const __m128i stg2_1 = pair_set_epi16(cospi_2_64, cospi_30_64);
+ const __m128i stg2_2 = pair_set_epi16(cospi_14_64, -cospi_18_64);
+ const __m128i stg2_3 = pair_set_epi16(cospi_18_64, cospi_14_64);
+ const __m128i stg2_4 = pair_set_epi16(cospi_22_64, -cospi_10_64);
+ const __m128i stg2_5 = pair_set_epi16(cospi_10_64, cospi_22_64);
+ const __m128i stg2_6 = pair_set_epi16(cospi_6_64, -cospi_26_64);
+ const __m128i stg2_7 = pair_set_epi16(cospi_26_64, cospi_6_64);
+
+ const __m128i stg3_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
+ const __m128i stg3_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
+ const __m128i stg3_2 = pair_set_epi16(cospi_12_64, -cospi_20_64);
+ const __m128i stg3_3 = pair_set_epi16(cospi_20_64, cospi_12_64);
+
+ const __m128i stg4_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
+ const __m128i stg4_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i stg4_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m128i stg4_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
+ const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64);
+ const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
+ const __m128i stg4_7 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+
+ const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64);
+
+ __m128i in0 = zero, in1 = zero, in2 = zero, in3 = zero, in4 = zero,
+ in5 = zero, in6 = zero, in7 = zero, in8 = zero, in9 = zero,
+ in10 = zero, in11 = zero, in12 = zero, in13 = zero,
+ in14 = zero, in15 = zero;
+ __m128i l0 = zero, l1 = zero, l2 = zero, l3 = zero, l4 = zero, l5 = zero,
+ l6 = zero, l7 = zero, l8 = zero, l9 = zero, l10 = zero, l11 = zero,
+ l12 = zero, l13 = zero, l14 = zero, l15 = zero;
+ __m128i r0 = zero, r1 = zero, r2 = zero, r3 = zero, r4 = zero, r5 = zero,
+ r6 = zero, r7 = zero, r8 = zero, r9 = zero, r10 = zero, r11 = zero,
+ r12 = zero, r13 = zero, r14 = zero, r15 = zero;
+ __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7,
+ stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
+ stp1_8_0, stp1_12_0;
+ __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7,
+ stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int i;
+
+  // We work on an 8x16 block each time, and loop 4 times for the 2-D 16x16
+  // idct: passes 0 and 1 row-transform the top and bottom halves into l*/r*,
+  // and passes 2 and 3 column-transform the left and right halves.
+ for (i = 0; i < 4; i++) {
+ // 1-D idct
+ if (i < 2) {
+ if (i == 1) input += 128;
+
+ // Load input data.
+ in0 = _mm_load_si128((__m128i *)input);
+ in8 = _mm_load_si128((__m128i *)(input + 8 * 1));
+ in1 = _mm_load_si128((__m128i *)(input + 8 * 2));
+ in9 = _mm_load_si128((__m128i *)(input + 8 * 3));
+ in2 = _mm_load_si128((__m128i *)(input + 8 * 4));
+ in10 = _mm_load_si128((__m128i *)(input + 8 * 5));
+ in3 = _mm_load_si128((__m128i *)(input + 8 * 6));
+ in11 = _mm_load_si128((__m128i *)(input + 8 * 7));
+ in4 = _mm_load_si128((__m128i *)(input + 8 * 8));
+ in12 = _mm_load_si128((__m128i *)(input + 8 * 9));
+ in5 = _mm_load_si128((__m128i *)(input + 8 * 10));
+ in13 = _mm_load_si128((__m128i *)(input + 8 * 11));
+ in6 = _mm_load_si128((__m128i *)(input + 8 * 12));
+ in14 = _mm_load_si128((__m128i *)(input + 8 * 13));
+ in7 = _mm_load_si128((__m128i *)(input + 8 * 14));
+ in15 = _mm_load_si128((__m128i *)(input + 8 * 15));
+
+ TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ in4, in5, in6, in7);
+ TRANSPOSE_8X8(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
+ in10, in11, in12, in13, in14, in15);
+ }
+
+ if (i == 2) {
+ TRANSPOSE_8X8(l0, l1, l2, l3, l4, l5, l6, l7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
+ TRANSPOSE_8X8(r0, r1, r2, r3, r4, r5, r6, r7, in8, in9, in10, in11, in12,
+ in13, in14, in15);
+ }
+
+ if (i == 3) {
+ TRANSPOSE_8X8(l8, l9, l10, l11, l12, l13, l14, l15, in0, in1, in2, in3,
+ in4, in5, in6, in7);
+ TRANSPOSE_8X8(r8, r9, r10, r11, r12, r13, r14, r15, in8, in9, in10, in11,
+ in12, in13, in14, in15);
+ }
+
+ IDCT16x16_1D
+
+ // Stage7
+ if (i == 0) {
+ // Left 8x16
+ l0 = _mm_add_epi16(stp2_0, stp1_15);
+ l1 = _mm_add_epi16(stp2_1, stp1_14);
+ l2 = _mm_add_epi16(stp2_2, stp2_13);
+ l3 = _mm_add_epi16(stp2_3, stp2_12);
+ l4 = _mm_add_epi16(stp2_4, stp2_11);
+ l5 = _mm_add_epi16(stp2_5, stp2_10);
+ l6 = _mm_add_epi16(stp2_6, stp1_9);
+ l7 = _mm_add_epi16(stp2_7, stp1_8);
+ l8 = _mm_sub_epi16(stp2_7, stp1_8);
+ l9 = _mm_sub_epi16(stp2_6, stp1_9);
+ l10 = _mm_sub_epi16(stp2_5, stp2_10);
+ l11 = _mm_sub_epi16(stp2_4, stp2_11);
+ l12 = _mm_sub_epi16(stp2_3, stp2_12);
+ l13 = _mm_sub_epi16(stp2_2, stp2_13);
+ l14 = _mm_sub_epi16(stp2_1, stp1_14);
+ l15 = _mm_sub_epi16(stp2_0, stp1_15);
+ } else if (i == 1) {
+ // Right 8x16
+ r0 = _mm_add_epi16(stp2_0, stp1_15);
+ r1 = _mm_add_epi16(stp2_1, stp1_14);
+ r2 = _mm_add_epi16(stp2_2, stp2_13);
+ r3 = _mm_add_epi16(stp2_3, stp2_12);
+ r4 = _mm_add_epi16(stp2_4, stp2_11);
+ r5 = _mm_add_epi16(stp2_5, stp2_10);
+ r6 = _mm_add_epi16(stp2_6, stp1_9);
+ r7 = _mm_add_epi16(stp2_7, stp1_8);
+ r8 = _mm_sub_epi16(stp2_7, stp1_8);
+ r9 = _mm_sub_epi16(stp2_6, stp1_9);
+ r10 = _mm_sub_epi16(stp2_5, stp2_10);
+ r11 = _mm_sub_epi16(stp2_4, stp2_11);
+ r12 = _mm_sub_epi16(stp2_3, stp2_12);
+ r13 = _mm_sub_epi16(stp2_2, stp2_13);
+ r14 = _mm_sub_epi16(stp2_1, stp1_14);
+ r15 = _mm_sub_epi16(stp2_0, stp1_15);
+ } else {
+ // 2-D
+ in0 = _mm_add_epi16(stp2_0, stp1_15);
+ in1 = _mm_add_epi16(stp2_1, stp1_14);
+ in2 = _mm_add_epi16(stp2_2, stp2_13);
+ in3 = _mm_add_epi16(stp2_3, stp2_12);
+ in4 = _mm_add_epi16(stp2_4, stp2_11);
+ in5 = _mm_add_epi16(stp2_5, stp2_10);
+ in6 = _mm_add_epi16(stp2_6, stp1_9);
+ in7 = _mm_add_epi16(stp2_7, stp1_8);
+ in8 = _mm_sub_epi16(stp2_7, stp1_8);
+ in9 = _mm_sub_epi16(stp2_6, stp1_9);
+ in10 = _mm_sub_epi16(stp2_5, stp2_10);
+ in11 = _mm_sub_epi16(stp2_4, stp2_11);
+ in12 = _mm_sub_epi16(stp2_3, stp2_12);
+ in13 = _mm_sub_epi16(stp2_2, stp2_13);
+ in14 = _mm_sub_epi16(stp2_1, stp1_14);
+ in15 = _mm_sub_epi16(stp2_0, stp1_15);
+
+ // Final rounding and shift
+ in0 = _mm_adds_epi16(in0, final_rounding);
+ in1 = _mm_adds_epi16(in1, final_rounding);
+ in2 = _mm_adds_epi16(in2, final_rounding);
+ in3 = _mm_adds_epi16(in3, final_rounding);
+ in4 = _mm_adds_epi16(in4, final_rounding);
+ in5 = _mm_adds_epi16(in5, final_rounding);
+ in6 = _mm_adds_epi16(in6, final_rounding);
+ in7 = _mm_adds_epi16(in7, final_rounding);
+ in8 = _mm_adds_epi16(in8, final_rounding);
+ in9 = _mm_adds_epi16(in9, final_rounding);
+ in10 = _mm_adds_epi16(in10, final_rounding);
+ in11 = _mm_adds_epi16(in11, final_rounding);
+ in12 = _mm_adds_epi16(in12, final_rounding);
+ in13 = _mm_adds_epi16(in13, final_rounding);
+ in14 = _mm_adds_epi16(in14, final_rounding);
+ in15 = _mm_adds_epi16(in15, final_rounding);
+
+ in0 = _mm_srai_epi16(in0, 6);
+ in1 = _mm_srai_epi16(in1, 6);
+ in2 = _mm_srai_epi16(in2, 6);
+ in3 = _mm_srai_epi16(in3, 6);
+ in4 = _mm_srai_epi16(in4, 6);
+ in5 = _mm_srai_epi16(in5, 6);
+ in6 = _mm_srai_epi16(in6, 6);
+ in7 = _mm_srai_epi16(in7, 6);
+ in8 = _mm_srai_epi16(in8, 6);
+ in9 = _mm_srai_epi16(in9, 6);
+ in10 = _mm_srai_epi16(in10, 6);
+ in11 = _mm_srai_epi16(in11, 6);
+ in12 = _mm_srai_epi16(in12, 6);
+ in13 = _mm_srai_epi16(in13, 6);
+ in14 = _mm_srai_epi16(in14, 6);
+ in15 = _mm_srai_epi16(in15, 6);
+
+ RECON_AND_STORE(dest, in0);
+ RECON_AND_STORE(dest, in1);
+ RECON_AND_STORE(dest, in2);
+ RECON_AND_STORE(dest, in3);
+ RECON_AND_STORE(dest, in4);
+ RECON_AND_STORE(dest, in5);
+ RECON_AND_STORE(dest, in6);
+ RECON_AND_STORE(dest, in7);
+ RECON_AND_STORE(dest, in8);
+ RECON_AND_STORE(dest, in9);
+ RECON_AND_STORE(dest, in10);
+ RECON_AND_STORE(dest, in11);
+ RECON_AND_STORE(dest, in12);
+ RECON_AND_STORE(dest, in13);
+ RECON_AND_STORE(dest, in14);
+ RECON_AND_STORE(dest, in15);
+
+ dest += 8 - (stride * 16);
+ }
+ }
+}
+
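+// Sparse-coefficient path for 16x16: the non-zero coefficients are assumed
+// to lie in the top-left 4x4, so only the first four input rows are loaded
+// and row-transformed.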
+void vp9_short_idct10_16x16_add_sse2(int16_t *input, uint8_t *dest,
+ int stride) {
+ const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ const __m128i final_rounding = _mm_set1_epi16(1<<5);
+ const __m128i zero = _mm_setzero_si128();
+
+ const __m128i stg2_0 = pair_set_epi16(cospi_30_64, -cospi_2_64);
+ const __m128i stg2_1 = pair_set_epi16(cospi_2_64, cospi_30_64);
+ const __m128i stg2_2 = pair_set_epi16(cospi_14_64, -cospi_18_64);
+ const __m128i stg2_3 = pair_set_epi16(cospi_18_64, cospi_14_64);
+ const __m128i stg2_4 = pair_set_epi16(cospi_22_64, -cospi_10_64);
+ const __m128i stg2_5 = pair_set_epi16(cospi_10_64, cospi_22_64);
+ const __m128i stg2_6 = pair_set_epi16(cospi_6_64, -cospi_26_64);
+ const __m128i stg2_7 = pair_set_epi16(cospi_26_64, cospi_6_64);
+
+ const __m128i stg3_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
+ const __m128i stg3_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
+ const __m128i stg3_2 = pair_set_epi16(cospi_12_64, -cospi_20_64);
+ const __m128i stg3_3 = pair_set_epi16(cospi_20_64, cospi_12_64);
+
+ const __m128i stg4_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
+ const __m128i stg4_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i stg4_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m128i stg4_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
+ const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64);
+ const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
+ const __m128i stg4_7 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+
+ const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64);
+
+ __m128i in0 = zero, in1 = zero, in2 = zero, in3 = zero, in4 = zero,
+ in5 = zero, in6 = zero, in7 = zero, in8 = zero, in9 = zero,
+ in10 = zero, in11 = zero, in12 = zero, in13 = zero,
+ in14 = zero, in15 = zero;
+ __m128i l0 = zero, l1 = zero, l2 = zero, l3 = zero, l4 = zero, l5 = zero,
+ l6 = zero, l7 = zero, l8 = zero, l9 = zero, l10 = zero, l11 = zero,
+ l12 = zero, l13 = zero, l14 = zero, l15 = zero;
+
+ __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7,
+ stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
+ stp1_8_0, stp1_12_0;
+ __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7,
+ stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int i;
+ // 1-D idct. Load input data.
+ in0 = _mm_load_si128((__m128i *)input);
+ in8 = _mm_load_si128((__m128i *)(input + 8 * 1));
+ in1 = _mm_load_si128((__m128i *)(input + 8 * 2));
+ in9 = _mm_load_si128((__m128i *)(input + 8 * 3));
+ in2 = _mm_load_si128((__m128i *)(input + 8 * 4));
+ in10 = _mm_load_si128((__m128i *)(input + 8 * 5));
+ in3 = _mm_load_si128((__m128i *)(input + 8 * 6));
+ in11 = _mm_load_si128((__m128i *)(input + 8 * 7));
+
+ TRANSPOSE_8X4(in0, in1, in2, in3, in0, in1, in2, in3);
+ TRANSPOSE_8X4(in8, in9, in10, in11, in8, in9, in10, in11);
+
+ // Stage2
+ {
+ const __m128i lo_1_15 = _mm_unpackhi_epi16(in0, in11);
+ const __m128i lo_9_7 = _mm_unpackhi_epi16(in8, in3);
+ const __m128i lo_5_11 = _mm_unpackhi_epi16(in2, in9);
+ const __m128i lo_13_3 = _mm_unpackhi_epi16(in10, in1);
+
+ tmp0 = _mm_madd_epi16(lo_1_15, stg2_0);
+ tmp2 = _mm_madd_epi16(lo_1_15, stg2_1);
+ tmp4 = _mm_madd_epi16(lo_9_7, stg2_2);
+ tmp6 = _mm_madd_epi16(lo_9_7, stg2_3);
+ tmp1 = _mm_madd_epi16(lo_5_11, stg2_4);
+ tmp3 = _mm_madd_epi16(lo_5_11, stg2_5);
+ tmp5 = _mm_madd_epi16(lo_13_3, stg2_6);
+ tmp7 = _mm_madd_epi16(lo_13_3, stg2_7);
+
+ tmp0 = _mm_add_epi32(tmp0, rounding);
+ tmp2 = _mm_add_epi32(tmp2, rounding);
+ tmp4 = _mm_add_epi32(tmp4, rounding);
+ tmp6 = _mm_add_epi32(tmp6, rounding);
+ tmp1 = _mm_add_epi32(tmp1, rounding);
+ tmp3 = _mm_add_epi32(tmp3, rounding);
+ tmp5 = _mm_add_epi32(tmp5, rounding);
+ tmp7 = _mm_add_epi32(tmp7, rounding);
+
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+ tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
+ tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
+ tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
+ tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
+ tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);
+ tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS);
+
+ stp2_8 = _mm_packs_epi32(tmp0, zero);
+ stp2_15 = _mm_packs_epi32(tmp2, zero);
+ stp2_9 = _mm_packs_epi32(tmp4, zero);
+ stp2_14 = _mm_packs_epi32(tmp6, zero);
+
+ stp2_10 = _mm_packs_epi32(tmp1, zero);
+ stp2_13 = _mm_packs_epi32(tmp3, zero);
+ stp2_11 = _mm_packs_epi32(tmp5, zero);
+ stp2_12 = _mm_packs_epi32(tmp7, zero);
+ }
+
+ // Stage3
+ {
+ const __m128i lo_2_14 = _mm_unpacklo_epi16(in1, in11);
+ const __m128i lo_10_6 = _mm_unpacklo_epi16(in9, in3);
+
+ tmp0 = _mm_madd_epi16(lo_2_14, stg3_0);
+ tmp2 = _mm_madd_epi16(lo_2_14, stg3_1);
+ tmp4 = _mm_madd_epi16(lo_10_6, stg3_2);
+ tmp6 = _mm_madd_epi16(lo_10_6, stg3_3);
+
+ tmp0 = _mm_add_epi32(tmp0, rounding);
+ tmp2 = _mm_add_epi32(tmp2, rounding);
+ tmp4 = _mm_add_epi32(tmp4, rounding);
+ tmp6 = _mm_add_epi32(tmp6, rounding);
+
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+ tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
+ tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
+
+ stp1_4 = _mm_packs_epi32(tmp0, zero);
+ stp1_7 = _mm_packs_epi32(tmp2, zero);
+ stp1_5 = _mm_packs_epi32(tmp4, zero);
+ stp1_6 = _mm_packs_epi32(tmp6, zero);
+
+ stp1_8_0 = _mm_add_epi16(stp2_8, stp2_9);
+ stp1_9 = _mm_sub_epi16(stp2_8, stp2_9);
+ stp1_10 = _mm_sub_epi16(stp2_11, stp2_10);
+ stp1_11 = _mm_add_epi16(stp2_11, stp2_10);
+
+ stp1_12_0 = _mm_add_epi16(stp2_12, stp2_13);
+ stp1_13 = _mm_sub_epi16(stp2_12, stp2_13);
+ stp1_14 = _mm_sub_epi16(stp2_15, stp2_14);
+ stp1_15 = _mm_add_epi16(stp2_15, stp2_14);
+ }
+
+ // Stage4
+ {
+ const __m128i lo_0_8 = _mm_unpacklo_epi16(in0, in8);
+ const __m128i lo_4_12 = _mm_unpacklo_epi16(in2, in10);
+ const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14);
+ const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);
+
+ tmp0 = _mm_madd_epi16(lo_0_8, stg4_0);
+ tmp2 = _mm_madd_epi16(lo_0_8, stg4_1);
+ tmp4 = _mm_madd_epi16(lo_4_12, stg4_2);
+ tmp6 = _mm_madd_epi16(lo_4_12, stg4_3);
+ tmp1 = _mm_madd_epi16(lo_9_14, stg4_4);
+ tmp3 = _mm_madd_epi16(lo_9_14, stg4_5);
+ tmp5 = _mm_madd_epi16(lo_10_13, stg4_6);
+ tmp7 = _mm_madd_epi16(lo_10_13, stg4_7);
+
+ tmp0 = _mm_add_epi32(tmp0, rounding);
+ tmp2 = _mm_add_epi32(tmp2, rounding);
+ tmp4 = _mm_add_epi32(tmp4, rounding);
+ tmp6 = _mm_add_epi32(tmp6, rounding);
+ tmp1 = _mm_add_epi32(tmp1, rounding);
+ tmp3 = _mm_add_epi32(tmp3, rounding);
+ tmp5 = _mm_add_epi32(tmp5, rounding);
+ tmp7 = _mm_add_epi32(tmp7, rounding);
+
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+ tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
+ tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
+ tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
+ tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
+ tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);
+ tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS);
+
+ stp2_0 = _mm_packs_epi32(tmp0, zero);
+ stp2_1 = _mm_packs_epi32(tmp2, zero);
+ stp2_2 = _mm_packs_epi32(tmp4, zero);
+ stp2_3 = _mm_packs_epi32(tmp6, zero);
+ stp2_9 = _mm_packs_epi32(tmp1, zero);
+ stp2_14 = _mm_packs_epi32(tmp3, zero);
+ stp2_10 = _mm_packs_epi32(tmp5, zero);
+ stp2_13 = _mm_packs_epi32(tmp7, zero);
+
+ stp2_4 = _mm_add_epi16(stp1_4, stp1_5);
+ stp2_5 = _mm_sub_epi16(stp1_4, stp1_5);
+ stp2_6 = _mm_sub_epi16(stp1_7, stp1_6);
+ stp2_7 = _mm_add_epi16(stp1_7, stp1_6);
+ }
+
+ // Stage5 and Stage6
+ {
+ stp1_0 = _mm_add_epi16(stp2_0, stp2_3);
+ stp1_1 = _mm_add_epi16(stp2_1, stp2_2);
+ stp1_2 = _mm_sub_epi16(stp2_1, stp2_2);
+ stp1_3 = _mm_sub_epi16(stp2_0, stp2_3);
+
+ stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11);
+ stp1_9 = _mm_add_epi16(stp2_9, stp2_10);
+ stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);
+ stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11);
+
+ stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0);
+ stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);
+ stp1_14 = _mm_add_epi16(stp2_14, stp2_13);
+ stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0);
+ }
+
+ // Stage6
+ {
+ const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5);
+ const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);
+ const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12);
+
+ tmp1 = _mm_madd_epi16(lo_6_5, stg4_1);
+ tmp3 = _mm_madd_epi16(lo_6_5, stg4_0);
+ tmp0 = _mm_madd_epi16(lo_10_13, stg6_0);
+ tmp2 = _mm_madd_epi16(lo_10_13, stg4_0);
+ tmp4 = _mm_madd_epi16(lo_11_12, stg6_0);
+ tmp6 = _mm_madd_epi16(lo_11_12, stg4_0);
+
+ tmp1 = _mm_add_epi32(tmp1, rounding);
+ tmp3 = _mm_add_epi32(tmp3, rounding);
+ tmp0 = _mm_add_epi32(tmp0, rounding);
+ tmp2 = _mm_add_epi32(tmp2, rounding);
+ tmp4 = _mm_add_epi32(tmp4, rounding);
+ tmp6 = _mm_add_epi32(tmp6, rounding);
+
+ tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
+ tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+ tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
+ tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);
+
+ stp1_5 = _mm_packs_epi32(tmp1, zero);
+ stp1_6 = _mm_packs_epi32(tmp3, zero);
+ stp2_10 = _mm_packs_epi32(tmp0, zero);
+ stp2_13 = _mm_packs_epi32(tmp2, zero);
+ stp2_11 = _mm_packs_epi32(tmp4, zero);
+ stp2_12 = _mm_packs_epi32(tmp6, zero);
+
+ stp2_0 = _mm_add_epi16(stp1_0, stp2_7);
+ stp2_1 = _mm_add_epi16(stp1_1, stp1_6);
+ stp2_2 = _mm_add_epi16(stp1_2, stp1_5);
+ stp2_3 = _mm_add_epi16(stp1_3, stp2_4);
+ stp2_4 = _mm_sub_epi16(stp1_3, stp2_4);
+ stp2_5 = _mm_sub_epi16(stp1_2, stp1_5);
+ stp2_6 = _mm_sub_epi16(stp1_1, stp1_6);
+ stp2_7 = _mm_sub_epi16(stp1_0, stp2_7);
+ }
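+
+ // With stg4_0 = (cospi_16_64, cospi_16_64) and stg6_0 =
+ // (-cospi_16_64, cospi_16_64) (the 32x32 function below defines the
+ // same pairs), the madds above compute (x + y) * cospi_16_64 and
+ // (y - x) * cospi_16_64: the 45-degree rotation used for the middle
+ // butterfly terms.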
+
+ // Stage7. Left 8x16 half only.
+ l0 = _mm_add_epi16(stp2_0, stp1_15);
+ l1 = _mm_add_epi16(stp2_1, stp1_14);
+ l2 = _mm_add_epi16(stp2_2, stp2_13);
+ l3 = _mm_add_epi16(stp2_3, stp2_12);
+ l4 = _mm_add_epi16(stp2_4, stp2_11);
+ l5 = _mm_add_epi16(stp2_5, stp2_10);
+ l6 = _mm_add_epi16(stp2_6, stp1_9);
+ l7 = _mm_add_epi16(stp2_7, stp1_8);
+ l8 = _mm_sub_epi16(stp2_7, stp1_8);
+ l9 = _mm_sub_epi16(stp2_6, stp1_9);
+ l10 = _mm_sub_epi16(stp2_5, stp2_10);
+ l11 = _mm_sub_epi16(stp2_4, stp2_11);
+ l12 = _mm_sub_epi16(stp2_3, stp2_12);
+ l13 = _mm_sub_epi16(stp2_2, stp2_13);
+ l14 = _mm_sub_epi16(stp2_1, stp1_14);
+ l15 = _mm_sub_epi16(stp2_0, stp1_15);
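+
+ // l0..l15 now hold the 16 output rows of the first 1-D pass for the
+ // left half; the loop below transposes them back and runs the second
+ // pass.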
+
+ // 2-D idct. We process the two 8x16 halves in turn.
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ TRANSPOSE_4X8(l0, l1, l2, l3, l4, l5, l6, l7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
+
+ if (i == 1)
+ TRANSPOSE_4X8(l8, l9, l10, l11, l12, l13, l14, l15, in0, in1, in2, in3,
+ in4, in5, in6, in7);
+
+ in8 = in9 = in10 = in11 = in12 = in13 = in14 = in15 = zero;
+
+ IDCT16x16_1D
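+ // (IDCT16x16_1D, defined earlier in this file, expands to the full
+ // 16-point 1-D idct on in0..in15, leaving the stage-7 inputs in the
+ // stp1_*/stp2_* registers consumed below.)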
+
+ // Stage7
+ in0 = _mm_add_epi16(stp2_0, stp1_15);
+ in1 = _mm_add_epi16(stp2_1, stp1_14);
+ in2 = _mm_add_epi16(stp2_2, stp2_13);
+ in3 = _mm_add_epi16(stp2_3, stp2_12);
+ in4 = _mm_add_epi16(stp2_4, stp2_11);
+ in5 = _mm_add_epi16(stp2_5, stp2_10);
+ in6 = _mm_add_epi16(stp2_6, stp1_9);
+ in7 = _mm_add_epi16(stp2_7, stp1_8);
+ in8 = _mm_sub_epi16(stp2_7, stp1_8);
+ in9 = _mm_sub_epi16(stp2_6, stp1_9);
+ in10 = _mm_sub_epi16(stp2_5, stp2_10);
+ in11 = _mm_sub_epi16(stp2_4, stp2_11);
+ in12 = _mm_sub_epi16(stp2_3, stp2_12);
+ in13 = _mm_sub_epi16(stp2_2, stp2_13);
+ in14 = _mm_sub_epi16(stp2_1, stp1_14);
+ in15 = _mm_sub_epi16(stp2_0, stp1_15);
+
+ // Final rounding and shift
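+ // Adding 1 << 5 and shifting right by 6 is ROUND_POWER_OF_TWO(x, 6):
+ // the two 1-D passes leave the results scaled up by 2^6.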
+ in0 = _mm_adds_epi16(in0, final_rounding);
+ in1 = _mm_adds_epi16(in1, final_rounding);
+ in2 = _mm_adds_epi16(in2, final_rounding);
+ in3 = _mm_adds_epi16(in3, final_rounding);
+ in4 = _mm_adds_epi16(in4, final_rounding);
+ in5 = _mm_adds_epi16(in5, final_rounding);
+ in6 = _mm_adds_epi16(in6, final_rounding);
+ in7 = _mm_adds_epi16(in7, final_rounding);
+ in8 = _mm_adds_epi16(in8, final_rounding);
+ in9 = _mm_adds_epi16(in9, final_rounding);
+ in10 = _mm_adds_epi16(in10, final_rounding);
+ in11 = _mm_adds_epi16(in11, final_rounding);
+ in12 = _mm_adds_epi16(in12, final_rounding);
+ in13 = _mm_adds_epi16(in13, final_rounding);
+ in14 = _mm_adds_epi16(in14, final_rounding);
+ in15 = _mm_adds_epi16(in15, final_rounding);
+
+ in0 = _mm_srai_epi16(in0, 6);
+ in1 = _mm_srai_epi16(in1, 6);
+ in2 = _mm_srai_epi16(in2, 6);
+ in3 = _mm_srai_epi16(in3, 6);
+ in4 = _mm_srai_epi16(in4, 6);
+ in5 = _mm_srai_epi16(in5, 6);
+ in6 = _mm_srai_epi16(in6, 6);
+ in7 = _mm_srai_epi16(in7, 6);
+ in8 = _mm_srai_epi16(in8, 6);
+ in9 = _mm_srai_epi16(in9, 6);
+ in10 = _mm_srai_epi16(in10, 6);
+ in11 = _mm_srai_epi16(in11, 6);
+ in12 = _mm_srai_epi16(in12, 6);
+ in13 = _mm_srai_epi16(in13, 6);
+ in14 = _mm_srai_epi16(in14, 6);
+ in15 = _mm_srai_epi16(in15, 6);
+
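+ // RECON_AND_STORE (defined earlier in this file) adds a row of
+ // residuals to the prediction at dest with unsigned saturation,
+ // stores it, and advances dest by stride; hence the rewind below.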
+ RECON_AND_STORE(dest, in0);
+ RECON_AND_STORE(dest, in1);
+ RECON_AND_STORE(dest, in2);
+ RECON_AND_STORE(dest, in3);
+ RECON_AND_STORE(dest, in4);
+ RECON_AND_STORE(dest, in5);
+ RECON_AND_STORE(dest, in6);
+ RECON_AND_STORE(dest, in7);
+ RECON_AND_STORE(dest, in8);
+ RECON_AND_STORE(dest, in9);
+ RECON_AND_STORE(dest, in10);
+ RECON_AND_STORE(dest, in11);
+ RECON_AND_STORE(dest, in12);
+ RECON_AND_STORE(dest, in13);
+ RECON_AND_STORE(dest, in14);
+ RECON_AND_STORE(dest, in15);
+
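+ // The 16 stores above advanced dest by 16 rows; step back to the top
+ // and move 8 pixels right for the second 8x16 half.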
+ dest += 8 - (stride * 16);
+ }
+}
+
+void vp9_short_idct32x32_add_sse2(int16_t *input, uint8_t *dest, int stride) {
+ const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ const __m128i final_rounding = _mm_set1_epi16(1 << 5);
+
+ // idct constants for each stage
+ const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64);
+ const __m128i stg1_1 = pair_set_epi16(cospi_1_64, cospi_31_64);
+ const __m128i stg1_2 = pair_set_epi16(cospi_15_64, -cospi_17_64);
+ const __m128i stg1_3 = pair_set_epi16(cospi_17_64, cospi_15_64);
+ const __m128i stg1_4 = pair_set_epi16(cospi_23_64, -cospi_9_64);
+ const __m128i stg1_5 = pair_set_epi16(cospi_9_64, cospi_23_64);
+ const __m128i stg1_6 = pair_set_epi16(cospi_7_64, -cospi_25_64);
+ const __m128i stg1_7 = pair_set_epi16(cospi_25_64, cospi_7_64);
+ const __m128i stg1_8 = pair_set_epi16(cospi_27_64, -cospi_5_64);
+ const __m128i stg1_9 = pair_set_epi16(cospi_5_64, cospi_27_64);
+ const __m128i stg1_10 = pair_set_epi16(cospi_11_64, -cospi_21_64);
+ const __m128i stg1_11 = pair_set_epi16(cospi_21_64, cospi_11_64);
+ const __m128i stg1_12 = pair_set_epi16(cospi_19_64, -cospi_13_64);
+ const __m128i stg1_13 = pair_set_epi16(cospi_13_64, cospi_19_64);
+ const __m128i stg1_14 = pair_set_epi16(cospi_3_64, -cospi_29_64);
+ const __m128i stg1_15 = pair_set_epi16(cospi_29_64, cospi_3_64);
+
+ const __m128i stg2_0 = pair_set_epi16(cospi_30_64, -cospi_2_64);
+ const __m128i stg2_1 = pair_set_epi16(cospi_2_64, cospi_30_64);
+ const __m128i stg2_2 = pair_set_epi16(cospi_14_64, -cospi_18_64);
+ const __m128i stg2_3 = pair_set_epi16(cospi_18_64, cospi_14_64);
+ const __m128i stg2_4 = pair_set_epi16(cospi_22_64, -cospi_10_64);
+ const __m128i stg2_5 = pair_set_epi16(cospi_10_64, cospi_22_64);
+ const __m128i stg2_6 = pair_set_epi16(cospi_6_64, -cospi_26_64);
+ const __m128i stg2_7 = pair_set_epi16(cospi_26_64, cospi_6_64);
+
+ const __m128i stg3_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
+ const __m128i stg3_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
+ const __m128i stg3_2 = pair_set_epi16(cospi_12_64, -cospi_20_64);
+ const __m128i stg3_3 = pair_set_epi16(cospi_20_64, cospi_12_64);
+ const __m128i stg3_4 = pair_set_epi16(-cospi_4_64, cospi_28_64);
+ const __m128i stg3_5 = pair_set_epi16(cospi_28_64, cospi_4_64);
+ const __m128i stg3_6 = pair_set_epi16(-cospi_28_64, -cospi_4_64);
+ const __m128i stg3_8 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m128i stg3_9 = pair_set_epi16(cospi_12_64, cospi_20_64);
+ const __m128i stg3_10 = pair_set_epi16(-cospi_12_64, -cospi_20_64);
+
+ const __m128i stg4_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
+ const __m128i stg4_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i stg4_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m128i stg4_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
+ const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64);
+ const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
+
+ const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64);
+
+ __m128i in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, in12,
+ in13, in14, in15, in16, in17, in18, in19, in20, in21, in22, in23,
+ in24, in25, in26, in27, in28, in29, in30, in31;
+ __m128i col[128];
+ __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7,
+ stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
+ stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22,
+ stp1_23, stp1_24, stp1_25, stp1_26, stp1_27, stp1_28, stp1_29,
+ stp1_30, stp1_31;
+ __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7,
+ stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15,
+ stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22,
+ stp2_23, stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29,
+ stp2_30, stp2_31;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int i, j;
+
+ // Work on an 8x32 slice at a time; eight iterations complete the 2-D
+ // 32x32 idct. The first four run the 1-D transform on an 8-column
+ // slice of the input into col[]; the last four transpose those
+ // results back and run the second pass, writing to the destination.
+ for (i = 0; i < 8; i++) {
+ if (i < 4) {
+ // First 1-D idct
+ // Load input data.
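+ // Each row of the 32x32 block spans four vectors. The interleaved
+ // load order below (in0, in8, in16, in24 for row 0, and so on) puts
+ // one 8x8 tile of coefficients into each group of eight registers
+ // (in0..in7 = columns 0-7 of rows 0-7), ready for the 8x8 transposes
+ // that follow.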
+ in0 = _mm_load_si128((__m128i *)input);
+ in8 = _mm_load_si128((__m128i *)(input + 8 * 1));
+ in16 = _mm_load_si128((__m128i *)(input + 8 * 2));
+ in24 = _mm_load_si128((__m128i *)(input + 8 * 3));
+ in1 = _mm_load_si128((__m128i *)(input + 8 * 4));
+ in9 = _mm_load_si128((__m128i *)(input + 8 * 5));
+ in17 = _mm_load_si128((__m128i *)(input + 8 * 6));
+ in25 = _mm_load_si128((__m128i *)(input + 8 * 7));
+ in2 = _mm_load_si128((__m128i *)(input + 8 * 8));
+ in10 = _mm_load_si128((__m128i *)(input + 8 * 9));
+ in18 = _mm_load_si128((__m128i *)(input + 8 * 10));
+ in26 = _mm_load_si128((__m128i *)(input + 8 * 11));
+ in3 = _mm_load_si128((__m128i *)(input + 8 * 12));
+ in11 = _mm_load_si128((__m128i *)(input + 8 * 13));
+ in19 = _mm_load_si128((__m128i *)(input + 8 * 14));
+ in27 = _mm_load_si128((__m128i *)(input + 8 * 15));
+
+ in4 = _mm_load_si128((__m128i *)(input + 8 * 16));
+ in12 = _mm_load_si128((__m128i *)(input + 8 * 17));
+ in20 = _mm_load_si128((__m128i *)(input + 8 * 18));
+ in28 = _mm_load_si128((__m128i *)(input + 8 * 19));
+ in5 = _mm_load_si128((__m128i *)(input + 8 * 20));
+ in13 = _mm_load_si128((__m128i *)(input + 8 * 21));
+ in21 = _mm_load_si128((__m128i *)(input + 8 * 22));
+ in29 = _mm_load_si128((__m128i *)(input + 8 * 23));
+ in6 = _mm_load_si128((__m128i *)(input + 8 * 24));
+ in14 = _mm_load_si128((__m128i *)(input + 8 * 25));
+ in22 = _mm_load_si128((__m128i *)(input + 8 * 26));
+ in30 = _mm_load_si128((__m128i *)(input + 8 * 27));
+ in7 = _mm_load_si128((__m128i *)(input + 8 * 28));
+ in15 = _mm_load_si128((__m128i *)(input + 8 * 29));
+ in23 = _mm_load_si128((__m128i *)(input + 8 * 30));
+ in31 = _mm_load_si128((__m128i *)(input + 8 * 31));
+
+ input += 256;
+
+ // Transpose 32x8 block to 8x32 block
+ TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ in4, in5, in6, in7);
+ TRANSPOSE_8X8(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
+ in10, in11, in12, in13, in14, in15);
+ TRANSPOSE_8X8(in16, in17, in18, in19, in20, in21, in22, in23, in16, in17,
+ in18, in19, in20, in21, in22, in23);
+ TRANSPOSE_8X8(in24, in25, in26, in27, in28, in29, in30, in31, in24, in25,
+ in26, in27, in28, in29, in30, in31);
+ } else {
+ // Second 1-D idct
+ j = i - 4;
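+
+ // col[] holds the first-pass results as four slices of 32 vectors.
+ // j * 8 picks the 8-row group matching this output slice; each
+ // j += 4 (32 vectors) advances to the same rows of the next stored
+ // slice, so the four transposes below gather one 8x8 tile from each.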
+
+ // Transpose 32x8 block to 8x32 block
+ TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2],
+ col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5],
+ col[j * 8 + 6], col[j * 8 + 7], in0, in1, in2, in3, in4,
+ in5, in6, in7);
+ j += 4;
+ TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2],
+ col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5],
+ col[j * 8 + 6], col[j * 8 + 7], in8, in9, in10,
+ in11, in12, in13, in14, in15);
+ j += 4;
+ TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2],
+ col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5],
+ col[j * 8 + 6], col[j * 8 + 7], in16, in17, in18,
+ in19, in20, in21, in22, in23);
+ j += 4;
+ TRANSPOSE_8X8(col[j * 8 + 0], col[j * 8 + 1], col[j * 8 + 2],
+ col[j * 8 + 3], col[j * 8 + 4], col[j * 8 + 5],
+ col[j * 8 + 6], col[j * 8 + 7], in24, in25, in26, in27,
+ in28, in29, in30, in31);
+ }
+
+ // Stage1
+ {
+ const __m128i lo_1_31 = _mm_unpacklo_epi16(in1, in31);
+ const __m128i hi_1_31 = _mm_unpackhi_epi16(in1, in31);
+ const __m128i lo_17_15 = _mm_unpacklo_epi16(in17, in15);
+ const __m128i hi_17_15 = _mm_unpackhi_epi16(in17, in15);
+
+ const __m128i lo_9_23 = _mm_unpacklo_epi16(in9, in23);
+ const __m128i hi_9_23 = _mm_unpackhi_epi16(in9, in23);
+ const __m128i lo_25_7 = _mm_unpacklo_epi16(in25, in7);
+ const __m128i hi_25_7 = _mm_unpackhi_epi16(in25, in7);
+
+ const __m128i lo_5_27 = _mm_unpacklo_epi16(in5, in27);
+ const __m128i hi_5_27 = _mm_unpackhi_epi16(in5, in27);
+ const __m128i lo_21_11 = _mm_unpacklo_epi16(in21, in11);
+ const __m128i hi_21_11 = _mm_unpackhi_epi16(in21, in11);
+
+ const __m128i lo_13_19 = _mm_unpacklo_epi16(in13, in19);
+ const __m128i hi_13_19 = _mm_unpackhi_epi16(in13, in19);
+ const __m128i lo_29_3 = _mm_unpacklo_epi16(in29, in3);
+ const __m128i hi_29_3 = _mm_unpackhi_epi16(in29, in3);
+
+ MULTIPLICATION_AND_ADD(lo_1_31, hi_1_31, lo_17_15, hi_17_15, stg1_0,
+ stg1_1, stg1_2, stg1_3, stp1_16, stp1_31,
+ stp1_17, stp1_30)
+ MULTIPLICATION_AND_ADD(lo_9_23, hi_9_23, lo_25_7, hi_25_7, stg1_4,
+ stg1_5, stg1_6, stg1_7, stp1_18, stp1_29,
+ stp1_19, stp1_28)
+ MULTIPLICATION_AND_ADD(lo_5_27, hi_5_27, lo_21_11, hi_21_11, stg1_8,
+ stg1_9, stg1_10, stg1_11, stp1_20, stp1_27,
+ stp1_21, stp1_26)
+ MULTIPLICATION_AND_ADD(lo_13_19, hi_13_19, lo_29_3, hi_29_3, stg1_12,
+ stg1_13, stg1_14, stg1_15, stp1_22, stp1_25,
+ stp1_23, stp1_24)
+ }
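+
+ // MULTIPLICATION_AND_ADD (defined earlier in this file) applies the
+ // madd / round / DCT_CONST_BITS-shift butterfly to both the lo and hi
+ // halves, producing four rotated outputs per call; e.g. the first
+ // call above computes, per lane,
+ //
+ // stp1_16 = dct_const_round_shift(in1 * cospi_31_64 - in31 * cospi_1_64);
+ // stp1_31 = dct_const_round_shift(in1 * cospi_1_64 + in31 * cospi_31_64);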
+
+ // Stage2
+ {
+ const __m128i lo_2_30 = _mm_unpacklo_epi16(in2, in30);
+ const __m128i hi_2_30 = _mm_unpackhi_epi16(in2, in30);
+ const __m128i lo_18_14 = _mm_unpacklo_epi16(in18, in14);
+ const __m128i hi_18_14 = _mm_unpackhi_epi16(in18, in14);
+
+ const __m128i lo_10_22 = _mm_unpacklo_epi16(in10, in22);
+ const __m128i hi_10_22 = _mm_unpackhi_epi16(in10, in22);
+ const __m128i lo_26_6 = _mm_unpacklo_epi16(in26, in6);
+ const __m128i hi_26_6 = _mm_unpackhi_epi16(in26, in6);
+
+ MULTIPLICATION_AND_ADD(lo_2_30, hi_2_30, lo_18_14, hi_18_14, stg2_0,
+ stg2_1, stg2_2, stg2_3, stp2_8, stp2_15, stp2_9,
+ stp2_14)
+ MULTIPLICATION_AND_ADD(lo_10_22, hi_10_22, lo_26_6, hi_26_6, stg2_4,
+ stg2_5, stg2_6, stg2_7, stp2_10, stp2_13,
+ stp2_11, stp2_12)
+
+ stp2_16 = _mm_add_epi16(stp1_16, stp1_17);
+ stp2_17 = _mm_sub_epi16(stp1_16, stp1_17);
+ stp2_18 = _mm_sub_epi16(stp1_19, stp1_18);
+ stp2_19 = _mm_add_epi16(stp1_19, stp1_18);
+
+ stp2_20 = _mm_add_epi16(stp1_20, stp1_21);
+ stp2_21 = _mm_sub_epi16(stp1_20, stp1_21);
+ stp2_22 = _mm_sub_epi16(stp1_23, stp1_22);
+ stp2_23 = _mm_add_epi16(stp1_23, stp1_22);
+
+ stp2_24 = _mm_add_epi16(stp1_24, stp1_25);
+ stp2_25 = _mm_sub_epi16(stp1_24, stp1_25);
+ stp2_26 = _mm_sub_epi16(stp1_27, stp1_26);
+ stp2_27 = _mm_add_epi16(stp1_27, stp1_26);
+
+ stp2_28 = _mm_add_epi16(stp1_28, stp1_29);
+ stp2_29 = _mm_sub_epi16(stp1_28, stp1_29);
+ stp2_30 = _mm_sub_epi16(stp1_31, stp1_30);
+ stp2_31 = _mm_add_epi16(stp1_31, stp1_30);
+ }
+
+ // Stage3
+ {
+ const __m128i lo_4_28 = _mm_unpacklo_epi16(in4, in28);
+ const __m128i hi_4_28 = _mm_unpackhi_epi16(in4, in28);
+ const __m128i lo_20_12 = _mm_unpacklo_epi16(in20, in12);
+ const __m128i hi_20_12 = _mm_unpackhi_epi16(in20, in12);
+
+ const __m128i lo_17_30 = _mm_unpacklo_epi16(stp2_17, stp2_30);
+ const __m128i hi_17_30 = _mm_unpackhi_epi16(stp2_17, stp2_30);
+ const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29);
+ const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29);
+
+ const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);
+ const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);
+ const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25);
+ const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25);
+
+ MULTIPLICATION_AND_ADD(lo_4_28, hi_4_28, lo_20_12, hi_20_12, stg3_0,
+ stg3_1, stg3_2, stg3_3, stp1_4, stp1_7, stp1_5,
+ stp1_6)
+
+ stp1_8 = _mm_add_epi16(stp2_8, stp2_9);
+ stp1_9 = _mm_sub_epi16(stp2_8, stp2_9);
+ stp1_10 = _mm_sub_epi16(stp2_11, stp2_10);
+ stp1_11 = _mm_add_epi16(stp2_11, stp2_10);
+ stp1_12 = _mm_add_epi16(stp2_12, stp2_13);
+ stp1_13 = _mm_sub_epi16(stp2_12, stp2_13);
+ stp1_14 = _mm_sub_epi16(stp2_15, stp2_14);
+ stp1_15 = _mm_add_epi16(stp2_15, stp2_14);
+
+ MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4,
+ stg3_5, stg3_6, stg3_4, stp1_17, stp1_30,
+ stp1_18, stp1_29)
+ MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8,
+ stg3_9, stg3_10, stg3_8, stp1_21, stp1_26,
+ stp1_22, stp1_25)
+
+ stp1_16 = stp2_16;
+ stp1_31 = stp2_31;
+ stp1_19 = stp2_19;
+ stp1_20 = stp2_20;
+ stp1_23 = stp2_23;
+ stp1_24 = stp2_24;
+ stp1_27 = stp2_27;
+ stp1_28 = stp2_28;
+ }
+
+ // Stage4
+ {
+ const __m128i lo_0_16 = _mm_unpacklo_epi16(in0, in16);
+ const __m128i hi_0_16 = _mm_unpackhi_epi16(in0, in16);
+ const __m128i lo_8_24 = _mm_unpacklo_epi16(in8, in24);
+ const __m128i hi_8_24 = _mm_unpackhi_epi16(in8, in24);
+
+ const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14);
+ const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14);
+ const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);
+ const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);
+
+ MULTIPLICATION_AND_ADD(lo_0_16, hi_0_16, lo_8_24, hi_8_24, stg4_0,
+ stg4_1, stg4_2, stg4_3, stp2_0, stp2_1,
+ stp2_2, stp2_3)
+
+ stp2_4 = _mm_add_epi16(stp1_4, stp1_5);
+ stp2_5 = _mm_sub_epi16(stp1_4, stp1_5);
+ stp2_6 = _mm_sub_epi16(stp1_7, stp1_6);
+ stp2_7 = _mm_add_epi16(stp1_7, stp1_6);
+
+ MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4,
+ stg4_5, stg4_6, stg4_4, stp2_9, stp2_14,
+ stp2_10, stp2_13)
+
+ stp2_8 = stp1_8;
+ stp2_15 = stp1_15;
+ stp2_11 = stp1_11;
+ stp2_12 = stp1_12;
+
+ stp2_16 = _mm_add_epi16(stp1_16, stp1_19);
+ stp2_17 = _mm_add_epi16(stp1_17, stp1_18);
+ stp2_18 = _mm_sub_epi16(stp1_17, stp1_18);
+ stp2_19 = _mm_sub_epi16(stp1_16, stp1_19);
+ stp2_20 = _mm_sub_epi16(stp1_23, stp1_20);
+ stp2_21 = _mm_sub_epi16(stp1_22, stp1_21);
+ stp2_22 = _mm_add_epi16(stp1_22, stp1_21);
+ stp2_23 = _mm_add_epi16(stp1_23, stp1_20);
+
+ stp2_24 = _mm_add_epi16(stp1_24, stp1_27);
+ stp2_25 = _mm_add_epi16(stp1_25, stp1_26);
+ stp2_26 = _mm_sub_epi16(stp1_25, stp1_26);
+ stp2_27 = _mm_sub_epi16(stp1_24, stp1_27);
+ stp2_28 = _mm_sub_epi16(stp1_31, stp1_28);
+ stp2_29 = _mm_sub_epi16(stp1_30, stp1_29);
+ stp2_30 = _mm_add_epi16(stp1_29, stp1_30);
+ stp2_31 = _mm_add_epi16(stp1_28, stp1_31);
+ }
+
+ // Stage5
+ {
+ const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5);
+ const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5);
+ const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29);
+ const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29);
+
+ const __m128i lo_19_28 = _mm_unpacklo_epi16(stp2_19, stp2_28);
+ const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28);
+ const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27);
+ const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27);
+
+ const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);
+ const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);
+
+ stp1_0 = _mm_add_epi16(stp2_0, stp2_3);
+ stp1_1 = _mm_add_epi16(stp2_1, stp2_2);
+ stp1_2 = _mm_sub_epi16(stp2_1, stp2_2);
+ stp1_3 = _mm_sub_epi16(stp2_0, stp2_3);
+
+ tmp0 = _mm_madd_epi16(lo_6_5, stg4_1);
+ tmp1 = _mm_madd_epi16(hi_6_5, stg4_1);
+ tmp2 = _mm_madd_epi16(lo_6_5, stg4_0);
+ tmp3 = _mm_madd_epi16(hi_6_5, stg4_0);
+
+ tmp0 = _mm_add_epi32(tmp0, rounding);
+ tmp1 = _mm_add_epi32(tmp1, rounding);
+ tmp2 = _mm_add_epi32(tmp2, rounding);
+ tmp3 = _mm_add_epi32(tmp3, rounding);
+
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+ tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+ tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
+
+ stp1_5 = _mm_packs_epi32(tmp0, tmp1);
+ stp1_6 = _mm_packs_epi32(tmp2, tmp3);
+
+ stp1_4 = stp2_4;
+ stp1_7 = stp2_7;
+
+ stp1_8 = _mm_add_epi16(stp2_8, stp2_11);
+ stp1_9 = _mm_add_epi16(stp2_9, stp2_10);
+ stp1_10 = _mm_sub_epi16(stp2_9, stp2_10);
+ stp1_11 = _mm_sub_epi16(stp2_8, stp2_11);
+ stp1_12 = _mm_sub_epi16(stp2_15, stp2_12);
+ stp1_13 = _mm_sub_epi16(stp2_14, stp2_13);
+ stp1_14 = _mm_add_epi16(stp2_14, stp2_13);
+ stp1_15 = _mm_add_epi16(stp2_15, stp2_12);
+
+ stp1_16 = stp2_16;
+ stp1_17 = stp2_17;
+
+ MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4,
+ stg4_5, stg4_4, stg4_5, stp1_18, stp1_29,
+ stp1_19, stp1_28)
+ MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6,
+ stg4_4, stg4_6, stg4_4, stp1_20, stp1_27,
+ stp1_21, stp1_26)
+
+ stp1_22 = stp2_22;
+ stp1_23 = stp2_23;
+ stp1_24 = stp2_24;
+ stp1_25 = stp2_25;
+ stp1_30 = stp2_30;
+ stp1_31 = stp2_31;
+ }
+
+ // Stage6
+ {
+ const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);
+ const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13);
+ const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12);
+ const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12);
+
+ stp2_0 = _mm_add_epi16(stp1_0, stp1_7);
+ stp2_1 = _mm_add_epi16(stp1_1, stp1_6);
+ stp2_2 = _mm_add_epi16(stp1_2, stp1_5);
+ stp2_3 = _mm_add_epi16(stp1_3, stp1_4);
+ stp2_4 = _mm_sub_epi16(stp1_3, stp1_4);
+ stp2_5 = _mm_sub_epi16(stp1_2, stp1_5);
+ stp2_6 = _mm_sub_epi16(stp1_1, stp1_6);
+ stp2_7 = _mm_sub_epi16(stp1_0, stp1_7);
+
+ stp2_8 = stp1_8;
+ stp2_9 = stp1_9;
+ stp2_14 = stp1_14;
+ stp2_15 = stp1_15;
+
+ MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12,
+ stg6_0, stg4_0, stg6_0, stg4_0, stp2_10,
+ stp2_13, stp2_11, stp2_12)
+
+ stp2_16 = _mm_add_epi16(stp1_16, stp1_23);
+ stp2_17 = _mm_add_epi16(stp1_17, stp1_22);
+ stp2_18 = _mm_add_epi16(stp1_18, stp1_21);
+ stp2_19 = _mm_add_epi16(stp1_19, stp1_20);
+ stp2_20 = _mm_sub_epi16(stp1_19, stp1_20);
+ stp2_21 = _mm_sub_epi16(stp1_18, stp1_21);
+ stp2_22 = _mm_sub_epi16(stp1_17, stp1_22);
+ stp2_23 = _mm_sub_epi16(stp1_16, stp1_23);
+
+ stp2_24 = _mm_sub_epi16(stp1_31, stp1_24);
+ stp2_25 = _mm_sub_epi16(stp1_30, stp1_25);
+ stp2_26 = _mm_sub_epi16(stp1_29, stp1_26);
+ stp2_27 = _mm_sub_epi16(stp1_28, stp1_27);
+ stp2_28 = _mm_add_epi16(stp1_27, stp1_28);
+ stp2_29 = _mm_add_epi16(stp1_26, stp1_29);
+ stp2_30 = _mm_add_epi16(stp1_25, stp1_30);
+ stp2_31 = _mm_add_epi16(stp1_24, stp1_31);
+ }
+
+ // Stage7
+ {
+ const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27);
+ const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27);
+ const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);
+ const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);
+
+ const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25);
+ const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25);
+ const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24);
+ const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24);
+
+ stp1_0 = _mm_add_epi16(stp2_0, stp2_15);
+ stp1_1 = _mm_add_epi16(stp2_1, stp2_14);
+ stp1_2 = _mm_add_epi16(stp2_2, stp2_13);
+ stp1_3 = _mm_add_epi16(stp2_3, stp2_12);
+ stp1_4 = _mm_add_epi16(stp2_4, stp2_11);
+ stp1_5 = _mm_add_epi16(stp2_5, stp2_10);
+ stp1_6 = _mm_add_epi16(stp2_6, stp2_9);
+ stp1_7 = _mm_add_epi16(stp2_7, stp2_8);
+ stp1_8 = _mm_sub_epi16(stp2_7, stp2_8);
+ stp1_9 = _mm_sub_epi16(stp2_6, stp2_9);
+ stp1_10 = _mm_sub_epi16(stp2_5, stp2_10);
+ stp1_11 = _mm_sub_epi16(stp2_4, stp2_11);
+ stp1_12 = _mm_sub_epi16(stp2_3, stp2_12);
+ stp1_13 = _mm_sub_epi16(stp2_2, stp2_13);
+ stp1_14 = _mm_sub_epi16(stp2_1, stp2_14);
+ stp1_15 = _mm_sub_epi16(stp2_0, stp2_15);
+
+ stp1_16 = stp2_16;
+ stp1_17 = stp2_17;
+ stp1_18 = stp2_18;
+ stp1_19 = stp2_19;
+
+ MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0,
+ stg4_0, stg6_0, stg4_0, stp1_20, stp1_27,
+ stp1_21, stp1_26)
+ MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0,
+ stg4_0, stg6_0, stg4_0, stp1_22, stp1_25,
+ stp1_23, stp1_24)
+
+ stp1_28 = stp2_28;
+ stp1_29 = stp2_29;
+ stp1_30 = stp2_30;
+ stp1_31 = stp2_31;
+ }
+
+ // Final stage: a pure add/sub butterfly whose outputs mirror around
+ // the centre of the block.
+ if (i < 4) {
+ // 1-D: store the 32 intermediate results for this 8x32 slice.
+ col[i * 32 + 0] = _mm_add_epi16(stp1_0, stp1_31);
+ col[i * 32 + 1] = _mm_add_epi16(stp1_1, stp1_30);
+ col[i * 32 + 2] = _mm_add_epi16(stp1_2, stp1_29);
+ col[i * 32 + 3] = _mm_add_epi16(stp1_3, stp1_28);
+ col[i * 32 + 4] = _mm_add_epi16(stp1_4, stp1_27);
+ col[i * 32 + 5] = _mm_add_epi16(stp1_5, stp1_26);
+ col[i * 32 + 6] = _mm_add_epi16(stp1_6, stp1_25);
+ col[i * 32 + 7] = _mm_add_epi16(stp1_7, stp1_24);
+ col[i * 32 + 8] = _mm_add_epi16(stp1_8, stp1_23);
+ col[i * 32 + 9] = _mm_add_epi16(stp1_9, stp1_22);
+ col[i * 32 + 10] = _mm_add_epi16(stp1_10, stp1_21);
+ col[i * 32 + 11] = _mm_add_epi16(stp1_11, stp1_20);
+ col[i * 32 + 12] = _mm_add_epi16(stp1_12, stp1_19);
+ col[i * 32 + 13] = _mm_add_epi16(stp1_13, stp1_18);
+ col[i * 32 + 14] = _mm_add_epi16(stp1_14, stp1_17);
+ col[i * 32 + 15] = _mm_add_epi16(stp1_15, stp1_16);
+ col[i * 32 + 16] = _mm_sub_epi16(stp1_15, stp1_16);
+ col[i * 32 + 17] = _mm_sub_epi16(stp1_14, stp1_17);
+ col[i * 32 + 18] = _mm_sub_epi16(stp1_13, stp1_18);
+ col[i * 32 + 19] = _mm_sub_epi16(stp1_12, stp1_19);
+ col[i * 32 + 20] = _mm_sub_epi16(stp1_11, stp1_20);
+ col[i * 32 + 21] = _mm_sub_epi16(stp1_10, stp1_21);
+ col[i * 32 + 22] = _mm_sub_epi16(stp1_9, stp1_22);
+ col[i * 32 + 23] = _mm_sub_epi16(stp1_8, stp1_23);
+ col[i * 32 + 24] = _mm_sub_epi16(stp1_7, stp1_24);
+ col[i * 32 + 25] = _mm_sub_epi16(stp1_6, stp1_25);
+ col[i * 32 + 26] = _mm_sub_epi16(stp1_5, stp1_26);
+ col[i * 32 + 27] = _mm_sub_epi16(stp1_4, stp1_27);
+ col[i * 32 + 28] = _mm_sub_epi16(stp1_3, stp1_28);
+ col[i * 32 + 29] = _mm_sub_epi16(stp1_2, stp1_29);
+ col[i * 32 + 30] = _mm_sub_epi16(stp1_1, stp1_30);
+ col[i * 32 + 31] = _mm_sub_epi16(stp1_0, stp1_31);
+ } else {
+ const __m128i zero = _mm_setzero_si128();
+
+ // 2-D: calculate the final results and store them to the destination.
+ in0 = _mm_add_epi16(stp1_0, stp1_31);
+ in1 = _mm_add_epi16(stp1_1, stp1_30);
+ in2 = _mm_add_epi16(stp1_2, stp1_29);
+ in3 = _mm_add_epi16(stp1_3, stp1_28);
+ in4 = _mm_add_epi16(stp1_4, stp1_27);
+ in5 = _mm_add_epi16(stp1_5, stp1_26);
+ in6 = _mm_add_epi16(stp1_6, stp1_25);
+ in7 = _mm_add_epi16(stp1_7, stp1_24);
+ in8 = _mm_add_epi16(stp1_8, stp1_23);
+ in9 = _mm_add_epi16(stp1_9, stp1_22);
+ in10 = _mm_add_epi16(stp1_10, stp1_21);
+ in11 = _mm_add_epi16(stp1_11, stp1_20);
+ in12 = _mm_add_epi16(stp1_12, stp1_19);
+ in13 = _mm_add_epi16(stp1_13, stp1_18);
+ in14 = _mm_add_epi16(stp1_14, stp1_17);
+ in15 = _mm_add_epi16(stp1_15, stp1_16);
+ in16 = _mm_sub_epi16(stp1_15, stp1_16);
+ in17 = _mm_sub_epi16(stp1_14, stp1_17);
+ in18 = _mm_sub_epi16(stp1_13, stp1_18);
+ in19 = _mm_sub_epi16(stp1_12, stp1_19);
+ in20 = _mm_sub_epi16(stp1_11, stp1_20);
+ in21 = _mm_sub_epi16(stp1_10, stp1_21);
+ in22 = _mm_sub_epi16(stp1_9, stp1_22);
+ in23 = _mm_sub_epi16(stp1_8, stp1_23);
+ in24 = _mm_sub_epi16(stp1_7, stp1_24);
+ in25 = _mm_sub_epi16(stp1_6, stp1_25);
+ in26 = _mm_sub_epi16(stp1_5, stp1_26);
+ in27 = _mm_sub_epi16(stp1_4, stp1_27);
+ in28 = _mm_sub_epi16(stp1_3, stp1_28);
+ in29 = _mm_sub_epi16(stp1_2, stp1_29);
+ in30 = _mm_sub_epi16(stp1_1, stp1_30);
+ in31 = _mm_sub_epi16(stp1_0, stp1_31);
+
+ // Final rounding and shift
+ in0 = _mm_adds_epi16(in0, final_rounding);
+ in1 = _mm_adds_epi16(in1, final_rounding);
+ in2 = _mm_adds_epi16(in2, final_rounding);
+ in3 = _mm_adds_epi16(in3, final_rounding);
+ in4 = _mm_adds_epi16(in4, final_rounding);
+ in5 = _mm_adds_epi16(in5, final_rounding);
+ in6 = _mm_adds_epi16(in6, final_rounding);
+ in7 = _mm_adds_epi16(in7, final_rounding);
+ in8 = _mm_adds_epi16(in8, final_rounding);
+ in9 = _mm_adds_epi16(in9, final_rounding);
+ in10 = _mm_adds_epi16(in10, final_rounding);
+ in11 = _mm_adds_epi16(in11, final_rounding);
+ in12 = _mm_adds_epi16(in12, final_rounding);
+ in13 = _mm_adds_epi16(in13, final_rounding);
+ in14 = _mm_adds_epi16(in14, final_rounding);
+ in15 = _mm_adds_epi16(in15, final_rounding);
+ in16 = _mm_adds_epi16(in16, final_rounding);
+ in17 = _mm_adds_epi16(in17, final_rounding);
+ in18 = _mm_adds_epi16(in18, final_rounding);
+ in19 = _mm_adds_epi16(in19, final_rounding);
+ in20 = _mm_adds_epi16(in20, final_rounding);
+ in21 = _mm_adds_epi16(in21, final_rounding);
+ in22 = _mm_adds_epi16(in22, final_rounding);
+ in23 = _mm_adds_epi16(in23, final_rounding);
+ in24 = _mm_adds_epi16(in24, final_rounding);
+ in25 = _mm_adds_epi16(in25, final_rounding);
+ in26 = _mm_adds_epi16(in26, final_rounding);
+ in27 = _mm_adds_epi16(in27, final_rounding);
+ in28 = _mm_adds_epi16(in28, final_rounding);
+ in29 = _mm_adds_epi16(in29, final_rounding);
+ in30 = _mm_adds_epi16(in30, final_rounding);
+ in31 = _mm_adds_epi16(in31, final_rounding);
+
+ in0 = _mm_srai_epi16(in0, 6);
+ in1 = _mm_srai_epi16(in1, 6);
+ in2 = _mm_srai_epi16(in2, 6);
+ in3 = _mm_srai_epi16(in3, 6);
+ in4 = _mm_srai_epi16(in4, 6);
+ in5 = _mm_srai_epi16(in5, 6);
+ in6 = _mm_srai_epi16(in6, 6);
+ in7 = _mm_srai_epi16(in7, 6);
+ in8 = _mm_srai_epi16(in8, 6);
+ in9 = _mm_srai_epi16(in9, 6);
+ in10 = _mm_srai_epi16(in10, 6);
+ in11 = _mm_srai_epi16(in11, 6);
+ in12 = _mm_srai_epi16(in12, 6);
+ in13 = _mm_srai_epi16(in13, 6);
+ in14 = _mm_srai_epi16(in14, 6);
+ in15 = _mm_srai_epi16(in15, 6);
+ in16 = _mm_srai_epi16(in16, 6);
+ in17 = _mm_srai_epi16(in17, 6);
+ in18 = _mm_srai_epi16(in18, 6);
+ in19 = _mm_srai_epi16(in19, 6);
+ in20 = _mm_srai_epi16(in20, 6);
+ in21 = _mm_srai_epi16(in21, 6);
+ in22 = _mm_srai_epi16(in22, 6);
+ in23 = _mm_srai_epi16(in23, 6);
+ in24 = _mm_srai_epi16(in24, 6);
+ in25 = _mm_srai_epi16(in25, 6);
+ in26 = _mm_srai_epi16(in26, 6);
+ in27 = _mm_srai_epi16(in27, 6);
+ in28 = _mm_srai_epi16(in28, 6);
+ in29 = _mm_srai_epi16(in29, 6);
+ in30 = _mm_srai_epi16(in30, 6);
+ in31 = _mm_srai_epi16(in31, 6);
+
+ RECON_AND_STORE(dest, in0);
+ RECON_AND_STORE(dest, in1);
+ RECON_AND_STORE(dest, in2);
+ RECON_AND_STORE(dest, in3);
+ RECON_AND_STORE(dest, in4);
+ RECON_AND_STORE(dest, in5);
+ RECON_AND_STORE(dest, in6);
+ RECON_AND_STORE(dest, in7);
+ RECON_AND_STORE(dest, in8);
+ RECON_AND_STORE(dest, in9);
+ RECON_AND_STORE(dest, in10);
+ RECON_AND_STORE(dest, in11);
+ RECON_AND_STORE(dest, in12);
+ RECON_AND_STORE(dest, in13);
+ RECON_AND_STORE(dest, in14);
+ RECON_AND_STORE(dest, in15);
+ RECON_AND_STORE(dest, in16);
+ RECON_AND_STORE(dest, in17);
+ RECON_AND_STORE(dest, in18);
+ RECON_AND_STORE(dest, in19);
+ RECON_AND_STORE(dest, in20);
+ RECON_AND_STORE(dest, in21);
+ RECON_AND_STORE(dest, in22);
+ RECON_AND_STORE(dest, in23);
+ RECON_AND_STORE(dest, in24);
+ RECON_AND_STORE(dest, in25);
+ RECON_AND_STORE(dest, in26);
+ RECON_AND_STORE(dest, in27);
+ RECON_AND_STORE(dest, in28);
+ RECON_AND_STORE(dest, in29);
+ RECON_AND_STORE(dest, in30);
+ RECON_AND_STORE(dest, in31);
+
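+ // The 32 stores above advanced dest by 32 rows; step back to the top
+ // and move 8 pixels right for the next 8-column slice.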
+ dest += 8 - (stride * 32);
+ }
+ }
+}
diff --git a/libvpx/vp9/common/x86/vp9_iwalsh_mmx.asm b/libvpx/vp9/common/x86/vp9_iwalsh_mmx.asm
new file mode 100644
index 0000000..1af2521
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_iwalsh_mmx.asm
@@ -0,0 +1,173 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+;void vp9_short_inv_walsh4x4_1_mmx(short *input, short *output)
+global sym(vp9_short_inv_walsh4x4_1_mmx) PRIVATE
+sym(vp9_short_inv_walsh4x4_1_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 2
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0)
+ mov rax, 3
+
+ mov rdi, arg(1)
+ add rax, [rsi] ;input[0] + 3
+
+ movd mm0, eax
+
+ punpcklwd mm0, mm0 ;x x val val
+
+ punpckldq mm0, mm0 ;val val val val
+
+ psraw mm0, 3 ;(input[0] + 3) >> 3
+
+ movq [rdi + 0], mm0
+ movq [rdi + 8], mm0
+ movq [rdi + 16], mm0
+ movq [rdi + 24], mm0
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
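+; Inverse 4x4 Walsh-Hadamard transform: two butterfly passes with a
+; transpose between them. A scalar sketch of one pass (per column):
+;   a1 = ip[0] + ip[12]    b1 = ip[4] + ip[8]
+;   d1 = ip[0] - ip[12]    c1 = ip[4] - ip[8]
+;   op[0] = a1 + b1        op[4]  = d1 + c1
+;   op[8] = a1 - b1        op[12] = d1 - c1
+; followed by a final (x + 3) >> 3 rounding after the second pass.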
+;void vp9_short_inv_walsh4x4_mmx(short *input, short *output)
+global sym(vp9_short_inv_walsh4x4_mmx) PRIVATE
+sym(vp9_short_inv_walsh4x4_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 2
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rax, 3
+ mov rsi, arg(0)
+ mov rdi, arg(1)
+ shl rax, 16
+
+ movq mm0, [rsi + 0] ;ip[0]
+ movq mm1, [rsi + 8] ;ip[4]
+ or rax, 3 ;00030003h
+
+ movq mm2, [rsi + 16] ;ip[8]
+ movq mm3, [rsi + 24] ;ip[12]
+
+ movq mm7, rax
+ movq mm4, mm0
+
+ punpcklwd mm7, mm7 ;0003000300030003h
+ movq mm5, mm1
+
+ paddw mm4, mm3 ;ip[0] + ip[12] aka a1
+ paddw mm5, mm2 ;ip[4] + ip[8] aka b1
+
+ movq mm6, mm4 ;temp a1
+
+ paddw mm4, mm5 ;a1 + b1
+ psubw mm6, mm5 ;a1 - b1
+
+ psubw mm0, mm3 ;ip[0] - ip[12] aka d1
+ psubw mm1, mm2 ;ip[4] - ip[8] aka c1
+
+ movq mm5, mm0 ;temp d1
+
+ paddw mm0, mm1 ;d1 + c1
+ psubw mm5, mm1 ;d1 - c1
+
+ ; 03 02 01 00
+ ; 13 12 11 10
+ ; 23 22 21 20
+ ; 33 32 31 30
+
+ movq mm3, mm4 ; 03 02 01 00
+ punpcklwd mm4, mm0 ; 11 01 10 00
+ punpckhwd mm3, mm0 ; 13 03 12 02
+
+ movq mm1, mm6 ; 23 22 21 20
+ punpcklwd mm6, mm5 ; 31 21 30 20
+ punpckhwd mm1, mm5 ; 33 23 32 22
+
+ movq mm0, mm4 ; 11 01 10 00
+ movq mm2, mm3 ; 13 03 12 02
+
+ punpckldq mm0, mm6 ; 30 20 10 00 aka ip[0]
+ punpckhdq mm4, mm6 ; 31 21 11 01 aka ip[4]
+
+ punpckldq mm2, mm1 ; 32 22 12 02 aka ip[8]
+ punpckhdq mm3, mm1 ; 33 23 13 03 aka ip[12]
+;~~~~~~~~~~~~~~~~~~~~~
+ movq mm1, mm0
+ movq mm5, mm4
+
+ paddw mm1, mm3 ;ip[0] + ip[12] aka a1
+ paddw mm5, mm2 ;ip[4] + ip[8] aka b1
+
+ movq mm6, mm1 ;temp a1
+
+ paddw mm1, mm5 ;a1 + b1
+ psubw mm6, mm5 ;a1 - b1
+
+ psubw mm0, mm3 ;ip[0] - ip[12] aka d1
+ psubw mm4, mm2 ;ip[4] - ip[8] aka c1
+
+ movq mm5, mm0 ;temp d1
+
+ paddw mm0, mm4 ;d1 + c1
+ psubw mm5, mm4 ;d1 - c1
+;~~~~~~~~~~~~~~~~~~~~~
+ movq mm3, mm1 ; 03 02 01 00
+ punpcklwd mm1, mm0 ; 11 01 10 00
+ punpckhwd mm3, mm0 ; 13 03 12 02
+
+ movq mm4, mm6 ; 23 22 21 20
+ punpcklwd mm6, mm5 ; 31 21 30 20
+ punpckhwd mm4, mm5 ; 33 23 32 22
+
+ movq mm0, mm1 ; 11 01 10 00
+ movq mm2, mm3 ; 13 03 12 02
+
+ punpckldq mm0, mm6 ; 30 20 10 00 aka ip[0]
+ punpckhdq mm1, mm6 ; 31 21 11 01 aka ip[4]
+
+ punpckldq mm2, mm4 ; 32 22 12 02 aka ip[8]
+ punpckhdq mm3, mm4 ; 33 23 13 03 aka ip[12]
+
+ paddw mm0, mm7
+ paddw mm1, mm7
+ paddw mm2, mm7
+ paddw mm3, mm7
+
+ psraw mm0, 3
+ psraw mm1, 3
+ psraw mm2, 3
+ psraw mm3, 3
+
+ movq [rdi + 0], mm0
+ movq [rdi + 8], mm1
+ movq [rdi + 16], mm2
+ movq [rdi + 24], mm3
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
diff --git a/libvpx/vp9/common/x86/vp9_iwalsh_sse2.asm b/libvpx/vp9/common/x86/vp9_iwalsh_sse2.asm
new file mode 100644
index 0000000..84fa2fe
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_iwalsh_sse2.asm
@@ -0,0 +1,119 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
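+; Same transform as the MMX version above, but the whole 4x4 block is
+; held in two xmm registers (two rows each), so each butterfly pass is
+; roughly a single paddw/psubw pair plus shuffles.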
+;void vp9_short_inv_walsh4x4_sse2(short *input, short *output)
+global sym(vp9_short_inv_walsh4x4_sse2) PRIVATE
+sym(vp9_short_inv_walsh4x4_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 2
+ SAVE_XMM 6
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0)
+ mov rdi, arg(1)
+ mov rax, 3
+
+ movdqa xmm0, [rsi + 0] ;ip[4] ip[0]
+ movdqa xmm1, [rsi + 16] ;ip[12] ip[8]
+
+ shl rax, 16
+ or rax, 3 ;00030003h
+
+ pshufd xmm2, xmm1, 4eh ;ip[8] ip[12]
+ movdqa xmm3, xmm0 ;ip[4] ip[0]
+
+ paddw xmm0, xmm2 ;ip[4]+ip[8] ip[0]+ip[12] aka b1 a1
+ psubw xmm3, xmm2 ;ip[4]-ip[8] ip[0]-ip[12] aka c1 d1
+
+ movdqa xmm4, xmm0
+ punpcklqdq xmm0, xmm3 ;d1 a1
+ punpckhqdq xmm4, xmm3 ;c1 b1
+ movd xmm6, eax
+
+ movdqa xmm1, xmm4 ;c1 b1
+ paddw xmm4, xmm0 ;d1+c1 a1+b1 aka op[4] op[0]
+ psubw xmm0, xmm1 ;d1-c1 a1-b1 aka op[12] op[8]
+
+
+;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ ; 13 12 11 10 03 02 01 00
+ ;
+ ; 33 32 31 30 23 22 21 20
+ ;
+ movdqa xmm3, xmm4 ; 13 12 11 10 03 02 01 00
+ punpcklwd xmm4, xmm0 ; 23 03 22 02 21 01 20 00
+ punpckhwd xmm3, xmm0 ; 33 13 32 12 31 11 30 10
+ movdqa xmm1, xmm4 ; 23 03 22 02 21 01 20 00
+ punpcklwd xmm4, xmm3 ; 31 21 11 01 30 20 10 00
+ punpckhwd xmm1, xmm3 ; 33 23 13 03 32 22 12 02
+ ;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ pshufd xmm2, xmm1, 4eh ;ip[8] ip[12]
+ movdqa xmm3, xmm4 ;ip[4] ip[0]
+
+ pshufd xmm6, xmm6, 0 ;03 03 03 03 03 03 03 03
+
+ paddw xmm4, xmm2 ;ip[4]+ip[8] ip[0]+ip[12] aka b1 a1
+ psubw xmm3, xmm2 ;ip[4]-ip[8] ip[0]-ip[12] aka c1 d1
+
+ movdqa xmm5, xmm4
+ punpcklqdq xmm4, xmm3 ;d1 a1
+ punpckhqdq xmm5, xmm3 ;c1 b1
+
+ movdqa xmm1, xmm5 ;c1 b1
+ paddw xmm5, xmm4 ;d1+c1 a1+b1 aka op[4] op[0]
+ psubw xmm4, xmm1 ;d1-c1 a1-b1 aka op[12] op[8]
+;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ ; 13 12 11 10 03 02 01 00
+ ;
+ ; 33 32 31 30 23 22 21 20
+ ;
+ movdqa xmm0, xmm5 ; 13 12 11 10 03 02 01 00
+ punpcklwd xmm5, xmm4 ; 23 03 22 02 21 01 20 00
+ punpckhwd xmm0, xmm4 ; 33 13 32 12 31 11 30 10
+ movdqa xmm1, xmm5 ; 23 03 22 02 21 01 20 00
+ punpcklwd xmm5, xmm0 ; 31 21 11 01 30 20 10 00
+ punpckhwd xmm1, xmm0 ; 33 23 13 03 32 22 12 02
+;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ paddw xmm5, xmm6
+ paddw xmm1, xmm6
+
+ psraw xmm5, 3
+ psraw xmm1, 3
+
+ movdqa [rdi + 0], xmm5
+ movdqa [rdi + 16], xmm1
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+SECTION_RODATA
+align 16
+x_s1sqr2:
+ times 4 dw 0x8A8C
+align 16
+x_c1sqr2less1:
+ times 4 dw 0x4E7B
+align 16
+fours:
+ times 4 dw 0x0004
diff --git a/libvpx/vp9/common/x86/vp9_loopfilter_intrin_sse2.c b/libvpx/vp9/common/x86/vp9_loopfilter_intrin_sse2.c
new file mode 100644
index 0000000..50f890a
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_loopfilter_intrin_sse2.c
@@ -0,0 +1,1013 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <emmintrin.h> /* SSE2 */
+#include "vp9/common/vp9_loopfilter.h"
+#include "vpx_ports/emmintrin_compat.h"
+
+prototype_loopfilter(vp9_loop_filter_vertical_edge_sse2);
+prototype_loopfilter(vp9_loop_filter_horizontal_edge_sse2);
+
+extern loop_filter_uvfunction vp9_loop_filter_horizontal_edge_uv_sse2;
+extern loop_filter_uvfunction vp9_loop_filter_vertical_edge_uv_sse2;
+
+void vp9_mb_lpf_horizontal_edge_w_sse2(unsigned char *s,
+ int p,
+ const unsigned char *_blimit,
+ const unsigned char *_limit,
+ const unsigned char *_thresh) {
+ DECLARE_ALIGNED(16, unsigned char, flat2_op[7][8]);
+ DECLARE_ALIGNED(16, unsigned char, flat2_oq[7][8]);
+
+ DECLARE_ALIGNED(16, unsigned char, flat_op[3][8]);
+ DECLARE_ALIGNED(16, unsigned char, flat_oq[3][8]);
+
+ DECLARE_ALIGNED(16, unsigned char, ap[8][8]);
+ DECLARE_ALIGNED(16, unsigned char, aq[8][8]);
+
+
+ __m128i mask, hev, flat, flat2;
+ const __m128i zero = _mm_set1_epi16(0);
+ const __m128i one = _mm_set1_epi8(1);
+ __m128i p7, p6, p5;
+ __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4;
+ __m128i q5, q6, q7;
+ int i = 0;
+ const unsigned int extended_thresh = _thresh[0] * 0x01010101u;
+ const unsigned int extended_limit = _limit[0] * 0x01010101u;
+ const unsigned int extended_blimit = _blimit[0] * 0x01010101u;
+ const __m128i thresh =
+ _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_thresh), 0);
+ const __m128i limit =
+ _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_limit), 0);
+ const __m128i blimit =
+ _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_blimit), 0);
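+
+ // Each single-byte parameter is broadcast to all 16 lanes: the
+ // 0x01010101 multiply replicates the byte into a 32-bit word and
+ // _mm_shuffle_epi32(..., 0) splats that word across the register.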
+
+ p4 = _mm_loadl_epi64((__m128i *)(s - 5 * p));
+ p3 = _mm_loadl_epi64((__m128i *)(s - 4 * p));
+ p2 = _mm_loadl_epi64((__m128i *)(s - 3 * p));
+ p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p));
+ p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p));
+ q0 = _mm_loadl_epi64((__m128i *)(s - 0 * p));
+ q1 = _mm_loadl_epi64((__m128i *)(s + 1 * p));
+ q2 = _mm_loadl_epi64((__m128i *)(s + 2 * p));
+ q3 = _mm_loadl_epi64((__m128i *)(s + 3 * p));
+ q4 = _mm_loadl_epi64((__m128i *)(s + 4 * p));
+
+ _mm_storel_epi64((__m128i *)ap[4], p4);
+ _mm_storel_epi64((__m128i *)ap[3], p3);
+ _mm_storel_epi64((__m128i *)ap[2], p2);
+ _mm_storel_epi64((__m128i *)ap[1], p1);
+ _mm_storel_epi64((__m128i *)ap[0], p0);
+ _mm_storel_epi64((__m128i *)aq[4], q4);
+ _mm_storel_epi64((__m128i *)aq[3], q3);
+ _mm_storel_epi64((__m128i *)aq[2], q2);
+ _mm_storel_epi64((__m128i *)aq[1], q1);
+ _mm_storel_epi64((__m128i *)aq[0], q0);
+
+
+ {
+ const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu8(p1, p0),
+ _mm_subs_epu8(p0, p1));
+ const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu8(q1, q0),
+ _mm_subs_epu8(q0, q1));
+ const __m128i fe = _mm_set1_epi8(0xfe);
+ const __m128i ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0);
+ __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu8(p0, q0),
+ _mm_subs_epu8(q0, p0));
+ __m128i abs_p1q1 = _mm_or_si128(_mm_subs_epu8(p1, q1),
+ _mm_subs_epu8(q1, p1));
+ __m128i work;
+ flat = _mm_max_epu8(abs_p1p0, abs_q1q0);
+ hev = _mm_subs_epu8(flat, thresh);
+ hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
+
+ abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
+ abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
+ mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
+ mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
+ // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
+ mask = _mm_max_epu8(flat, mask);
+ // mask |= (abs(p1 - p0) > limit) * -1;
+ // mask |= (abs(q1 - q0) > limit) * -1;
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p1),
+ _mm_subs_epu8(p1, p2)),
+ _mm_or_si128(_mm_subs_epu8(p3, p2),
+ _mm_subs_epu8(p2, p3)));
+ mask = _mm_max_epu8(work, mask);
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(q2, q1),
+ _mm_subs_epu8(q1, q2)),
+ _mm_or_si128(_mm_subs_epu8(q3, q2),
+ _mm_subs_epu8(q2, q3)));
+ mask = _mm_max_epu8(work, mask);
+ mask = _mm_subs_epu8(mask, limit);
+ mask = _mm_cmpeq_epi8(mask, zero);
+ }
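+
+ // mask is now 0xff in every lane where the edge passes the filter
+ // test: |p0-q0| * 2 + |p1-q1| / 2 <= blimit and every neighbouring
+ // pixel delta <= limit, all computed branchlessly with saturating
+ // subtracts and byte compares.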
+
+ // lp filter
+ {
+ const __m128i t4 = _mm_set1_epi8(4);
+ const __m128i t3 = _mm_set1_epi8(3);
+ const __m128i t80 = _mm_set1_epi8(0x80);
+ const __m128i te0 = _mm_set1_epi8(0xe0);
+ const __m128i t1f = _mm_set1_epi8(0x1f);
+ const __m128i t1 = _mm_set1_epi8(0x1);
+ const __m128i t7f = _mm_set1_epi8(0x7f);
+
+ __m128i ps1 = _mm_xor_si128(p1, t80);
+ __m128i ps0 = _mm_xor_si128(p0, t80);
+ __m128i qs0 = _mm_xor_si128(q0, t80);
+ __m128i qs1 = _mm_xor_si128(q1, t80);
+ __m128i filt;
+ __m128i work_a;
+ __m128i filter1, filter2;
+
+ filt = _mm_and_si128(_mm_subs_epi8(ps1, qs1), hev);
+ work_a = _mm_subs_epi8(qs0, ps0);
+ filt = _mm_adds_epi8(filt, work_a);
+ filt = _mm_adds_epi8(filt, work_a);
+ filt = _mm_adds_epi8(filt, work_a);
+ /* (vp9_filter + 3 * (qs0 - ps0)) & mask */
+ filt = _mm_and_si128(filt, mask);
+
+ filter1 = _mm_adds_epi8(filt, t4);
+ filter2 = _mm_adds_epi8(filt, t3);
+
+ /* Filter1 >> 3 */
+ work_a = _mm_cmpgt_epi8(zero, filter1);
+ filter1 = _mm_srli_epi16(filter1, 3);
+ work_a = _mm_and_si128(work_a, te0);
+ filter1 = _mm_and_si128(filter1, t1f);
+ filter1 = _mm_or_si128(filter1, work_a);
+ qs0 = _mm_xor_si128(_mm_subs_epi8(qs0, filter1), t80);
+
+ /* Filter2 >> 3 */
+ work_a = _mm_cmpgt_epi8(zero, filter2);
+ filter2 = _mm_srli_epi16(filter2, 3);
+ work_a = _mm_and_si128(work_a, te0);
+ filter2 = _mm_and_si128(filter2, t1f);
+ filter2 = _mm_or_si128(filter2, work_a);
+ ps0 = _mm_xor_si128(_mm_adds_epi8(ps0, filter2), t80);
+
+ /* filt >> 1 */
+ filt = _mm_adds_epi8(filter1, t1);
+ work_a = _mm_cmpgt_epi8(zero, filt);
+ filt = _mm_srli_epi16(filt, 1);
+ work_a = _mm_and_si128(work_a, t80);
+ filt = _mm_and_si128(filt, t7f);
+ filt = _mm_or_si128(filt, work_a);
+ filt = _mm_andnot_si128(hev, filt);
+ ps1 = _mm_xor_si128(_mm_adds_epi8(ps1, filt), t80);
+ qs1 = _mm_xor_si128(_mm_subs_epi8(qs1, filt), t80);
+ // loopfilter done
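+
+ // The shifts above emulate per-byte arithmetic shifts, which SSE2
+ // lacks: shift the 16-bit lanes logically, mask off the bits that
+ // leaked in from the neighbouring byte (t1f, t7f), then re-insert
+ // the sign bits captured by the cmpgt masks (te0, t80).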
+
+ {
+ __m128i work;
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p0),
+ _mm_subs_epu8(p0, p2)),
+ _mm_or_si128(_mm_subs_epu8(q2, q0),
+ _mm_subs_epu8(q0, q2)));
+ flat = _mm_max_epu8(work, flat);
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p3, p0),
+ _mm_subs_epu8(p0, p3)),
+ _mm_or_si128(_mm_subs_epu8(q3, q0),
+ _mm_subs_epu8(q0, q3)));
+ flat = _mm_max_epu8(work, flat);
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p4, p0),
+ _mm_subs_epu8(p0, p4)),
+ _mm_or_si128(_mm_subs_epu8(q4, q0),
+ _mm_subs_epu8(q0, q4)));
+ flat = _mm_subs_epu8(flat, one);
+ flat = _mm_cmpeq_epi8(flat, zero);
+ flat = _mm_and_si128(flat, mask);
+
+ p5 = _mm_loadl_epi64((__m128i *)(s - 6 * p));
+ q5 = _mm_loadl_epi64((__m128i *)(s + 5 * p));
+ flat2 = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p5, p0),
+ _mm_subs_epu8(p0, p5)),
+ _mm_or_si128(_mm_subs_epu8(q5, q0),
+ _mm_subs_epu8(q0, q5)));
+ _mm_storel_epi64((__m128i *)ap[5], p5);
+ _mm_storel_epi64((__m128i *)aq[5], q5);
+ flat2 = _mm_max_epu8(work, flat2);
+ p6 = _mm_loadl_epi64((__m128i *)(s - 7 * p));
+ q6 = _mm_loadl_epi64((__m128i *)(s + 6 * p));
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p6, p0),
+ _mm_subs_epu8(p0, p6)),
+ _mm_or_si128(_mm_subs_epu8(q6, q0),
+ _mm_subs_epu8(q0, q6)));
+ _mm_storel_epi64((__m128i *)ap[6], p6);
+ _mm_storel_epi64((__m128i *)aq[6], q6);
+ flat2 = _mm_max_epu8(work, flat2);
+
+ p7 = _mm_loadl_epi64((__m128i *)(s - 8 * p));
+ q7 = _mm_loadl_epi64((__m128i *)(s + 7 * p));
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p7, p0),
+ _mm_subs_epu8(p0, p7)),
+ _mm_or_si128(_mm_subs_epu8(q7, q0),
+ _mm_subs_epu8(q0, q7)));
+ _mm_storel_epi64((__m128i *)ap[7], p7);
+ _mm_storel_epi64((__m128i *)aq[7], q7);
+ flat2 = _mm_max_epu8(work, flat2);
+ flat2 = _mm_subs_epu8(flat2, one);
+ flat2 = _mm_cmpeq_epi8(flat2, zero);
+ flat2 = _mm_and_si128(flat2, flat); // flat2 & flat & mask
+ }
+
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ // flat and wide flat calculations
+ {
+ const __m128i eight = _mm_set1_epi16(8);
+ const __m128i four = _mm_set1_epi16(4);
+ {
+ __m128i workp_shft;
+ __m128i a, b, c;
+
+ p7 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[7])), zero);
+ p6 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[6])), zero);
+ p5 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[5])), zero);
+ p4 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[4])), zero);
+ p3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[3])), zero);
+ p2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[2])), zero);
+ p1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[1])), zero);
+ p0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(ap[0])), zero);
+ q0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[0])), zero);
+ q1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[1])), zero);
+ q2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[2])), zero);
+ q3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[3])), zero);
+ q4 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[4])), zero);
+ q5 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[5])), zero);
+ q6 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[6])), zero);
+ q7 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(aq[7])), zero);
+
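+ // The flat filters are evaluated as running sums: a holds the taps
+ // shared by both filters, b the short (>> 3) filter extras plus its
+ // +4 rounding, and c the wide (>> 4) filter extras plus its +8. Each
+ // output below slides the window by adding the incoming tap and
+ // subtracting the outgoing ones.
+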
+ c = _mm_sub_epi16(_mm_slli_epi16(p7, 3), p7); // p7 * 7
+ c = _mm_add_epi16(_mm_slli_epi16(p6, 1), _mm_add_epi16(p4, c));
+
+ b = _mm_add_epi16(_mm_add_epi16(p3, four), _mm_add_epi16(p3, p2));
+ a = _mm_add_epi16(p3, _mm_add_epi16(p2, p1));
+ a = _mm_add_epi16(_mm_add_epi16(p0, q0), a);
+
+ _mm_storel_epi64((__m128i *)&flat_op[2][i*8],
+ _mm_packus_epi16(_mm_srli_epi16(_mm_add_epi16(a, b), 3), b));
+
+ c = _mm_add_epi16(_mm_add_epi16(p5, eight), c);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_op[6][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q1, a);
+ b = _mm_add_epi16(_mm_sub_epi16(b, _mm_add_epi16(p3, p2)), p1);
+ _mm_storel_epi64((__m128i *)&flat_op[1][i*8],
+ _mm_packus_epi16(_mm_srli_epi16(_mm_add_epi16(a, b), 3), b));
+
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p6)), p5);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_op[5][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q2, a);
+ b = _mm_add_epi16(_mm_sub_epi16(b, _mm_add_epi16(p3, p1)), p0);
+ _mm_storel_epi64((__m128i *)&flat_op[0][i*8],
+ _mm_packus_epi16(_mm_srli_epi16(_mm_add_epi16(a, b), 3), b));
+
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p5)), p4);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_op[4][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q3, a);
+ b = _mm_add_epi16(_mm_sub_epi16(b, _mm_add_epi16(p3, p0)), q0);
+ _mm_storel_epi64((__m128i *)&flat_oq[0][i*8],
+ _mm_packus_epi16(_mm_srli_epi16(_mm_add_epi16(a, b), 3), b));
+
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p4)), p3);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_op[3][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ b = _mm_add_epi16(q3, b);
+ b = _mm_add_epi16(_mm_sub_epi16(b, _mm_add_epi16(p2, q0)), q1);
+ _mm_storel_epi64((__m128i *)&flat_oq[1][i*8],
+ _mm_packus_epi16(_mm_srli_epi16(_mm_add_epi16(a, b), 3), b));
+
+ c = _mm_add_epi16(q4, c);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p3)), p2);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_op[2][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ b = _mm_add_epi16(q3, b);
+ b = _mm_add_epi16(_mm_sub_epi16(b, _mm_add_epi16(p1, q1)), q2);
+ _mm_storel_epi64((__m128i *)&flat_oq[2][i*8],
+ _mm_packus_epi16(_mm_srli_epi16(_mm_add_epi16(a, b), 3), b));
+ a = _mm_add_epi16(q5, a);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p2)), p1);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_op[1][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q6, a);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p1)), p0);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_op[0][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q7, a);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p7, p0)), q0);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_oq[0][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q7, a);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p6, q0)), q1);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_oq[1][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q7, a);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p5, q1)), q2);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_oq[2][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q7, a);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p4, q2)), q3);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_oq[3][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q7, a);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p3, q3)), q4);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_oq[4][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q7, a);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p2, q4)), q5);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_oq[5][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ a = _mm_add_epi16(q7, a);
+ c = _mm_add_epi16(_mm_sub_epi16(c, _mm_add_epi16(p1, q5)), q6);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(a, c), 4);
+ _mm_storel_epi64((__m128i *)&flat2_oq[6][i*8],
+ _mm_packus_epi16(workp_shft, workp_shft));
+ }
+ }
+ // wide flat
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
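+ // Branchless select between the 4-tap and flat-filter outputs:
+ // out = (flat & filtered) | (~flat & original), done with
+ // andnot/and/or; the same pattern with flat2 later picks the wide
+ // results.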
+ work_a = _mm_loadl_epi64((__m128i *)ap[2]);
+ p2 = _mm_loadl_epi64((__m128i *)flat_op[2]);
+ work_a = _mm_andnot_si128(flat, work_a);
+ p2 = _mm_and_si128(flat, p2);
+ p2 = _mm_or_si128(work_a, p2);
+ _mm_storel_epi64((__m128i *)flat_op[2], p2);
+
+ p1 = _mm_loadl_epi64((__m128i *)flat_op[1]);
+ work_a = _mm_andnot_si128(flat, ps1);
+ p1 = _mm_and_si128(flat, p1);
+ p1 = _mm_or_si128(work_a, p1);
+ _mm_storel_epi64((__m128i *)flat_op[1], p1);
+
+ p0 = _mm_loadl_epi64((__m128i *)flat_op[0]);
+ work_a = _mm_andnot_si128(flat, ps0);
+ p0 = _mm_and_si128(flat, p0);
+ p0 = _mm_or_si128(work_a, p0);
+ _mm_storel_epi64((__m128i *)flat_op[0], p0);
+
+ q0 = _mm_loadl_epi64((__m128i *)flat_oq[0]);
+ work_a = _mm_andnot_si128(flat, qs0);
+ q0 = _mm_and_si128(flat, q0);
+ q0 = _mm_or_si128(work_a, q0);
+ _mm_storel_epi64((__m128i *)flat_oq[0], q0);
+
+ q1 = _mm_loadl_epi64((__m128i *)flat_oq[1]);
+ work_a = _mm_andnot_si128(flat, qs1);
+ q1 = _mm_and_si128(flat, q1);
+ q1 = _mm_or_si128(work_a, q1);
+ _mm_storel_epi64((__m128i *)flat_oq[1], q1);
+
+ work_a = _mm_loadl_epi64((__m128i *)aq[2]);
+ q2 = _mm_loadl_epi64((__m128i *)flat_oq[2]);
+ work_a = _mm_andnot_si128(flat, work_a);
+ q2 = _mm_and_si128(flat, q2);
+ q2 = _mm_or_si128(work_a, q2);
+ _mm_storel_epi64((__m128i *)flat_oq[2], q2);
+
+ // write out op6 - op3
+ {
+ unsigned char *dst = (s - 7 * p);
+ for (i = 6; i > 2; i--) {
+ __m128i flat2_output;
+ work_a = _mm_loadl_epi64((__m128i *)ap[i]);
+ flat2_output = _mm_loadl_epi64((__m128i *)flat2_op[i]);
+ work_a = _mm_andnot_si128(flat2, work_a);
+ flat2_output = _mm_and_si128(flat2, flat2_output);
+ work_a = _mm_or_si128(work_a, flat2_output);
+ _mm_storel_epi64((__m128i *)dst, work_a);
+ dst += p;
+ }
+ }
+
+ work_a = _mm_loadl_epi64((__m128i *)flat_op[2]);
+ p2 = _mm_loadl_epi64((__m128i *)flat2_op[2]);
+ work_a = _mm_andnot_si128(flat2, work_a);
+ p2 = _mm_and_si128(flat2, p2);
+ p2 = _mm_or_si128(work_a, p2);
+ _mm_storel_epi64((__m128i *)(s - 3 * p), p2);
+
+ work_a = _mm_loadl_epi64((__m128i *)flat_op[1]);
+ p1 = _mm_loadl_epi64((__m128i *)flat2_op[1]);
+ work_a = _mm_andnot_si128(flat2, work_a);
+ p1 = _mm_and_si128(flat2, p1);
+ p1 = _mm_or_si128(work_a, p1);
+ _mm_storel_epi64((__m128i *)(s - 2 * p), p1);
+
+ work_a = _mm_loadl_epi64((__m128i *)flat_op[0]);
+ p0 = _mm_loadl_epi64((__m128i *)flat2_op[0]);
+ work_a = _mm_andnot_si128(flat2, work_a);
+ p0 = _mm_and_si128(flat2, p0);
+ p0 = _mm_or_si128(work_a, p0);
+ _mm_storel_epi64((__m128i *)(s - 1 * p), p0);
+
+ work_a = _mm_loadl_epi64((__m128i *)flat_oq[0]);
+ q0 = _mm_loadl_epi64((__m128i *)flat2_oq[0]);
+ work_a = _mm_andnot_si128(flat2, work_a);
+ q0 = _mm_and_si128(flat2, q0);
+ q0 = _mm_or_si128(work_a, q0);
+ _mm_storel_epi64((__m128i *)(s - 0 * p), q0);
+
+ work_a = _mm_loadl_epi64((__m128i *)flat_oq[1]);
+ q1 = _mm_loadl_epi64((__m128i *)flat2_oq[1]);
+ work_a = _mm_andnot_si128(flat2, work_a);
+ q1 = _mm_and_si128(flat2, q1);
+ q1 = _mm_or_si128(work_a, q1);
+ _mm_storel_epi64((__m128i *)(s + 1 * p), q1);
+
+ work_a = _mm_loadl_epi64((__m128i *)flat_oq[2]);
+ q2 = _mm_loadl_epi64((__m128i *)flat2_oq[2]);
+ work_a = _mm_andnot_si128(flat2, work_a);
+ q2 = _mm_and_si128(flat2, q2);
+ q2 = _mm_or_si128(work_a, q2);
+ _mm_storel_epi64((__m128i *)(s + 2 * p), q2);
+
+ // write out oq3 - oq6
+ {
+ unsigned char *dst = (s + 3 * p);
+ for (i = 3; i < 7; i++) {
+ __m128i flat2_output;
+ work_a = _mm_loadl_epi64((__m128i *)aq[i]);
+ flat2_output = _mm_loadl_epi64((__m128i *)flat2_oq[i]);
+ work_a = _mm_andnot_si128(flat2, work_a);
+ flat2_output = _mm_and_si128(flat2, flat2_output);
+ work_a = _mm_or_si128(work_a, flat2_output);
+ _mm_storel_epi64((__m128i *)dst, work_a);
+ dst += p;
+ }
+ }
+ }
+}
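
The flat2 block above keeps two 16-bit accumulators, a and c: a keeps absorbing another copy of the boundary sample (p7 on the way out, q7 on the way back) while c drops the sample leaving the 15-tap window and adds the one entering it, so each output is just (a + c) >> 4. A scalar sketch of that sliding-window idea (illustrative tap handling, not the exact VP9 kernel):

    #include <stdint.h>

    /* col[0..15] holds one pixel column, p7..p0 then q0..q7 */
    static void wide_filter_column_sketch(const uint8_t *col, uint8_t *out) {
      int sum = 8;                       /* rounding term of (x + 8) >> 4 */
      for (int i = 0; i < 16; i++)
        sum += col[i];
      for (int i = 0; i < 14; i++) {     /* fourteen outputs, op6..oq6 */
        out[i] = (uint8_t)(sum >> 4);
        sum += col[15] - col[i];         /* slide: drop oldest, repeat q7 */
      }
    }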
+
+void vp9_mbloop_filter_horizontal_edge_sse2(unsigned char *s,
+ int p,
+ const unsigned char *_blimit,
+ const unsigned char *_limit,
+ const unsigned char *_thresh,
+ int count) {
+ DECLARE_ALIGNED(16, unsigned char, flat_op2[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_op1[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_op0[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_oq2[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_oq1[16]);
+ DECLARE_ALIGNED(16, unsigned char, flat_oq0[16]);
+ __m128i mask, hev, flat;
+ const __m128i zero = _mm_set1_epi16(0);
+ __m128i p3, p2, p1, p0, q0, q1, q2, q3;
+ const unsigned int extended_thresh = _thresh[0] * 0x01010101u;
+ const unsigned int extended_limit = _limit[0] * 0x01010101u;
+ const unsigned int extended_blimit = _blimit[0] * 0x01010101u;
+ const __m128i thresh =
+ _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_thresh), 0);
+ const __m128i limit =
+ _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_limit), 0);
+ const __m128i blimit =
+ _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_blimit), 0);
+
+ (void)count;
+ p3 = _mm_loadl_epi64((__m128i *)(s - 4 * p));
+ p2 = _mm_loadl_epi64((__m128i *)(s - 3 * p));
+ p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p));
+ p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p));
+ q0 = _mm_loadl_epi64((__m128i *)(s - 0 * p));
+ q1 = _mm_loadl_epi64((__m128i *)(s + 1 * p));
+ q2 = _mm_loadl_epi64((__m128i *)(s + 2 * p));
+ q3 = _mm_loadl_epi64((__m128i *)(s + 3 * p));
+ {
+ const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu8(p1, p0),
+ _mm_subs_epu8(p0, p1));
+ const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu8(q1, q0),
+ _mm_subs_epu8(q0, q1));
+ const __m128i one = _mm_set1_epi8(1);
+ const __m128i fe = _mm_set1_epi8(0xfe);
+ const __m128i ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0);
+ __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu8(p0, q0),
+ _mm_subs_epu8(q0, p0));
+ __m128i abs_p1q1 = _mm_or_si128(_mm_subs_epu8(p1, q1),
+ _mm_subs_epu8(q1, p1));
+ __m128i work;
+ flat = _mm_max_epu8(abs_p1p0, abs_q1q0);
+ hev = _mm_subs_epu8(flat, thresh);
+ hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
+
+ abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
+ abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
+ mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
+ mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
+ // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
+ mask = _mm_max_epu8(flat, mask);
+ // mask |= (abs(p1 - p0) > limit) * -1;
+ // mask |= (abs(q1 - q0) > limit) * -1;
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p1),
+ _mm_subs_epu8(p1, p2)),
+ _mm_or_si128(_mm_subs_epu8(p3, p2),
+ _mm_subs_epu8(p2, p3)));
+ mask = _mm_max_epu8(work, mask);
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(q2, q1),
+ _mm_subs_epu8(q1, q2)),
+ _mm_or_si128(_mm_subs_epu8(q3, q2),
+ _mm_subs_epu8(q2, q3)));
+ mask = _mm_max_epu8(work, mask);
+ mask = _mm_subs_epu8(mask, limit);
+ mask = _mm_cmpeq_epi8(mask, zero);
+
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p0),
+ _mm_subs_epu8(p0, p2)),
+ _mm_or_si128(_mm_subs_epu8(q2, q0),
+ _mm_subs_epu8(q0, q2)));
+ flat = _mm_max_epu8(work, flat);
+ work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p3, p0),
+ _mm_subs_epu8(p0, p3)),
+ _mm_or_si128(_mm_subs_epu8(q3, q0),
+ _mm_subs_epu8(q0, q3)));
+ flat = _mm_max_epu8(work, flat);
+ flat = _mm_subs_epu8(flat, one);
+ flat = _mm_cmpeq_epi8(flat, zero);
+ flat = _mm_and_si128(flat, mask);
+ }
+ {
+ const __m128i four = _mm_set1_epi16(4);
+ unsigned char *src = s;
+ {
+ __m128i workp_a, workp_b, workp_shft;
+ p3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 4 * p)), zero);
+ p2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 3 * p)), zero);
+ p1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 2 * p)), zero);
+ p0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 1 * p)), zero);
+ q0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 0 * p)), zero);
+ q1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 1 * p)), zero);
+ q2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 2 * p)), zero);
+ q3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 3 * p)), zero);
+
+ workp_a = _mm_add_epi16(_mm_add_epi16(p3, p3), _mm_add_epi16(p2, p1));
+ workp_a = _mm_add_epi16(_mm_add_epi16(workp_a, four), p0);
+ workp_b = _mm_add_epi16(_mm_add_epi16(q0, p2), p3);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
+ _mm_storel_epi64((__m128i *)&flat_op2[0],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ workp_b = _mm_add_epi16(_mm_add_epi16(q0, q1), p1);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
+ _mm_storel_epi64((__m128i *)&flat_op1[0],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p3), q2);
+ workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p1), p0);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
+ _mm_storel_epi64((__m128i *)&flat_op0[0],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p3), q3);
+ workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p0), q0);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
+ _mm_storel_epi64((__m128i *)&flat_oq0[0],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p2), q3);
+ workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q0), q1);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
+ _mm_storel_epi64((__m128i *)&flat_oq1[0],
+ _mm_packus_epi16(workp_shft, workp_shft));
+
+ workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p1), q3);
+ workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q1), q2);
+ workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
+ _mm_storel_epi64((__m128i *)&flat_oq2[0],
+ _mm_packus_epi16(workp_shft, workp_shft));
+ }
+ }
+ // lp filter
+ {
+ const __m128i t4 = _mm_set1_epi8(4);
+ const __m128i t3 = _mm_set1_epi8(3);
+ const __m128i t80 = _mm_set1_epi8(0x80);
+ const __m128i te0 = _mm_set1_epi8(0xe0);
+ const __m128i t1f = _mm_set1_epi8(0x1f);
+ const __m128i t1 = _mm_set1_epi8(0x1);
+ const __m128i t7f = _mm_set1_epi8(0x7f);
+
+ const __m128i ps1 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 2 * p)),
+ t80);
+ const __m128i ps0 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 1 * p)),
+ t80);
+ const __m128i qs0 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 0 * p)),
+ t80);
+ const __m128i qs1 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 1 * p)),
+ t80);
+ __m128i filt;
+ __m128i work_a;
+ __m128i filter1, filter2;
+
+ filt = _mm_and_si128(_mm_subs_epi8(ps1, qs1), hev);
+ work_a = _mm_subs_epi8(qs0, ps0);
+ filt = _mm_adds_epi8(filt, work_a);
+ filt = _mm_adds_epi8(filt, work_a);
+ filt = _mm_adds_epi8(filt, work_a);
+ /* (vp9_filter + 3 * (qs0 - ps0)) & mask */
+ filt = _mm_and_si128(filt, mask);
+
+ filter1 = _mm_adds_epi8(filt, t4);
+ filter2 = _mm_adds_epi8(filt, t3);
+
+ /* Filter1 >> 3 */
+ work_a = _mm_cmpgt_epi8(zero, filter1);
+ filter1 = _mm_srli_epi16(filter1, 3);
+ work_a = _mm_and_si128(work_a, te0);
+ filter1 = _mm_and_si128(filter1, t1f);
+ filter1 = _mm_or_si128(filter1, work_a);
+
+ /* Filter2 >> 3 */
+ work_a = _mm_cmpgt_epi8(zero, filter2);
+ filter2 = _mm_srli_epi16(filter2, 3);
+ work_a = _mm_and_si128(work_a, te0);
+ filter2 = _mm_and_si128(filter2, t1f);
+ filter2 = _mm_or_si128(filter2, work_a);
+
+ /* filt >> 1 */
+ filt = _mm_adds_epi8(filter1, t1);
+ work_a = _mm_cmpgt_epi8(zero, filt);
+ filt = _mm_srli_epi16(filt, 1);
+ work_a = _mm_and_si128(work_a, t80);
+ filt = _mm_and_si128(filt, t7f);
+ filt = _mm_or_si128(filt, work_a);
+
+ filt = _mm_andnot_si128(hev, filt);
+
+ work_a = _mm_xor_si128(_mm_subs_epi8(qs0, filter1), t80);
+ q0 = _mm_loadl_epi64((__m128i *)flat_oq0);
+ work_a = _mm_andnot_si128(flat, work_a);
+ q0 = _mm_and_si128(flat, q0);
+ q0 = _mm_or_si128(work_a, q0);
+
+ work_a = _mm_xor_si128(_mm_subs_epi8(qs1, filt), t80);
+ q1 = _mm_loadl_epi64((__m128i *)flat_oq1);
+ work_a = _mm_andnot_si128(flat, work_a);
+ q1 = _mm_and_si128(flat, q1);
+ q1 = _mm_or_si128(work_a, q1);
+
+ work_a = _mm_loadu_si128((__m128i *)(s + 2 * p));
+ q2 = _mm_loadl_epi64((__m128i *)flat_oq2);
+ work_a = _mm_andnot_si128(flat, work_a);
+ q2 = _mm_and_si128(flat, q2);
+ q2 = _mm_or_si128(work_a, q2);
+
+ work_a = _mm_xor_si128(_mm_adds_epi8(ps0, filter2), t80);
+ p0 = _mm_loadl_epi64((__m128i *)flat_op0);
+ work_a = _mm_andnot_si128(flat, work_a);
+ p0 = _mm_and_si128(flat, p0);
+ p0 = _mm_or_si128(work_a, p0);
+
+ work_a = _mm_xor_si128(_mm_adds_epi8(ps1, filt), t80);
+ p1 = _mm_loadl_epi64((__m128i *)flat_op1);
+ work_a = _mm_andnot_si128(flat, work_a);
+ p1 = _mm_and_si128(flat, p1);
+ p1 = _mm_or_si128(work_a, p1);
+
+ work_a = _mm_loadu_si128((__m128i *)(s - 3 * p));
+ p2 = _mm_loadl_epi64((__m128i *)flat_op2);
+ work_a = _mm_andnot_si128(flat, work_a);
+ p2 = _mm_and_si128(flat, p2);
+ p2 = _mm_or_si128(work_a, p2);
+
+ _mm_storel_epi64((__m128i *)(s - 3 * p), p2);
+ _mm_storel_epi64((__m128i *)(s - 2 * p), p1);
+ _mm_storel_epi64((__m128i *)(s - 1 * p), p0);
+ _mm_storel_epi64((__m128i *)(s + 0 * p), q0);
+ _mm_storel_epi64((__m128i *)(s + 1 * p), q1);
+ _mm_storel_epi64((__m128i *)(s + 2 * p), q2);
+ }
+}
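
Each andnot/and/or triple above is the SSE2 idiom for a branchless per-byte select between the plain-filtered and the flat-filtered result. A self-contained sketch (SSE4.1's _mm_blendv_epi8 collapses this to one instruction, but this file targets SSE2):

    #include <emmintrin.h>

    /* mask bytes must be 0x00 or 0xff, as pcmpeqb produces */
    static __m128i select_bytes(__m128i mask, __m128i if_set, __m128i if_clear) {
      return _mm_or_si128(_mm_and_si128(mask, if_set),
                          _mm_andnot_si128(mask, if_clear));
    }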
+
+void vp9_mbloop_filter_horizontal_edge_uv_sse2(unsigned char *u,
+ int p,
+ const unsigned char *_blimit,
+ const unsigned char *_limit,
+ const unsigned char *_thresh,
+ unsigned char *v) {
+ DECLARE_ALIGNED_ARRAY(16, unsigned char, src, 160);
+
+ /* Read source */
+ const __m128i p4 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(u - 5 * p)),
+ _mm_loadl_epi64((__m128i *)(v - 5 * p)));
+ const __m128i p3 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(u - 4 * p)),
+ _mm_loadl_epi64((__m128i *)(v - 4 * p)));
+ const __m128i p2 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(u - 3 * p)),
+ _mm_loadl_epi64((__m128i *)(v - 3 * p)));
+ const __m128i p1 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(u - 2 * p)),
+ _mm_loadl_epi64((__m128i *)(v - 2 * p)));
+ const __m128i p0 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(u - 1 * p)),
+ _mm_loadl_epi64((__m128i *)(v - 1 * p)));
+ const __m128i q0 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(u)),
+ _mm_loadl_epi64((__m128i *)(v)));
+ const __m128i q1 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(u + 1 * p)),
+ _mm_loadl_epi64((__m128i *)(v + 1 * p)));
+ const __m128i q2 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(u + 2 * p)),
+ _mm_loadl_epi64((__m128i *)(v + 2 * p)));
+ const __m128i q3 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(u + 3 * p)),
+ _mm_loadl_epi64((__m128i *)(v + 3 * p)));
+ const __m128i q4 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(u + 4 * p)),
+ _mm_loadl_epi64((__m128i *)(v + 4 * p)));
+
+ _mm_store_si128((__m128i *)(src), p4);
+ _mm_store_si128((__m128i *)(src + 16), p3);
+ _mm_store_si128((__m128i *)(src + 32), p2);
+ _mm_store_si128((__m128i *)(src + 48), p1);
+ _mm_store_si128((__m128i *)(src + 64), p0);
+ _mm_store_si128((__m128i *)(src + 80), q0);
+ _mm_store_si128((__m128i *)(src + 96), q1);
+ _mm_store_si128((__m128i *)(src + 112), q2);
+ _mm_store_si128((__m128i *)(src + 128), q3);
+ _mm_store_si128((__m128i *)(src + 144), q4);
+
+ /* Loop filtering */
+ vp9_mbloop_filter_horizontal_edge_sse2(src + 80, 16, _blimit, _limit,
+ _thresh, 1);
+
+ /* Store result */
+ _mm_storel_epi64((__m128i *)(u - 3 * p),
+ _mm_loadl_epi64((__m128i *)(src + 32)));
+ _mm_storel_epi64((__m128i *)(u - 2 * p),
+ _mm_loadl_epi64((__m128i *)(src + 48)));
+ _mm_storel_epi64((__m128i *)(u - p),
+ _mm_loadl_epi64((__m128i *)(src + 64)));
+ _mm_storel_epi64((__m128i *)u,
+ _mm_loadl_epi64((__m128i *)(src + 80)));
+ _mm_storel_epi64((__m128i *)(u + p),
+ _mm_loadl_epi64((__m128i *)(src + 96)));
+ _mm_storel_epi64((__m128i *)(u + 2 * p),
+ _mm_loadl_epi64((__m128i *)(src + 112)));
+
+ _mm_storel_epi64((__m128i *)(v - 3 * p),
+ _mm_loadl_epi64((__m128i *)(src + 40)));
+ _mm_storel_epi64((__m128i *)(v - 2 * p),
+ _mm_loadl_epi64((__m128i *)(src + 56)));
+ _mm_storel_epi64((__m128i *)(v - p),
+ _mm_loadl_epi64((__m128i *)(src + 72)));
+ _mm_storel_epi64((__m128i *)v,
+ _mm_loadl_epi64((__m128i *)(src + 88)));
+ _mm_storel_epi64((__m128i *)(v + p),
+ _mm_loadl_epi64((__m128i *)(src + 104)));
+ _mm_storel_epi64((__m128i *)(v + 2 * p),
+ _mm_loadl_epi64((__m128i *)(src + 120)));
+}
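
The UV variant runs the kernel once for both chroma planes: one u row and one v row are packed side by side into a 16-byte-stride scratch buffer, filtered in a single pass, and the six changed rows are scattered back (the v halves sit at offset +8 within each row, hence src + 40, 56, 72, ... above). The packing step as a scalar sketch (hypothetical helper name):

    #include <string.h>

    /* one scratch row = 8 u pixels followed by 8 v pixels */
    static void pack_uv_row(unsigned char *dst16, const unsigned char *u_row,
                            const unsigned char *v_row) {
      memcpy(dst16, u_row, 8);       /* bytes 0..7  : u */
      memcpy(dst16 + 8, v_row, 8);   /* bytes 8..15 : v */
    }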
+
+static INLINE void transpose8x16(unsigned char *in0, unsigned char *in1,
+ int in_p, unsigned char *out, int out_p) {
+ __m128i x0, x1, x2, x3, x4, x5, x6, x7;
+ __m128i x8, x9, x10, x11, x12, x13, x14, x15;
+
+ /* Read in 16 lines */
+ x0 = _mm_loadl_epi64((__m128i *)in0);
+ x8 = _mm_loadl_epi64((__m128i *)in1);
+ x1 = _mm_loadl_epi64((__m128i *)(in0 + in_p));
+ x9 = _mm_loadl_epi64((__m128i *)(in1 + in_p));
+ x2 = _mm_loadl_epi64((__m128i *)(in0 + 2 * in_p));
+ x10 = _mm_loadl_epi64((__m128i *)(in1 + 2 * in_p));
+ x3 = _mm_loadl_epi64((__m128i *)(in0 + 3 * in_p));
+ x11 = _mm_loadl_epi64((__m128i *)(in1 + 3 * in_p));
+ x4 = _mm_loadl_epi64((__m128i *)(in0 + 4 * in_p));
+ x12 = _mm_loadl_epi64((__m128i *)(in1 + 4 * in_p));
+ x5 = _mm_loadl_epi64((__m128i *)(in0 + 5 * in_p));
+ x13 = _mm_loadl_epi64((__m128i *)(in1 + 5 * in_p));
+ x6 = _mm_loadl_epi64((__m128i *)(in0 + 6 * in_p));
+ x14 = _mm_loadl_epi64((__m128i *)(in1 + 6 * in_p));
+ x7 = _mm_loadl_epi64((__m128i *)(in0 + 7 * in_p));
+ x15 = _mm_loadl_epi64((__m128i *)(in1 + 7 * in_p));
+
+ x0 = _mm_unpacklo_epi8(x0, x1);
+ x1 = _mm_unpacklo_epi8(x2, x3);
+ x2 = _mm_unpacklo_epi8(x4, x5);
+ x3 = _mm_unpacklo_epi8(x6, x7);
+
+ x8 = _mm_unpacklo_epi8(x8, x9);
+ x9 = _mm_unpacklo_epi8(x10, x11);
+ x10 = _mm_unpacklo_epi8(x12, x13);
+ x11 = _mm_unpacklo_epi8(x14, x15);
+
+ x4 = _mm_unpacklo_epi16(x0, x1);
+ x5 = _mm_unpacklo_epi16(x2, x3);
+ x12 = _mm_unpacklo_epi16(x8, x9);
+ x13 = _mm_unpacklo_epi16(x10, x11);
+
+ x6 = _mm_unpacklo_epi32(x4, x5);
+ x7 = _mm_unpackhi_epi32(x4, x5);
+ x14 = _mm_unpacklo_epi32(x12, x13);
+ x15 = _mm_unpackhi_epi32(x12, x13);
+
+ /* Store first 4-line result */
+ _mm_storeu_si128((__m128i *)out, _mm_unpacklo_epi64(x6, x14));
+ _mm_storeu_si128((__m128i *)(out + out_p), _mm_unpackhi_epi64(x6, x14));
+ _mm_storeu_si128((__m128i *)(out + 2 * out_p), _mm_unpacklo_epi64(x7, x15));
+ _mm_storeu_si128((__m128i *)(out + 3 * out_p), _mm_unpackhi_epi64(x7, x15));
+
+ x4 = _mm_unpackhi_epi16(x0, x1);
+ x5 = _mm_unpackhi_epi16(x2, x3);
+ x12 = _mm_unpackhi_epi16(x8, x9);
+ x13 = _mm_unpackhi_epi16(x10, x11);
+
+ x6 = _mm_unpacklo_epi32(x4, x5);
+ x7 = _mm_unpackhi_epi32(x4, x5);
+ x14 = _mm_unpacklo_epi32(x12, x13);
+ x15 = _mm_unpackhi_epi32(x12, x13);
+
+ /* Store second 4-line result */
+ _mm_storeu_si128((__m128i *)(out + 4 * out_p), _mm_unpacklo_epi64(x6, x14));
+ _mm_storeu_si128((__m128i *)(out + 5 * out_p), _mm_unpackhi_epi64(x6, x14));
+ _mm_storeu_si128((__m128i *)(out + 6 * out_p), _mm_unpacklo_epi64(x7, x15));
+ _mm_storeu_si128((__m128i *)(out + 7 * out_p), _mm_unpackhi_epi64(x7, x15));
+}
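
The unpack ladder above is a plain byte transpose: the two 8x8 tiles read from in0 and in1 become the left and right halves of an 8x16 output. A scalar equivalent, useful for checking the shuffles:

    #include <stdint.h>

    static void transpose8x16_scalar(const uint8_t *in0, const uint8_t *in1,
                                     int in_p, uint8_t *out, int out_p) {
      for (int r = 0; r < 8; r++)
        for (int c = 0; c < 8; c++) {
          out[c * out_p + r]     = in0[r * in_p + c];  /* in0 rows -> cols 0..7  */
          out[c * out_p + 8 + r] = in1[r * in_p + c];  /* in1 rows -> cols 8..15 */
        }
    }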
+
+static INLINE void transpose(unsigned char *src[], int in_p,
+ unsigned char *dst[], int out_p,
+ int num_8x8_to_transpose) {
+ int idx8x8 = 0;
+ __m128i x0, x1, x2, x3, x4, x5, x6, x7;
+ do {
+ unsigned char *in = src[idx8x8];
+ unsigned char *out = dst[idx8x8];
+
+ x0 = _mm_loadl_epi64((__m128i *)(in + 0*in_p)); // 00 01 02 03 04 05 06 07
+ x1 = _mm_loadl_epi64((__m128i *)(in + 1*in_p)); // 10 11 12 13 14 15 16 17
+ x2 = _mm_loadl_epi64((__m128i *)(in + 2*in_p)); // 20 21 22 23 24 25 26 27
+ x3 = _mm_loadl_epi64((__m128i *)(in + 3*in_p)); // 30 31 32 33 34 35 36 37
+ x4 = _mm_loadl_epi64((__m128i *)(in + 4*in_p)); // 40 41 42 43 44 45 46 47
+ x5 = _mm_loadl_epi64((__m128i *)(in + 5*in_p)); // 50 51 52 53 54 55 56 57
+ x6 = _mm_loadl_epi64((__m128i *)(in + 6*in_p)); // 60 61 62 63 64 65 66 67
+ x7 = _mm_loadl_epi64((__m128i *)(in + 7*in_p)); // 70 71 72 73 74 75 76 77
+ // 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17
+ x0 = _mm_unpacklo_epi8(x0, x1);
+ // 20 30 21 31 22 32 23 33 24 34 25 35 26 36 27 37
+ x1 = _mm_unpacklo_epi8(x2, x3);
+ // 40 50 41 51 42 52 43 53 44 54 45 55 46 56 47 57
+ x2 = _mm_unpacklo_epi8(x4, x5);
+ // 60 70 61 71 62 72 63 73 64 74 65 75 66 76 67 77
+ x3 = _mm_unpacklo_epi8(x6, x7);
+ // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
+ x4 = _mm_unpacklo_epi16(x0, x1);
+ // 40 50 60 70 41 51 61 71 42 52 62 72 43 53 63 73
+ x5 = _mm_unpacklo_epi16(x2, x3);
+ // 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71
+ x6 = _mm_unpacklo_epi32(x4, x5);
+ // 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73
+ x7 = _mm_unpackhi_epi32(x4, x5);
+
+ _mm_storel_pd((double *)(out + 0*out_p),
+ _mm_castsi128_pd(x6)); // 00 10 20 30 40 50 60 70
+ _mm_storeh_pd((double *)(out + 1*out_p),
+ _mm_castsi128_pd(x6)); // 01 11 21 31 41 51 61 71
+ _mm_storel_pd((double *)(out + 2*out_p),
+ _mm_castsi128_pd(x7)); // 02 12 22 32 42 52 62 72
+ _mm_storeh_pd((double *)(out + 3*out_p),
+ _mm_castsi128_pd(x7)); // 03 13 23 33 43 53 63 73
+
+ // 04 14 24 34 05 15 25 35 06 16 26 36 07 17 27 37
+ x4 = _mm_unpackhi_epi16(x0, x1);
+ // 44 54 64 74 45 55 65 75 46 56 66 76 47 57 67 77
+ x5 = _mm_unpackhi_epi16(x2, x3);
+ // 04 14 24 34 44 54 64 74 05 15 25 35 45 55 65 75
+ x6 = _mm_unpacklo_epi32(x4, x5);
+ // 06 16 26 36 46 56 66 76 07 17 27 37 47 57 67 77
+ x7 = _mm_unpackhi_epi32(x4, x5);
+
+ _mm_storel_pd((double *)(out + 4*out_p),
+ _mm_castsi128_pd(x6)); // 04 14 24 34 44 54 64 74
+ _mm_storeh_pd((double *)(out + 5*out_p),
+ _mm_castsi128_pd(x6)); // 05 15 25 35 45 55 65 75
+ _mm_storel_pd((double *)(out + 6*out_p),
+ _mm_castsi128_pd(x7)); // 06 16 26 36 46 56 66 76
+ _mm_storeh_pd((double *)(out + 7*out_p),
+ _mm_castsi128_pd(x7)); // 07 17 27 37 47 57 67 77
+ } while (++idx8x8 < num_8x8_to_transpose);
+}
+
+void vp9_mbloop_filter_vertical_edge_sse2(unsigned char *s,
+ int p,
+ const unsigned char *blimit,
+ const unsigned char *limit,
+ const unsigned char *thresh,
+ int count) {
+ DECLARE_ALIGNED_ARRAY(16, unsigned char, t_dst, 256);
+ unsigned char *src[2];
+ unsigned char *dst[2];
+
+ (void)count;
+ /* Transpose 16x16 */
+ transpose8x16(s - 8, s - 8 + p * 8, p, t_dst, 16);
+ transpose8x16(s, s + p * 8, p, t_dst + 16 * 8, 16);
+
+ /* Loop filtering */
+ vp9_mbloop_filter_horizontal_edge_sse2(t_dst + 8 * 16, 16, blimit, limit,
+ thresh, 1);
+ src[0] = t_dst + 3 * 16;
+ src[1] = t_dst + 3 * 16 + 8;
+
+ dst[0] = s - 5;
+ dst[1] = s - 5 + p * 8;
+
+ /* Transpose 16x8 */
+ transpose(src, 16, dst, p, 2);
+}
+
+void vp9_mb_lpf_vertical_edge_w_sse2(unsigned char *s,
+ int p,
+ const unsigned char *blimit,
+ const unsigned char *limit,
+ const unsigned char *thresh) {
+ DECLARE_ALIGNED_ARRAY(16, unsigned char, t_dst, 256);
+ unsigned char *src[4];
+ unsigned char *dst[4];
+
+ dst[0] = t_dst;
+ dst[1] = t_dst + 8 * 16;
+
+ src[0] = s - 8;
+ src[1] = s - 8 + 8;
+
+ /* Transpose 16x16 */
+ transpose(src, p, dst, 16, 2);
+
+ /* Loop filtering */
+ vp9_mb_lpf_horizontal_edge_w_sse2(t_dst + 8 * 16, 16, blimit, limit,
+ thresh);
+
+ src[0] = t_dst;
+ src[1] = t_dst + 8 * 16;
+
+ dst[0] = s - 8;
+ dst[1] = s - 8 + 8;
+
+ transpose(src, 16, dst, p, 2);
+}
+
+
+void vp9_mbloop_filter_vertical_edge_uv_sse2(unsigned char *u,
+ int p,
+ const unsigned char *blimit,
+ const unsigned char *limit,
+ const unsigned char *thresh,
+ unsigned char *v) {
+ DECLARE_ALIGNED_ARRAY(16, unsigned char, t_dst, 256);
+ unsigned char *src[2];
+ unsigned char *dst[2];
+
+ /* Transpose 16x16 */
+ transpose8x16(u - 8, v - 8, p, t_dst, 16);
+ transpose8x16(u, v, p, t_dst + 16 * 8, 16);
+
+ /* Loop filtering */
+ vp9_mbloop_filter_horizontal_edge_sse2(t_dst + 8 * 16, 16, blimit, limit,
+ thresh, 1);
+
+ src[0] = t_dst + 3 * 16;
+ src[1] = t_dst + 3 * 16 + 8;
+
+ dst[0] = u - 5;
+ dst[1] = v - 5;
+
+ /* Transpose 16x8 */
+ transpose(src, 16, dst, p, 2);
+}
diff --git a/libvpx/vp9/common/x86/vp9_loopfilter_mmx.asm b/libvpx/vp9/common/x86/vp9_loopfilter_mmx.asm
new file mode 100644
index 0000000..4ebb51b
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_loopfilter_mmx.asm
@@ -0,0 +1,626 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+
+;void vp9_loop_filter_horizontal_edge_mmx
+;(
+; unsigned char *src_ptr,
+; int src_pixel_step,
+; const char *blimit,
+; const char *limit,
+; const char *thresh,
+; int count
+;)
+global sym(vp9_loop_filter_horizontal_edge_mmx) PRIVATE
+sym(vp9_loop_filter_horizontal_edge_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 32 ; reserve 32 bytes
+ %define t0 [rsp + 0] ;__declspec(align(16)) char t0[8];
+ %define t1 [rsp + 16] ;__declspec(align(16)) char t1[8];
+
+ mov rsi, arg(0) ;src_ptr
+ movsxd rax, dword ptr arg(1) ;src_pixel_step ; source pitch
+
+ movsxd rcx, dword ptr arg(5) ;count
+.next8_h:
+ mov rdx, arg(3) ;limit
+ movq mm7, [rdx]
+ mov rdi, rsi ; rdi points to row +1 for indirect addressing
+ add rdi, rax
+
+ ; calculate breakout conditions
+ movq mm2, [rdi+2*rax] ; q3
+ movq mm1, [rsi+2*rax] ; q2
+ movq mm6, mm1 ; q2
+ psubusb mm1, mm2 ; q2-=q3
+ psubusb mm2, mm6 ; q3-=q2
+ por mm1, mm2 ; abs(q3-q2)
+ psubusb mm1, mm7 ;
+
+
+ movq mm4, [rsi+rax] ; q1
+ movq mm3, mm4 ; q1
+ psubusb mm4, mm6 ; q1-=q2
+ psubusb mm6, mm3 ; q2-=q1
+ por mm4, mm6 ; abs(q2-q1)
+
+ psubusb mm4, mm7
+ por mm1, mm4
+
+ movq mm4, [rsi] ; q0
+ movq mm0, mm4 ; q0
+ psubusb mm4, mm3 ; q0-=q1
+ psubusb mm3, mm0 ; q1-=q0
+ por mm4, mm3 ; abs(q0-q1)
+ movq t0, mm4 ; save to t0
+ psubusb mm4, mm7
+ por mm1, mm4
+
+
+ neg rax ; negate pitch to deal with above border
+
+ movq mm2, [rsi+4*rax] ; p3
+ movq mm4, [rdi+4*rax] ; p2
+ movq mm5, mm4 ; p2
+ psubusb mm4, mm2 ; p2-=p3
+ psubusb mm2, mm5 ; p3-=p2
+ por mm4, mm2 ; abs(p3 - p2)
+ psubusb mm4, mm7
+ por mm1, mm4
+
+
+ movq mm4, [rsi+2*rax] ; p1
+ movq mm3, mm4 ; p1
+ psubusb mm4, mm5 ; p1-=p2
+ psubusb mm5, mm3 ; p2-=p1
+ por mm4, mm5 ; abs(p2 - p1)
+ psubusb mm4, mm7
+ por mm1, mm4
+
+ movq mm2, mm3 ; p1
+
+ movq mm4, [rsi+rax] ; p0
+ movq mm5, mm4 ; p0
+ psubusb mm4, mm3 ; p0-=p1
+ psubusb mm3, mm5 ; p1-=p0
+ por mm4, mm3 ; abs(p1 - p0)
+ movq t1, mm4 ; save to t1
+ psubusb mm4, mm7
+ por mm1, mm4
+
+ movq mm3, [rdi] ; q1
+ movq mm4, mm3 ; q1
+ psubusb mm3, mm2 ; q1-=p1
+ psubusb mm2, mm4 ; p1-=q1
+ por mm2, mm3 ; abs(p1-q1)
+ pand mm2, [GLOBAL(tfe)] ; set lsb of each byte to zero
+ psrlw mm2, 1 ; abs(p1-q1)/2
+
+ movq mm6, mm5 ; p0
+ movq mm3, [rsi] ; q0
+ psubusb mm5, mm3 ; p0-=q0
+ psubusb mm3, mm6 ; q0-=p0
+ por mm5, mm3 ; abs(p0 - q0)
+ paddusb mm5, mm5 ; abs(p0-q0)*2
+ paddusb mm5, mm2 ; abs (p0 - q0) *2 + abs(p1-q1)/2
+
+ mov rdx, arg(2) ;blimit ; get blimit
+ movq mm7, [rdx] ; blimit
+
+ psubusb mm5, mm7 ; abs (p0 - q0) *2 + abs(p1-q1)/2 > blimit
+ por mm1, mm5
+ pxor mm5, mm5
+ pcmpeqb mm1, mm5 ; mask mm1
+
+ ; calculate high edge variance
+ mov rdx, arg(4) ;thresh ; get thresh
+ movq mm7, [rdx] ;
+ movq mm4, t0 ; get abs (q1 - q0)
+ psubusb mm4, mm7
+ movq mm3, t1 ; get abs (p1 - p0)
+ psubusb mm3, mm7
+ paddb mm4, mm3 ; abs(q1 - q0) > thresh || abs(p1 - p0) > thresh
+
+ pcmpeqb mm4, mm5
+
+ pcmpeqb mm5, mm5
+ pxor mm4, mm5
+
+
+ ; start work on filters
+ movq mm2, [rsi+2*rax] ; p1
+ movq mm7, [rdi] ; q1
+ pxor mm2, [GLOBAL(t80)] ; p1 offset to convert to signed values
+ pxor mm7, [GLOBAL(t80)] ; q1 offset to convert to signed values
+ psubsb mm2, mm7 ; p1 - q1
+ pand mm2, mm4 ; high var mask (hvm)(p1 - q1)
+ pxor mm6, [GLOBAL(t80)] ; offset to convert to signed values
+ pxor mm0, [GLOBAL(t80)] ; offset to convert to signed values
+ movq mm3, mm0 ; q0
+ psubsb mm0, mm6 ; q0 - p0
+ paddsb mm2, mm0 ; 1 * (q0 - p0) + hvm(p1 - q1)
+ paddsb mm2, mm0 ; 2 * (q0 - p0) + hvm(p1 - q1)
+ paddsb mm2, mm0 ; 3 * (q0 - p0) + hvm(p1 - q1)
+ pand mm1, mm2 ; mask filter values we don't care about
+ movq mm2, mm1
+ paddsb mm1, [GLOBAL(t4)] ; 3* (q0 - p0) + hvm(p1 - q1) + 4
+ paddsb mm2, [GLOBAL(t3)] ; 3* (q0 - p0) + hvm(p1 - q1) + 3
+
+ pxor mm0, mm0 ;
+ pxor mm5, mm5
+ punpcklbw mm0, mm2 ;
+ punpckhbw mm5, mm2 ;
+ psraw mm0, 11 ;
+ psraw mm5, 11
+ packsswb mm0, mm5
+ movq mm2, mm0 ; (3* (q0 - p0) + hvm(p1 - q1) + 3) >> 3;
+
+ pxor mm0, mm0 ; 0
+ movq mm5, mm1 ; abcdefgh
+ punpcklbw mm0, mm1 ; e0f0g0h0
+ psraw mm0, 11 ; sign extended shift right by 3
+ pxor mm1, mm1 ; 0
+ punpckhbw mm1, mm5 ; a0b0c0d0
+ psraw mm1, 11 ; sign extended shift right by 3
+ movq mm5, mm0 ; save results
+
+ packsswb mm0, mm1 ; (3* (q0 - p0) + hvm(p1 - q1) + 4) >>3
+ paddsw mm5, [GLOBAL(ones)]
+ paddsw mm1, [GLOBAL(ones)]
+ psraw mm5, 1 ; partial shifted one more time for 2nd tap
+ psraw mm1, 1 ; partial shifted one more time for 2nd tap
+ packsswb mm5, mm1 ; (3* (q0 - p0) + hvm(p1 - q1) + 4) >>4
+ pandn mm4, mm5 ; high edge variance additive
+
+ paddsb mm6, mm2 ; p0+= p0 add
+ pxor mm6, [GLOBAL(t80)] ; unoffset
+ movq [rsi+rax], mm6 ; write back
+
+ movq mm6, [rsi+2*rax] ; p1
+ pxor mm6, [GLOBAL(t80)] ; reoffset
+ paddsb mm6, mm4 ; p1+= p1 add
+ pxor mm6, [GLOBAL(t80)] ; unoffset
+ movq [rsi+2*rax], mm6 ; write back
+
+ psubsb mm3, mm0 ; q0-= q0 add
+ pxor mm3, [GLOBAL(t80)] ; unoffset
+ movq [rsi], mm3 ; write back
+
+ psubsb mm7, mm4 ; q1-= q1 add
+ pxor mm7, [GLOBAL(t80)] ; unoffset
+ movq [rdi], mm7 ; write back
+
+ add rsi,8
+ neg rax
+ dec rcx
+ jnz .next8_h
+
+ add rsp, 32
+ pop rsp
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp9_loop_filter_vertical_edge_mmx
+;(
+; unsigned char *src_ptr,
+; int src_pixel_step,
+; const char *blimit,
+; const char *limit,
+; const char *thresh,
+; int count
+;)
+global sym(vp9_loop_filter_vertical_edge_mmx) PRIVATE
+sym(vp9_loop_filter_vertical_edge_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 64 ; reserve 64 bytes
+ %define t0 [rsp + 0] ;__declspec(align(16)) char t0[8];
+ %define t1 [rsp + 16] ;__declspec(align(16)) char t1[8];
+ %define srct [rsp + 32] ;__declspec(align(16)) char srct[32];
+
+ mov rsi, arg(0) ;src_ptr
+ movsxd rax, dword ptr arg(1) ;src_pixel_step ; source pitch
+
+ lea rsi, [rsi + rax*4 - 4]
+
+ movsxd rcx, dword ptr arg(5) ;count
+.next8_v:
+ mov rdi, rsi ; rdi points to row +1 for indirect addressing
+ add rdi, rax
+
+
+ ;transpose
+ movq mm6, [rsi+2*rax] ; 67 66 65 64 63 62 61 60
+ movq mm7, mm6 ; 77 76 75 74 73 72 71 70
+
+ punpckhbw mm7, [rdi+2*rax] ; 77 67 76 66 75 65 74 64
+ punpcklbw mm6, [rdi+2*rax] ; 73 63 72 62 71 61 70 60
+
+ movq mm4, [rsi] ; 47 46 45 44 43 42 41 40
+ movq mm5, mm4 ; 47 46 45 44 43 42 41 40
+
+ punpckhbw mm5, [rsi+rax] ; 57 47 56 46 55 45 54 44
+ punpcklbw mm4, [rsi+rax] ; 53 43 52 42 51 41 50 40
+
+ movq mm3, mm5 ; 57 47 56 46 55 45 54 44
+ punpckhwd mm5, mm7 ; 77 67 57 47 76 66 56 46
+
+ punpcklwd mm3, mm7 ; 75 65 55 45 74 64 54 44
+ movq mm2, mm4 ; 53 43 52 42 51 41 50 40
+
+ punpckhwd mm4, mm6 ; 73 63 53 43 72 62 52 42
+ punpcklwd mm2, mm6 ; 71 61 51 41 70 60 50 40
+
+ neg rax
+ movq mm6, [rsi+rax*2] ; 27 26 25 24 23 22 21 20
+
+ movq mm1, mm6 ; 27 26 25 24 23 22 21 20
+ punpckhbw mm6, [rsi+rax] ; 37 27 36 26 35 25 34 24
+
+ punpcklbw mm1, [rsi+rax] ; 33 23 32 22 31 21 30 20
+ movq mm7, [rsi+rax*4] ; 07 06 05 04 03 02 01 00
+
+ punpckhbw mm7, [rdi+rax*4] ; 17 07 16 06 15 05 14 04
+ movq mm0, mm7 ; 17 07 16 06 15 05 14 04
+
+ punpckhwd mm7, mm6 ; 37 27 17 07 36 26 16 06
+ punpcklwd mm0, mm6 ; 35 25 15 05 34 24 14 04
+
+ movq mm6, mm7 ; 37 27 17 07 36 26 16 06
+ punpckhdq mm7, mm5 ; 77 67 57 47 37 27 17 07 = q3
+
+ punpckldq mm6, mm5 ; 76 66 56 46 36 26 16 06 = q2
+
+ movq mm5, mm6 ; 76 66 56 46 36 26 16 06
+ psubusb mm5, mm7 ; q2-q3
+
+ psubusb mm7, mm6 ; q3-q2
+ por mm7, mm5 ; mm7=abs (q3-q2)
+
+ movq mm5, mm0 ; 35 25 15 05 34 24 14 04
+ punpckhdq mm5, mm3 ; 75 65 55 45 35 25 15 05 = q1
+
+ punpckldq mm0, mm3 ; 74 64 54 44 34 24 14 04 = q0
+ movq mm3, mm5 ; 75 65 55 45 35 25 15 05 = q1
+
+ psubusb mm3, mm6 ; q1-q2
+ psubusb mm6, mm5 ; q2-q1
+
+ por mm6, mm3 ; mm6=abs(q2-q1)
+ lea rdx, srct
+
+ movq [rdx+24], mm5 ; save q1
+ movq [rdx+16], mm0 ; save q0
+
+ movq mm3, [rsi+rax*4] ; 07 06 05 04 03 02 01 00
+ punpcklbw mm3, [rdi+rax*4] ; 13 03 12 02 11 01 10 00
+
+ movq mm0, mm3 ; 13 03 12 02 11 01 10 00
+ punpcklwd mm0, mm1 ; 31 21 11 01 30 20 10 00
+
+ punpckhwd mm3, mm1 ; 33 23 13 03 32 22 12 02
+ movq mm1, mm0 ; 31 21 11 01 30 20 10 00
+
+ punpckldq mm0, mm2 ; 70 60 50 40 30 20 10 00 =p3
+ punpckhdq mm1, mm2 ; 71 61 51 41 31 21 11 01 =p2
+
+ movq mm2, mm1 ; 71 61 51 41 31 21 11 01 =p2
+ psubusb mm2, mm0 ; p2-p3
+
+ psubusb mm0, mm1 ; p3-p2
+ por mm0, mm2 ; mm0=abs(p3-p2)
+
+ movq mm2, mm3 ; 33 23 13 03 32 22 12 02
+ punpckldq mm2, mm4 ; 72 62 52 42 32 22 12 02 = p1
+
+ punpckhdq mm3, mm4 ; 73 63 53 43 33 23 13 03 = p0
+ movq [rdx+8], mm3 ; save p0
+
+ movq [rdx], mm2 ; save p1
+ movq mm5, mm2 ; mm5 = p1
+
+ psubusb mm2, mm1 ; p1-p2
+ psubusb mm1, mm5 ; p2-p1
+
+ por mm1, mm2 ; mm1=abs(p2-p1)
+ mov rdx, arg(3) ;limit
+
+ movq mm4, [rdx] ; mm4 = limit
+ psubusb mm7, mm4
+
+ psubusb mm0, mm4
+ psubusb mm1, mm4
+
+ psubusb mm6, mm4
+ por mm7, mm6
+
+ por mm0, mm1
+ por mm0, mm7 ; abs(q3-q2) > limit || abs(p3-p2) > limit ||abs(p2-p1) > limit || abs(q2-q1) > limit
+
+ movq mm1, mm5 ; p1
+
+ movq mm7, mm3 ; mm3=mm7=p0
+ psubusb mm7, mm5 ; p0 - p1
+
+ psubusb mm5, mm3 ; p1 - p0
+ por mm5, mm7 ; abs(p1-p0)
+
+ movq t0, mm5 ; save abs(p1-p0)
+ lea rdx, srct
+
+ psubusb mm5, mm4
+ por mm0, mm5 ; mm0=mask
+
+ movq mm5, [rdx+16] ; mm5=q0
+ movq mm7, [rdx+24] ; mm7=q1
+
+ movq mm6, mm5 ; mm6=q0
+ movq mm2, mm7 ; q1
+ psubusb mm5, mm7 ; q0-q1
+
+ psubusb mm7, mm6 ; q1-q0
+ por mm7, mm5 ; abs(q1-q0)
+
+ movq t1, mm7 ; save abs(q1-q0)
+ psubusb mm7, mm4
+
+ por mm0, mm7 ; mask
+
+ movq mm5, mm2 ; q1
+ psubusb mm5, mm1 ; q1-=p1
+ psubusb mm1, mm2 ; p1-=q1
+ por mm5, mm1 ; abs(p1-q1)
+ pand mm5, [GLOBAL(tfe)] ; set lsb of each byte to zero
+ psrlw mm5, 1 ; abs(p1-q1)/2
+
+ mov rdx, arg(2) ;blimit ;
+
+ movq mm4, [rdx] ;blimit
+ movq mm1, mm3 ; mm1=mm3=p0
+
+ movq mm7, mm6 ; mm7=mm6=q0
+ psubusb mm1, mm7 ; p0-q0
+
+ psubusb mm7, mm3 ; q0-p0
+ por mm1, mm7 ; abs(q0-p0)
+ paddusb mm1, mm1 ; abs(q0-p0)*2
+ paddusb mm1, mm5 ; abs (p0 - q0) *2 + abs(p1-q1)/2
+
+ psubusb mm1, mm4 ; abs (p0 - q0) *2 + abs(p1-q1)/2 > blimit
+ por mm1, mm0 ; mask
+
+ pxor mm0, mm0
+ pcmpeqb mm1, mm0
+
+ ; calculate high edge variance
+ mov rdx, arg(4) ;thresh ; get thresh
+ movq mm7, [rdx]
+ ;
+ movq mm4, t0 ; get abs (q1 - q0)
+ psubusb mm4, mm7
+
+ movq mm3, t1 ; get abs (p1 - p0)
+ psubusb mm3, mm7
+
+ por mm4, mm3 ; abs(q1 - q0) > thresh || abs(p1 - p0) > thresh
+ pcmpeqb mm4, mm0
+
+ pcmpeqb mm0, mm0
+ pxor mm4, mm0
+
+
+
+ ; start work on filters
+ lea rdx, srct
+
+ movq mm2, [rdx] ; p1
+ movq mm7, [rdx+24] ; q1
+
+ movq mm6, [rdx+8] ; p0
+ movq mm0, [rdx+16] ; q0
+
+ pxor mm2, [GLOBAL(t80)] ; p1 offset to convert to signed values
+ pxor mm7, [GLOBAL(t80)] ; q1 offset to convert to signed values
+
+ psubsb mm2, mm7 ; p1 - q1
+ pand mm2, mm4 ; high var mask (hvm)(p1 - q1)
+
+ pxor mm6, [GLOBAL(t80)] ; offset to convert to signed values
+ pxor mm0, [GLOBAL(t80)] ; offset to convert to signed values
+
+ movq mm3, mm0 ; q0
+ psubsb mm0, mm6 ; q0 - p0
+
+ paddsb mm2, mm0 ; 1 * (q0 - p0) + hvm(p1 - q1)
+ paddsb mm2, mm0 ; 2 * (q0 - p0) + hvm(p1 - q1)
+
+ paddsb mm2, mm0 ; 3 * (q0 - p0) + hvm(p1 - q1)
+ pand mm1, mm2 ; mask filter values we don't care about
+
+ movq mm2, mm1
+ paddsb mm1, [GLOBAL(t4)] ; 3* (q0 - p0) + hvm(p1 - q1) + 4
+
+ paddsb mm2, [GLOBAL(t3)] ; 3* (q0 - p0) + hvm(p1 - q1) + 3
+ pxor mm0, mm0 ;
+
+ pxor mm5, mm5
+ punpcklbw mm0, mm2 ;
+
+ punpckhbw mm5, mm2 ;
+ psraw mm0, 11 ;
+
+ psraw mm5, 11
+ packsswb mm0, mm5
+
+ movq mm2, mm0 ; (3* (q0 - p0) + hvm(p1 - q1) + 3) >> 3;
+
+ pxor mm0, mm0 ; 0
+ movq mm5, mm1 ; abcdefgh
+
+ punpcklbw mm0, mm1 ; e0f0g0h0
+ psraw mm0, 11 ; sign extended shift right by 3
+
+ pxor mm1, mm1 ; 0
+ punpckhbw mm1, mm5 ; a0b0c0d0
+
+ psraw mm1, 11 ; sign extended shift right by 3
+ movq mm5, mm0 ; save results
+
+ packsswb mm0, mm1 ; (3* (q0 - p0) + hvm(p1 - q1) + 4) >>3
+ paddsw mm5, [GLOBAL(ones)]
+
+ paddsw mm1, [GLOBAL(ones)]
+ psraw mm5, 1 ; partial shifted one more time for 2nd tap
+
+ psraw mm1, 1 ; partial shifted one more time for 2nd tap
+ packsswb mm5, mm1 ; (3* (q0 - p0) + hvm(p1 - q1) + 4) >>4
+
+ pandn mm4, mm5 ; high edge variance additive
+
+ paddsb mm6, mm2 ; p0+= p0 add
+ pxor mm6, [GLOBAL(t80)] ; unoffset
+
+ ; mm6=p0 ;
+ movq mm1, [rdx] ; p1
+ pxor mm1, [GLOBAL(t80)] ; reoffset
+
+ paddsb mm1, mm4 ; p1+= p1 add
+ pxor mm1, [GLOBAL(t80)] ; unoffset
+ ; mm6 = p0 mm1 = p1
+
+ psubsb mm3, mm0 ; q0-= q0 add
+ pxor mm3, [GLOBAL(t80)] ; unoffset
+
+ ; mm3 = q0
+ psubsb mm7, mm4 ; q1-= q1 add
+ pxor mm7, [GLOBAL(t80)] ; unoffset
+ ; mm7 = q1
+
+ ; transpose and write back
+ ; mm1 = 72 62 52 42 32 22 12 02
+ ; mm6 = 73 63 53 43 33 23 13 03
+ ; mm3 = 74 64 54 44 34 24 14 04
+ ; mm7 = 75 65 55 45 35 25 15 05
+
+ movq mm2, mm1 ; 72 62 52 42 32 22 12 02
+ punpcklbw mm2, mm6 ; 33 32 23 22 13 12 03 02
+
+ movq mm4, mm3 ; 74 64 54 44 34 24 14 04
+ punpckhbw mm1, mm6 ; 73 72 63 62 53 52 43 42
+
+ punpcklbw mm4, mm7 ; 35 34 25 24 15 14 05 04
+ punpckhbw mm3, mm7 ; 75 74 65 64 55 54 45 44
+
+ movq mm6, mm2 ; 33 32 23 22 13 12 03 02
+ punpcklwd mm2, mm4 ; 15 14 13 12 05 04 03 02
+
+ punpckhwd mm6, mm4 ; 35 34 33 32 25 24 23 22
+ movq mm5, mm1 ; 73 72 63 62 53 52 43 42
+
+ punpcklwd mm1, mm3 ; 55 54 53 52 45 44 43 42
+ punpckhwd mm5, mm3 ; 75 74 73 72 65 64 63 62
+
+
+ ; mm2 = 15 14 13 12 05 04 03 02
+ ; mm6 = 35 34 33 32 25 24 23 22
+ ; mm5 = 55 54 53 52 45 44 43 42
+ ; mm1 = 75 74 73 72 65 64 63 62
+
+
+
+ movd [rsi+rax*4+2], mm2
+ psrlq mm2, 32
+
+ movd [rdi+rax*4+2], mm2
+ movd [rsi+rax*2+2], mm6
+
+ psrlq mm6, 32
+ movd [rsi+rax+2],mm6
+
+ movd [rsi+2], mm1
+ psrlq mm1, 32
+
+ movd [rdi+2], mm1
+ neg rax
+
+ movd [rdi+rax+2],mm5
+ psrlq mm5, 32
+
+ movd [rdi+rax*2+2], mm5
+
+ lea rsi, [rsi+rax*8]
+ dec rcx
+ jnz .next8_v
+
+ add rsp, 64
+ pop rsp
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
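
The pxor-with-t80 pairs throughout are a domain change, not a filter step: XOR with 0x80 maps unsigned pixels 0..255 onto signed -128..127 so that paddsb/psubsb saturate at the right bounds, and a second XOR ("unoffset") maps back. One tap update in scalar form:

    #include <stdint.h>

    static uint8_t apply_tap(uint8_t px, int delta) {
      int v = (int8_t)(px ^ 0x80) + delta;  /* into the signed domain */
      if (v < -128) v = -128;               /* paddsb/psubsb-style saturation */
      if (v > 127) v = 127;
      return (uint8_t)(v ^ 0x80);           /* back to unsigned pixels */
    }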
+
+SECTION_RODATA
+align 16
+tfe:
+ times 8 db 0xfe
+align 16
+t80:
+ times 8 db 0x80
+align 16
+t1s:
+ times 8 db 0x01
+align 16
+t3:
+ times 8 db 0x03
+align 16
+t4:
+ times 8 db 0x04
+align 16
+ones:
+ times 4 dw 0x0001
+align 16
+s27:
+ times 4 dw 0x1b00
+align 16
+s18:
+ times 4 dw 0x1200
+align 16
+s9:
+ times 4 dw 0x0900
+align 16
+s63:
+ times 4 dw 0x003f
diff --git a/libvpx/vp9/common/x86/vp9_loopfilter_sse2.asm b/libvpx/vp9/common/x86/vp9_loopfilter_sse2.asm
new file mode 100644
index 0000000..74236cf
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_loopfilter_sse2.asm
@@ -0,0 +1,872 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+; Use of pmaxub instead of psubusb to compute the filter mask was seen
+; in ffvp8 (FFmpeg's VP8 decoder)
+
+%macro LFH_FILTER_AND_HEV_MASK 1
+%if %1
+ movdqa xmm2, [rdi+2*rax] ; q3
+ movdqa xmm1, [rsi+2*rax] ; q2
+ movdqa xmm4, [rsi+rax] ; q1
+ movdqa xmm5, [rsi] ; q0
+ neg rax ; negate pitch to deal with above border
+%else
+ movlps xmm2, [rsi + rcx*2] ; q3
+ movlps xmm1, [rsi + rcx] ; q2
+ movlps xmm4, [rsi] ; q1
+ movlps xmm5, [rsi + rax] ; q0
+
+ movhps xmm2, [rdi + rcx*2]
+ movhps xmm1, [rdi + rcx]
+ movhps xmm4, [rdi]
+ movhps xmm5, [rdi + rax]
+
+ lea rsi, [rsi + rax*4]
+ lea rdi, [rdi + rax*4]
+
+ movdqa XMMWORD PTR [rsp], xmm1 ; store q2
+ movdqa XMMWORD PTR [rsp + 16], xmm4 ; store q1
+%endif
+
+ movdqa xmm6, xmm1 ; q2
+ movdqa xmm3, xmm4 ; q1
+
+ psubusb xmm1, xmm2 ; q2-=q3
+ psubusb xmm2, xmm6 ; q3-=q2
+
+ psubusb xmm4, xmm6 ; q1-=q2
+ psubusb xmm6, xmm3 ; q2-=q1
+
+ por xmm4, xmm6 ; abs(q2-q1)
+ por xmm1, xmm2 ; abs(q3-q2)
+
+ movdqa xmm0, xmm5 ; q0
+ pmaxub xmm1, xmm4
+
+ psubusb xmm5, xmm3 ; q0-=q1
+ psubusb xmm3, xmm0 ; q1-=q0
+
+ por xmm5, xmm3 ; abs(q0-q1)
+ movdqa t0, xmm5 ; save to t0
+
+ pmaxub xmm1, xmm5
+
+%if %1
+ movdqa xmm2, [rsi+4*rax] ; p3
+ movdqa xmm4, [rdi+4*rax] ; p2
+ movdqa xmm6, [rsi+2*rax] ; p1
+%else
+ movlps xmm2, [rsi + rax] ; p3
+ movlps xmm4, [rsi] ; p2
+ movlps xmm6, [rsi + rcx] ; p1
+
+ movhps xmm2, [rdi + rax]
+ movhps xmm4, [rdi]
+ movhps xmm6, [rdi + rcx]
+
+ movdqa XMMWORD PTR [rsp + 32], xmm4 ; store p2
+ movdqa XMMWORD PTR [rsp + 48], xmm6 ; store p1
+%endif
+
+ movdqa xmm5, xmm4 ; p2
+ movdqa xmm3, xmm6 ; p1
+
+ psubusb xmm4, xmm2 ; p2-=p3
+ psubusb xmm2, xmm5 ; p3-=p2
+
+ psubusb xmm3, xmm5 ; p1-=p2
+ pmaxub xmm1, xmm4 ; abs(p3 - p2)
+
+ psubusb xmm5, xmm6 ; p2-=p1
+ pmaxub xmm1, xmm2 ; abs(p3 - p2)
+
+ pmaxub xmm1, xmm5 ; abs(p2 - p1)
+ movdqa xmm2, xmm6 ; p1
+
+ pmaxub xmm1, xmm3 ; abs(p2 - p1)
+%if %1
+ movdqa xmm4, [rsi+rax] ; p0
+ movdqa xmm3, [rdi] ; q1
+%else
+ movlps xmm4, [rsi + rcx*2] ; p0
+ movhps xmm4, [rdi + rcx*2]
+ movdqa xmm3, q1 ; q1
+%endif
+
+ movdqa xmm5, xmm4 ; p0
+ psubusb xmm4, xmm6 ; p0-=p1
+
+ psubusb xmm6, xmm5 ; p1-=p0
+
+ por xmm6, xmm4 ; abs(p1 - p0)
+ mov rdx, arg(2) ; get blimit
+
+ movdqa t1, xmm6 ; save to t1
+
+ movdqa xmm4, xmm3 ; q1
+ pmaxub xmm1, xmm6
+
+ psubusb xmm3, xmm2 ; q1-=p1
+ psubusb xmm2, xmm4 ; p1-=q1
+
+ psubusb xmm1, xmm7
+ por xmm2, xmm3 ; abs(p1-q1)
+
+ movdqa xmm7, XMMWORD PTR [rdx] ; blimit
+
+ movdqa xmm3, xmm0 ; q0
+ pand xmm2, [GLOBAL(tfe)] ; set lsb of each byte to zero
+
+ mov rdx, arg(4) ; hev get thresh
+
+ movdqa xmm6, xmm5 ; p0
+ psrlw xmm2, 1 ; abs(p1-q1)/2
+
+ psubusb xmm5, xmm3 ; p0-=q0
+
+ psubusb xmm3, xmm6 ; q0-=p0
+ por xmm5, xmm3 ; abs(p0 - q0)
+
+ paddusb xmm5, xmm5 ; abs(p0-q0)*2
+
+ movdqa xmm4, t0 ; hev get abs (q1 - q0)
+
+ movdqa xmm3, t1 ; get abs (p1 - p0)
+
+ paddusb xmm5, xmm2 ; abs (p0 - q0) *2 + abs(p1-q1)/2
+
+ movdqa xmm2, XMMWORD PTR [rdx] ; hev
+
+ psubusb xmm5, xmm7 ; abs (p0 - q0) *2 + abs(p1-q1)/2 > blimit
+ psubusb xmm4, xmm2 ; hev
+
+ psubusb xmm3, xmm2 ; hev
+ por xmm1, xmm5
+
+ pxor xmm7, xmm7
+ paddb xmm4, xmm3 ; hev abs(q1 - q0) > thresh || abs(p1 - p0) > thresh
+
+ pcmpeqb xmm4, xmm5 ; hev
+ pcmpeqb xmm3, xmm3 ; hev
+
+ pcmpeqb xmm1, xmm7 ; mask xmm1
+ pxor xmm4, xmm3 ; hev
+%endmacro
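
Read as per-pixel booleans, the two vectors this macro leaves behind mean the following (a scalar restatement assembled from the inline comments; the SIMD code computes both branch-free with saturating subtracts and pmaxub):

    #include <stdlib.h>

    /* mask: filter this edge at all */
    static int filter_mask(int limit, int blimit, int p3, int p2, int p1,
                           int p0, int q0, int q1, int q2, int q3) {
      return abs(p3 - p2) <= limit && abs(p2 - p1) <= limit &&
             abs(p1 - p0) <= limit && abs(q1 - q0) <= limit &&
             abs(q2 - q1) <= limit && abs(q3 - q2) <= limit &&
             abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit;
    }

    /* hev: high edge variance, selects the stronger filter path */
    static int hev_mask(int thresh, int p1, int p0, int q0, int q1) {
      return abs(p1 - p0) > thresh || abs(q1 - q0) > thresh;
    }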
+
+%macro B_FILTER 1
+%if %1 == 0
+ movdqa xmm2, p1 ; p1
+ movdqa xmm7, q1 ; q1
+%elif %1 == 1
+ movdqa xmm2, [rsi+2*rax] ; p1
+ movdqa xmm7, [rdi] ; q1
+%elif %1 == 2
+ lea rdx, srct
+
+ movdqa xmm2, [rdx] ; p1
+ movdqa xmm7, [rdx+48] ; q1
+ movdqa xmm6, [rdx+16] ; p0
+ movdqa xmm0, [rdx+32] ; q0
+%endif
+
+ pxor xmm2, [GLOBAL(t80)] ; p1 offset to convert to signed values
+ pxor xmm7, [GLOBAL(t80)] ; q1 offset to convert to signed values
+
+ psubsb xmm2, xmm7 ; p1 - q1
+ pxor xmm6, [GLOBAL(t80)] ; offset to convert to signed values
+
+ pand xmm2, xmm4 ; high var mask (hvm)(p1 - q1)
+ pxor xmm0, [GLOBAL(t80)] ; offset to convert to signed values
+
+ movdqa xmm3, xmm0 ; q0
+ psubsb xmm0, xmm6 ; q0 - p0
+
+ paddsb xmm2, xmm0 ; 1 * (q0 - p0) + hvm(p1 - q1)
+
+ paddsb xmm2, xmm0 ; 2 * (q0 - p0) + hvm(p1 - q1)
+
+ paddsb xmm2, xmm0 ; 3 * (q0 - p0) + hvm(p1 - q1)
+
+ pand xmm1, xmm2 ; mask filter values we don't care about
+
+ movdqa xmm2, xmm1
+
+ paddsb xmm1, [GLOBAL(t4)] ; 3* (q0 - p0) + hvm(p1 - q1) + 4
+ paddsb xmm2, [GLOBAL(t3)] ; 3* (q0 - p0) + hvm(p1 - q1) + 3
+
+ punpckhbw xmm5, xmm2 ; axbxcxdx
+ punpcklbw xmm2, xmm2 ; exfxgxhx
+
+ punpcklbw xmm0, xmm1 ; exfxgxhx
+ psraw xmm5, 11 ; sign extended shift right by 3
+
+ punpckhbw xmm1, xmm1 ; axbxcxdx
+ psraw xmm2, 11 ; sign extended shift right by 3
+
+ packsswb xmm2, xmm5 ; (3* (q0 - p0) + hvm(p1 - q1) + 3) >> 3;
+ psraw xmm0, 11 ; sign extended shift right by 3
+
+ psraw xmm1, 11 ; sign extended shift right by 3
+ movdqa xmm5, xmm0 ; save results
+
+ packsswb xmm0, xmm1 ; (3* (q0 - p0) + hvm(p1 - q1) + 4) >>3
+ paddsw xmm5, [GLOBAL(ones)]
+
+ paddsw xmm1, [GLOBAL(ones)]
+ psraw xmm5, 1 ; partial shifted one more time for 2nd tap
+
+ psraw xmm1, 1 ; partial shifted one more time for 2nd tap
+
+ paddsb xmm6, xmm2 ; p0+= p0 add
+ packsswb xmm5, xmm1 ; (3* (q0 - p0) + hvm(p1 - q1) + 4) >>4
+
+%if %1 == 0
+ movdqa xmm1, p1 ; p1
+%elif %1 == 1
+ movdqa xmm1, [rsi+2*rax] ; p1
+%elif %1 == 2
+ movdqa xmm1, [rdx] ; p1
+%endif
+ pandn xmm4, xmm5 ; high edge variance additive
+ pxor xmm6, [GLOBAL(t80)] ; unoffset
+
+ pxor xmm1, [GLOBAL(t80)] ; reoffset
+ psubsb xmm3, xmm0 ; q0-= q0 add
+
+ paddsb xmm1, xmm4 ; p1+= p1 add
+ pxor xmm3, [GLOBAL(t80)] ; unoffset
+
+ pxor xmm1, [GLOBAL(t80)] ; unoffset
+ psubsb xmm7, xmm4 ; q1-= q1 add
+
+ pxor xmm7, [GLOBAL(t80)] ; unoffset
+%if %1 == 0
+ lea rsi, [rsi + rcx*2]
+ lea rdi, [rdi + rcx*2]
+ movq MMWORD PTR [rsi], xmm6 ; p0
+ movhps MMWORD PTR [rdi], xmm6
+ movq MMWORD PTR [rsi + rax], xmm1 ; p1
+ movhps MMWORD PTR [rdi + rax], xmm1
+ movq MMWORD PTR [rsi + rcx], xmm3 ; q0
+ movhps MMWORD PTR [rdi + rcx], xmm3
+ movq MMWORD PTR [rsi + rcx*2],xmm7 ; q1
+ movhps MMWORD PTR [rdi + rcx*2],xmm7
+%elif %1 == 1
+ movdqa [rsi+rax], xmm6 ; write back
+ movdqa [rsi+2*rax], xmm1 ; write back
+ movdqa [rsi], xmm3 ; write back
+ movdqa [rdi], xmm7 ; write back
+%endif
+
+%endmacro
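
B_FILTER is the standard 4-tap filter; in scalar form it reads as below (a sketch matching the macro's comments, with the three saturating adds folded into one clamp):

    #include <stdint.h>

    static int8_t sclamp(int v) {           /* paddsb/psubsb saturation */
      return (int8_t)(v < -128 ? -128 : v > 127 ? 127 : v);
    }

    static void b_filter_scalar(uint8_t *p1, uint8_t *p0, uint8_t *q0,
                                uint8_t *q1, int mask, int hev) {
      int8_t ps1 = (int8_t)(*p1 ^ 0x80), ps0 = (int8_t)(*p0 ^ 0x80);
      int8_t qs0 = (int8_t)(*q0 ^ 0x80), qs1 = (int8_t)(*q1 ^ 0x80);
      int f = hev ? sclamp(ps1 - qs1) : 0;  /* hvm(p1 - q1) */
      int f1, f2;
      f = mask ? sclamp(f + 3 * (qs0 - ps0)) : 0;
      f1 = sclamp(f + 4) >> 3;              /* the psraw-11 sequences */
      f2 = sclamp(f + 3) >> 3;
      *q0 = (uint8_t)(sclamp(qs0 - f1) ^ 0x80);
      *p0 = (uint8_t)(sclamp(ps0 + f2) ^ 0x80);
      f = hev ? 0 : (f1 + 1) >> 1;          /* outer taps only without hev */
      *q1 = (uint8_t)(sclamp(qs1 - f) ^ 0x80);
      *p1 = (uint8_t)(sclamp(ps1 + f) ^ 0x80);
    }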
+
+
+;void vp9_loop_filter_horizontal_edge_sse2
+;(
+; unsigned char *src_ptr,
+; int src_pixel_step,
+; const char *blimit,
+; const char *limit,
+; const char *thresh,
+; int count
+;)
+global sym(vp9_loop_filter_horizontal_edge_sse2) PRIVATE
+sym(vp9_loop_filter_horizontal_edge_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 32 ; reserve 32 bytes
+ %define t0 [rsp + 0] ;__declspec(align(16)) char t0[16];
+ %define t1 [rsp + 16] ;__declspec(align(16)) char t1[16];
+
+ mov rsi, arg(0) ;src_ptr
+ movsxd rax, dword ptr arg(1) ;src_pixel_step
+
+ mov rdx, arg(3) ;limit
+ movdqa xmm7, XMMWORD PTR [rdx]
+
+ lea rdi, [rsi+rax] ; rdi points to row +1 for indirect addressing
+
+ ; calculate breakout conditions and high edge variance
+ LFH_FILTER_AND_HEV_MASK 1
+ ; filter and write back the result
+ B_FILTER 1
+
+ add rsp, 32
+ pop rsp
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp9_loop_filter_horizontal_edge_uv_sse2
+;(
+; unsigned char *src_ptr,
+; int src_pixel_step,
+; const char *blimit,
+; const char *limit,
+; const char *thresh,
+; int count
+;)
+global sym(vp9_loop_filter_horizontal_edge_uv_sse2) PRIVATE
+sym(vp9_loop_filter_horizontal_edge_uv_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 96 ; reserve 96 bytes
+ %define q2 [rsp + 0] ;__declspec(align(16)) char q2[16];
+ %define q1 [rsp + 16] ;__declspec(align(16)) char q1[16];
+ %define p2 [rsp + 32] ;__declspec(align(16)) char p2[16];
+ %define p1 [rsp + 48] ;__declspec(align(16)) char p1[16];
+ %define t0 [rsp + 64] ;__declspec(align(16)) char t0[16];
+ %define t1 [rsp + 80] ;__declspec(align(16)) char t1[16];
+
+ mov rsi, arg(0) ; u
+ mov rdi, arg(5) ; v
+ movsxd rax, dword ptr arg(1) ; src_pixel_step
+ mov rcx, rax
+ neg rax ; negate pitch to deal with above border
+
+ mov rdx, arg(3) ;limit
+ movdqa xmm7, XMMWORD PTR [rdx]
+
+ lea rsi, [rsi + rcx]
+ lea rdi, [rdi + rcx]
+
+ ; calculate breakout conditions and high edge variance
+ LFH_FILTER_AND_HEV_MASK 0
+ ; filter and write back the result
+ B_FILTER 0
+
+ add rsp, 96
+ pop rsp
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+%macro TRANSPOSE_16X8 2
+ movq xmm4, QWORD PTR [rsi] ; xx xx xx xx xx xx xx xx 07 06 05 04 03 02 01 00
+ movq xmm1, QWORD PTR [rdi] ; xx xx xx xx xx xx xx xx 17 16 15 14 13 12 11 10
+ movq xmm0, QWORD PTR [rsi+2*rax] ; xx xx xx xx xx xx xx xx 27 26 25 24 23 22 21 20
+ movq xmm7, QWORD PTR [rdi+2*rax] ; xx xx xx xx xx xx xx xx 37 36 35 34 33 32 31 30
+ movq xmm5, QWORD PTR [rsi+4*rax] ; xx xx xx xx xx xx xx xx 47 46 45 44 43 42 41 40
+ movq xmm2, QWORD PTR [rdi+4*rax] ; xx xx xx xx xx xx xx xx 57 56 55 54 53 52 51 50
+
+ punpcklbw xmm4, xmm1 ; 17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00
+
+ movq xmm1, QWORD PTR [rdi+2*rcx] ; xx xx xx xx xx xx xx xx 77 76 75 74 73 72 71 70
+
+ movdqa xmm3, xmm4 ; 17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00
+ punpcklbw xmm0, xmm7 ; 37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20
+
+ movq xmm7, QWORD PTR [rsi+2*rcx] ; xx xx xx xx xx xx xx xx 67 66 65 64 63 62 61 60
+
+ punpcklbw xmm5, xmm2 ; 57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40
+%if %1
+ lea rsi, [rsi+rax*8]
+%else
+ mov rsi, arg(5) ; v_ptr
+%endif
+
+ movdqa xmm6, xmm5 ; 57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40
+ punpcklbw xmm7, xmm1 ; 77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60
+
+ punpcklwd xmm5, xmm7 ; 73 63 53 43 72 62 52 42 71 61 51 41 70 60 50 40
+
+ punpckhwd xmm6, xmm7 ; 77 67 57 47 76 66 56 46 75 65 55 45 74 64 54 44
+%if %1
+ lea rdi, [rdi+rax*8]
+%else
+ lea rsi, [rsi - 4]
+%endif
+
+ punpcklwd xmm3, xmm0 ; 33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00
+%if %1
+ lea rdx, srct
+%else
+ lea rdi, [rsi + rax] ; rdi points to row +1 for indirect addressing
+%endif
+
+ movdqa xmm2, xmm3 ; 33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00
+ punpckhwd xmm4, xmm0 ; 37 27 17 07 36 26 16 06 35 25 15 05 34 24 14 04
+
+ movdqa xmm7, xmm4 ; 37 27 17 07 36 26 16 06 35 25 15 05 34 24 14 04
+ punpckhdq xmm3, xmm5 ; 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
+
+ punpckhdq xmm7, xmm6 ; 77 67 57 47 37 27 17 07 76 66 56 46 36 26 16 06
+
+ punpckldq xmm4, xmm6 ; 75 65 55 45 35 25 15 05 74 64 54 44 34 24 14 04
+
+ punpckldq xmm2, xmm5 ; 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
+
+ movdqa t0, xmm2 ; save to free XMM2
+ movq xmm2, QWORD PTR [rsi] ; xx xx xx xx xx xx xx xx 87 86 85 84 83 82 81 80
+ movq xmm6, QWORD PTR [rdi] ; xx xx xx xx xx xx xx xx 97 96 95 94 93 92 91 90
+ movq xmm0, QWORD PTR [rsi+2*rax] ; xx xx xx xx xx xx xx xx a7 a6 a5 a4 a3 a2 a1 a0
+ movq xmm5, QWORD PTR [rdi+2*rax] ; xx xx xx xx xx xx xx xx b7 b6 b5 b4 b3 b2 b1 b0
+ movq xmm1, QWORD PTR [rsi+4*rax] ; xx xx xx xx xx xx xx xx c7 c6 c5 c4 c3 c2 c1 c0
+
+ punpcklbw xmm2, xmm6 ; 97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80
+
+ movq xmm6, QWORD PTR [rdi+4*rax] ; xx xx xx xx xx xx xx xx d7 d6 d5 d4 d3 d2 d1 d0
+
+ punpcklbw xmm0, xmm5 ; b7 a7 b6 a6 b5 a5 b4 a4 b3 a3 b2 a2 b1 a1 b0 a0
+
+ movq xmm5, QWORD PTR [rsi+2*rcx] ; xx xx xx xx xx xx xx xx e7 e6 e5 e4 e3 e2 e1 e0
+
+ punpcklbw xmm1, xmm6 ; d7 c7 d6 c6 d5 c5 d4 c4 d3 c3 d2 c2 d1 c1 d0 c0
+
+ movq xmm6, QWORD PTR [rdi+2*rcx] ; xx xx xx xx xx xx xx xx f7 f6 f5 f4 f3 f2 f1 f0
+
+ punpcklbw xmm5, xmm6 ; f7 e7 f6 e6 f5 e5 f4 e4 f3 e3 f2 e2 f1 e1 f0 e0
+
+ movdqa xmm6, xmm1 ;
+ punpckhwd xmm6, xmm5 ; f7 e7 d7 c7 f6 e6 d6 c6 f5 e5 d5 c5 f4 e4 d4 c4
+
+ punpcklwd xmm1, xmm5 ; f3 e3 d3 c3 f2 e2 d2 c2 f1 e1 d1 c1 f0 e0 d0 c0
+ movdqa xmm5, xmm2 ; 97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80
+
+ punpcklwd xmm5, xmm0 ; b3 a3 93 83 b2 a2 92 82 b1 a1 91 81 b0 a0 90 80
+
+ punpckhwd xmm2, xmm0 ; b7 a7 97 87 b6 a6 96 86 b5 a5 95 85 b4 a4 94 84
+
+ movdqa xmm0, xmm5
+ punpckldq xmm0, xmm1 ; f1 e1 d1 c1 b1 a1 91 81 f0 e0 d0 c0 b0 a0 90 80
+
+ punpckhdq xmm5, xmm1 ; f3 e3 d3 c3 b3 a3 93 83 f2 e2 d2 c2 b2 a2 92 82
+ movdqa xmm1, xmm2 ; b7 a7 97 87 b6 a6 96 86 b5 a5 95 85 b4 a4 94 84
+
+ punpckldq xmm1, xmm6 ; f5 e5 d5 c5 b5 a5 95 85 f4 e4 d4 c4 b4 a4 94 84
+
+ punpckhdq xmm2, xmm6 ; f7 e7 d7 c7 b7 a7 97 87 f6 e6 d6 c6 b6 a6 96 86
+ movdqa xmm6, xmm7 ; 77 67 57 47 37 27 17 07 76 66 56 46 36 26 16 06
+
+ punpcklqdq xmm6, xmm2 ; f6 e6 d6 c6 b6 a6 96 86 76 66 56 46 36 26 16 06
+
+ punpckhqdq xmm7, xmm2 ; f7 e7 d7 c7 b7 a7 97 87 77 67 57 47 37 27 17 07
+%if %2
+ movdqa xmm2, xmm3 ; 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
+ punpcklqdq xmm2, xmm5 ; f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
+
+ punpckhqdq xmm3, xmm5 ; f3 e3 d3 c3 b3 a3 93 83 73 63 53 43 33 23 13 03
+
+ movdqa [rdx], xmm2 ; save 2
+
+ movdqa xmm5, xmm4 ; 75 65 55 45 35 25 15 05 74 64 54 44 34 24 14 04
+ punpcklqdq xmm4, xmm1 ; f4 e4 d4 c4 b4 a4 94 84 74 64 54 44 34 24 14 04
+
+ movdqa [rdx+16], xmm3 ; save 3
+
+ punpckhqdq xmm5, xmm1 ; f5 e5 d5 c5 b5 a5 95 85 75 65 55 45 35 25 15 05
+
+ movdqa [rdx+32], xmm4 ; save 4
+ movdqa [rdx+48], xmm5 ; save 5
+ movdqa xmm1, t0 ; get
+
+ movdqa xmm2, xmm1 ;
+ punpckhqdq xmm1, xmm0 ; f1 e1 d1 c1 b1 a1 91 81 71 61 51 41 31 21 11 01
+
+ punpcklqdq xmm2, xmm0 ; f0 e0 d0 c0 b0 a0 90 80 70 60 50 40 30 20 10 00
+%else
+ movdqa [rdx+112], xmm7 ; save 7
+
+ movdqa [rdx+96], xmm6 ; save 6
+
+ movdqa xmm2, xmm3 ; 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
+ punpckhqdq xmm3, xmm5 ; f3 e3 d3 c3 b3 a3 93 83 73 63 53 43 33 23 13 03
+
+ punpcklqdq xmm2, xmm5 ; f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
+
+ movdqa [rdx+32], xmm2 ; save 2
+
+ movdqa xmm5, xmm4 ; 75 65 55 45 35 25 15 05 74 64 54 44 34 24 14 04
+ punpcklqdq xmm4, xmm1 ; f4 e4 d4 c4 b4 a4 94 84 74 64 54 44 34 24 14 04
+
+ movdqa [rdx+48], xmm3 ; save 3
+
+ punpckhqdq xmm5, xmm1 ; f5 e5 d5 c5 b5 a5 95 85 75 65 55 45 35 25 15 05
+
+ movdqa [rdx+64], xmm4 ; save 4
+ movdqa [rdx+80], xmm5 ; save 5
+ movdqa xmm1, t0 ; get
+
+ movdqa xmm2, xmm1
+ punpckhqdq xmm1, xmm0 ; f1 e1 d1 c1 b1 a1 91 81 71 61 51 41 31 21 11 01
+
+ punpcklqdq xmm2, xmm0 ; f0 e0 d0 c0 b0 a0 90 80 70 60 50 40 30 20 10 00
+
+ movdqa [rdx+16], xmm1
+
+ movdqa [rdx], xmm2
+%endif
+%endmacro
+
+%macro LFV_FILTER_MASK_HEV_MASK 1
+ movdqa xmm0, xmm6 ; q2
+ psubusb xmm0, xmm7 ; q2-q3
+
+ psubusb xmm7, xmm6 ; q3-q2
+ movdqa xmm4, xmm5 ; q1
+
+ por xmm7, xmm0 ; abs (q3-q2)
+ psubusb xmm4, xmm6 ; q1-q2
+
+ movdqa xmm0, xmm1
+ psubusb xmm6, xmm5 ; q2-q1
+
+ por xmm6, xmm4 ; abs (q2-q1)
+ psubusb xmm0, xmm2 ; p2 - p3;
+
+ psubusb xmm2, xmm1 ; p3 - p2;
+ por xmm0, xmm2 ; abs(p2-p3)
+%if %1
+ movdqa xmm2, [rdx] ; p1
+%else
+ movdqa xmm2, [rdx+32] ; p1
+%endif
+ movdqa xmm5, xmm2 ; p1
+ pmaxub xmm0, xmm7
+
+ psubusb xmm5, xmm1 ; p1-p2
+ psubusb xmm1, xmm2 ; p2-p1
+
+ movdqa xmm7, xmm3 ; p0
+ psubusb xmm7, xmm2 ; p0-p1
+
+ por xmm1, xmm5 ; abs(p2-p1)
+ pmaxub xmm0, xmm6
+
+ pmaxub xmm0, xmm1
+ movdqa xmm1, xmm2 ; p1
+
+ psubusb xmm2, xmm3 ; p1-p0
+ lea rdx, srct
+
+ por xmm2, xmm7 ; abs(p1-p0)
+
+ movdqa t0, xmm2 ; save abs(p1-p0)
+
+ pmaxub xmm0, xmm2
+
+%if %1
+ movdqa xmm5, [rdx+32] ; q0
+ movdqa xmm7, [rdx+48] ; q1
+%else
+ movdqa xmm5, [rdx+64] ; q0
+ movdqa xmm7, [rdx+80] ; q1
+%endif
+ mov rdx, arg(3) ; limit
+
+ movdqa xmm6, xmm5 ; q0
+ movdqa xmm2, xmm7 ; q1
+
+ psubusb xmm5, xmm7 ; q0-q1
+ psubusb xmm7, xmm6 ; q1-q0
+
+ por xmm7, xmm5 ; abs(q1-q0)
+
+ movdqa t1, xmm7 ; save abs(q1-q0)
+
+ movdqa xmm4, XMMWORD PTR [rdx]; limit
+
+ pmaxub xmm0, xmm7
+ mov rdx, arg(2) ; blimit
+
+ psubusb xmm0, xmm4
+ movdqa xmm5, xmm2 ; q1
+
+ psubusb xmm5, xmm1 ; q1-=p1
+ psubusb xmm1, xmm2 ; p1-=q1
+
+ por xmm5, xmm1 ; abs(p1-q1)
+ movdqa xmm1, xmm3 ; p0
+
+ pand xmm5, [GLOBAL(tfe)] ; set lsb of each byte to zero
+ psubusb xmm1, xmm6 ; p0-q0
+
+ psrlw xmm5, 1 ; abs(p1-q1)/2
+ psubusb xmm6, xmm3 ; q0-p0
+
+ movdqa xmm4, XMMWORD PTR [rdx]; blimit
+
+ mov rdx, arg(4) ; get thresh
+
+ por xmm1, xmm6 ; abs(q0-p0)
+
+ movdqa xmm6, t0 ; get abs (q1 - q0)
+
+ paddusb xmm1, xmm1 ; abs(q0-p0)*2
+
+ movdqa xmm3, t1 ; get abs (p1 - p0)
+
+ movdqa xmm7, XMMWORD PTR [rdx]
+
+ paddusb xmm1, xmm5 ; abs (p0 - q0) *2 + abs(p1-q1)/2
+ psubusb xmm6, xmm7 ; abs(q1 - q0) > thresh
+
+ psubusb xmm3, xmm7 ; abs(p1 - p0)> thresh
+
+ psubusb xmm1, xmm4 ; abs (p0 - q0) *2 + abs(p1-q1)/2 > blimit
+ por xmm6, xmm3 ; abs(q1 - q0) > thresh || abs(p1 - p0) > thresh
+
+ por xmm1, xmm0 ; mask
+ pcmpeqb xmm6, xmm0
+
+ pxor xmm0, xmm0
+ pcmpeqb xmm4, xmm4
+
+ pcmpeqb xmm1, xmm0
+ pxor xmm4, xmm6
+%endmacro
+
+%macro BV_TRANSPOSE 0
+ ; xmm1 = f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
+ ; xmm6 = f3 e3 d3 c3 b3 a3 93 83 73 63 53 43 33 23 13 03
+ ; xmm3 = f4 e4 d4 c4 b4 a4 94 84 74 64 54 44 34 24 14 04
+ ; xmm7 = f5 e5 d5 c5 b5 a5 95 85 75 65 55 45 35 25 15 05
+ movdqa xmm2, xmm1 ; f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
+ punpcklbw xmm2, xmm6 ; 73 72 63 62 53 52 43 42 33 32 23 22 13 12 03 02
+
+ movdqa xmm4, xmm3 ; f4 e4 d4 c4 b4 a4 94 84 74 64 54 44 34 24 14 04
+ punpckhbw xmm1, xmm6 ; f3 f2 e3 e2 d3 d2 c3 c2 b3 b2 a3 a2 93 92 83 82
+
+ punpcklbw xmm4, xmm7 ; 75 74 65 64 55 54 45 44 35 34 25 24 15 14 05 04
+
+ punpckhbw xmm3, xmm7 ; f5 f4 e5 e4 d5 d4 c5 c4 b5 b4 a5 a4 95 94 85 84
+
+ movdqa xmm6, xmm2 ; 73 72 63 62 53 52 43 42 33 32 23 22 13 12 03 02
+ punpcklwd xmm2, xmm4 ; 35 34 33 32 25 24 23 22 15 14 13 12 05 04 03 02
+
+ punpckhwd xmm6, xmm4 ; 75 74 73 72 65 64 63 62 55 54 53 52 45 44 43 42
+ movdqa xmm5, xmm1 ; f3 f2 e3 e2 d3 d2 c3 c2 b3 b2 a3 a2 93 92 83 82
+
+ punpcklwd xmm1, xmm3 ; b5 b4 b3 b2 a5 a4 a3 a2 95 94 93 92 85 84 83 82
+
+ punpckhwd xmm5, xmm3 ; f5 f4 f3 f2 e5 e4 e3 e2 d5 d4 d3 d2 c5 c4 c3 c2
+ ; xmm2 = 35 34 33 32 25 24 23 22 15 14 13 12 05 04 03 02
+ ; xmm6 = 75 74 73 72 65 64 63 62 55 54 53 52 45 44 43 42
+ ; xmm1 = b5 b4 b3 b2 a5 a4 a3 a2 95 94 93 92 85 84 83 82
+ ; xmm5 = f5 f4 f3 f2 e5 e4 e3 e2 d5 d4 d3 d2 c5 c4 c3 c2
+%endmacro
+
+%macro BV_WRITEBACK 2
+ movd [rsi+2], %1
+ psrldq %1, 4
+
+ movd [rdi+2], %1
+ psrldq %1, 4
+
+ movd [rsi+2*rax+2], %1
+ psrldq %1, 4
+
+ movd [rdi+2*rax+2], %1
+
+ movd [rsi+4*rax+2], %2
+ psrldq %2, 4
+
+ movd [rdi+4*rax+2], %2
+ psrldq %2, 4
+
+ movd [rsi+2*rcx+2], %2
+ psrldq %2, 4
+
+ movd [rdi+2*rcx+2], %2
+%endmacro
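
BV_TRANSPOSE and BV_WRITEBACK exploit that the 4-tap filter only changes p1..q1: after transposing back, each source row needs a single 4-byte store at columns 2..5 of the 8-wide window, which is the +2 in every address above. Scalar picture:

    /* row points at the window start (s - 4): p3 p2 p1 p0 q0 q1 q2 q3 */
    static void writeback_row(unsigned char *row, unsigned char p1,
                              unsigned char p0, unsigned char q0,
                              unsigned char q1) {
      row[2] = p1; row[3] = p0; row[4] = q0; row[5] = q1;
    }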
+
+
+;void vp9_loop_filter_vertical_edge_sse2
+;(
+; unsigned char *src_ptr,
+; int src_pixel_step,
+; const char *blimit,
+; const char *limit,
+; const char *thresh,
+; int count
+;)
+global sym(vp9_loop_filter_vertical_edge_sse2) PRIVATE
+sym(vp9_loop_filter_vertical_edge_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 96 ; reserve 96 bytes
+ %define t0 [rsp + 0] ;__declspec(align(16)) char t0[16];
+ %define t1 [rsp + 16] ;__declspec(align(16)) char t1[16];
+ %define srct [rsp + 32] ;__declspec(align(16)) char srct[64];
+
+ mov rsi, arg(0) ; src_ptr
+ movsxd rax, dword ptr arg(1) ; src_pixel_step
+
+ lea rsi, [rsi - 4]
+ lea rdi, [rsi + rax] ; rdi points to row +1 for indirect addressing
+ lea rcx, [rax*2+rax]
+
+ ;transpose 16x8 to 8x16, and store the 8-line result on stack.
+ TRANSPOSE_16X8 1, 1
+
+ ; calculate filter mask and high edge variance
+ LFV_FILTER_MASK_HEV_MASK 1
+
+ ; start work on filters
+ B_FILTER 2
+
+        ; transpose and write back - only works on q1, q0, p0, p1
+ BV_TRANSPOSE
+ ; store 16-line result
+
+ lea rdx, [rax]
+ neg rdx
+
+ BV_WRITEBACK xmm1, xmm5
+
+ lea rsi, [rsi+rdx*8]
+ lea rdi, [rdi+rdx*8]
+ BV_WRITEBACK xmm2, xmm6
+
+ add rsp, 96
+ pop rsp
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp9_loop_filter_vertical_edge_uv_sse2
+;(
+; unsigned char *u,
+; int src_pixel_step,
+; const char *blimit,
+; const char *limit,
+; const char *thresh,
+; unsigned char *v
+;)
+global sym(vp9_loop_filter_vertical_edge_uv_sse2) PRIVATE
+sym(vp9_loop_filter_vertical_edge_uv_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 96 ; reserve 96 bytes
+ %define t0 [rsp + 0] ;__declspec(align(16)) char t0[16];
+ %define t1 [rsp + 16] ;__declspec(align(16)) char t1[16];
+ %define srct [rsp + 32] ;__declspec(align(16)) char srct[64];
+
+ mov rsi, arg(0) ; u_ptr
+ movsxd rax, dword ptr arg(1) ; src_pixel_step
+
+ lea rsi, [rsi - 4]
+ lea rdi, [rsi + rax] ; rdi points to row +1 for indirect addressing
+ lea rcx, [rax+2*rax]
+
+ lea rdx, srct
+
+ ;transpose 16x8 to 8x16, and store the 8-line result on stack.
+ TRANSPOSE_16X8 0, 1
+
+ ; calculate filter mask and high edge variance
+ LFV_FILTER_MASK_HEV_MASK 1
+
+ ; start work on filters
+ B_FILTER 2
+
+        ; transpose and write back - only works on q1, q0, p0, p1
+ BV_TRANSPOSE
+
+ lea rdi, [rsi + rax] ; rdi points to row +1 for indirect addressing
+
+ ; store 16-line result
+ BV_WRITEBACK xmm1, xmm5
+
+ mov rsi, arg(0) ; u_ptr
+ lea rsi, [rsi - 4]
+ lea rdi, [rsi + rax] ; rdi points to row +1 for indirect addressing
+ BV_WRITEBACK xmm2, xmm6
+
+ add rsp, 96
+ pop rsp
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+SECTION_RODATA
+align 16
+tfe:
+ times 16 db 0xfe
+align 16
+t80:
+ times 16 db 0x80
+align 16
+t1s:
+ times 16 db 0x01
+align 16
+t3:
+ times 16 db 0x03
+align 16
+t4:
+ times 16 db 0x04
+align 16
+ones:
+ times 8 dw 0x0001
+align 16
+s9:
+ times 8 dw 0x0900
+align 16
+s63:
+ times 8 dw 0x003f
diff --git a/libvpx/vp9/common/x86/vp9_loopfilter_x86.h b/libvpx/vp9/common/x86/vp9_loopfilter_x86.h
new file mode 100644
index 0000000..fb5af05
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_loopfilter_x86.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_X86_VP9_LOOPFILTER_X86_H_
+#define VP9_COMMON_X86_VP9_LOOPFILTER_X86_H_
+
+/* Note:
+ *
+ * This platform is commonly built for runtime CPU detection. If you modify
+ * any of the function mappings present in this file, be sure to also update
+ * them in the function pointer initialization code
+ */
+
+#if HAVE_MMX
+extern prototype_loopfilter_block(vp9_loop_filter_mbv_mmx);
+extern prototype_loopfilter_block(vp9_loop_filter_bv_mmx);
+extern prototype_loopfilter_block(vp9_loop_filter_mbh_mmx);
+extern prototype_loopfilter_block(vp9_loop_filter_bh_mmx);
+#endif
+
+#if HAVE_SSE2
+extern prototype_loopfilter_block(vp9_loop_filter_mbv_sse2);
+extern prototype_loopfilter_block(vp9_loop_filter_bv_sse2);
+extern prototype_loopfilter_block(vp9_loop_filter_mbh_sse2);
+extern prototype_loopfilter_block(vp9_loop_filter_bh_sse2);
+#endif
+
+#endif  // VP9_COMMON_X86_VP9_LOOPFILTER_X86_H_
diff --git a/libvpx/vp9/common/x86/vp9_mask_sse3.asm b/libvpx/vp9/common/x86/vp9_mask_sse3.asm
new file mode 100644
index 0000000..fe46823
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_mask_sse3.asm
@@ -0,0 +1,484 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+;void vp8_makemask_sse3(
+; unsigned char *y,
+; unsigned char *u,
+; unsigned char *v,
+; unsigned char *ym,
+; unsigned char *uvm,
+; int yp,
+; int uvp,
+; int ys,
+; int us,
+; int vs,
+; int yt,
+; int ut,
+; int vt)
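+;
+; Roughly equivalent C for one pixel (a sketch only; the code below works
+; on 16 y pixels at a time and u/v are at half horizontal resolution):
+;   ym[i] = (abs(y[i] - ys) < yt) &&
+;           (abs(u[i >> 1] - us) < ut) &&
+;           (abs(v[i >> 1] - vs) < vt) ? 0xff : 0;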
+global sym(vp8_makemask_sse3) PRIVATE
+sym(vp8_makemask_sse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 14
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;y
+ mov rdi, arg(1) ;u
+ mov rcx, arg(2) ;v
+ mov rax, arg(3) ;ym
+ movsxd rbx, dword arg(4) ;yp
+ movsxd rdx, dword arg(5) ;uvp
+
+ pxor xmm0,xmm0
+
+ ;make 16 copies of the center y value
+ movd xmm1, arg(6)
+ pshufb xmm1, xmm0
+
+ ; make 16 copies of the center u value
+ movd xmm2, arg(7)
+ pshufb xmm2, xmm0
+
+ ; make 16 copies of the center v value
+ movd xmm3, arg(8)
+ pshufb xmm3, xmm0
+ unpcklpd xmm2, xmm3
+
+ ;make 16 copies of the y tolerance
+ movd xmm3, arg(9)
+ pshufb xmm3, xmm0
+
+ ;make 16 copies of the u tolerance
+ movd xmm4, arg(10)
+ pshufb xmm4, xmm0
+
+ ;make 16 copies of the v tolerance
+ movd xmm5, arg(11)
+ pshufb xmm5, xmm0
+ unpckhpd xmm4, xmm5
+
+ mov r8,8
+
+NextPairOfRows:
+
+ ;grab the y source values
+ movdqu xmm0, [rsi]
+
+ ;compute abs difference between source and y target
+ movdqa xmm6, xmm1
+ movdqa xmm7, xmm0
+ psubusb xmm0, xmm1
+ psubusb xmm6, xmm7
+ por xmm0, xmm6
+
+        ;check whether the abs difference is less than the y tolerance
+ movdqa xmm6, xmm3
+ pcmpgtb xmm6, xmm0
+
+ ;grab the y source values
+ add rsi, rbx
+ movdqu xmm0, [rsi]
+
+ ;compute abs difference between source and y target
+ movdqa xmm11, xmm1
+ movdqa xmm7, xmm0
+ psubusb xmm0, xmm1
+ psubusb xmm11, xmm7
+ por xmm0, xmm11
+
+        ;check whether the abs difference is less than the y tolerance
+ movdqa xmm11, xmm3
+ pcmpgtb xmm11, xmm0
+
+
+ ;grab the u and v source values
+ movdqu xmm7, [rdi]
+ movdqu xmm8, [rcx]
+ unpcklpd xmm7, xmm8
+
+ ;compute abs difference between source and uv targets
+ movdqa xmm9, xmm2
+ movdqa xmm10, xmm7
+ psubusb xmm7, xmm2
+ psubusb xmm9, xmm10
+ por xmm7, xmm9
+
+ ;check whether the number is < tolerance
+ movdqa xmm0, xmm4
+ pcmpgtb xmm0, xmm7
+
+ ;double u and v masks
+ movdqa xmm8, xmm0
+ punpckhbw xmm0, xmm0
+ punpcklbw xmm8, xmm8
+
+ ;mask row 0 and output
+ pand xmm6, xmm8
+ pand xmm6, xmm0
+ movdqa [rax],xmm6
+
+ ;mask row 1 and output
+ pand xmm11, xmm8
+ pand xmm11, xmm0
+ movdqa [rax+16],xmm11
+
+
+ ; to the next row or set of rows
+ add rsi, rbx
+ add rdi, rdx
+ add rcx, rdx
+ add rax,32
+ dec r8
+ jnz NextPairOfRows
+
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;GROW_HORIZ (register for result, source register or mem local)
+; takes source and shifts left and ors with source
+; then shifts right and ors with source
+%macro GROW_HORIZ 2
+ movdqa %1, %2
+ movdqa xmm14, %1
+ movdqa xmm15, %1
+ pslldq xmm14, 1
+ psrldq xmm15, 1
+ por %1,xmm14
+ por %1,xmm15
+%endmacro
+;GROW_VERT (result, center row, above row, below row)
+%macro GROW_VERT 4
+ movdqa %1,%2
+ por %1,%3
+ por %1,%4
+%endmacro
+
+;GROW_NEXTLINE (new line to grow, new source, line to write)
+%macro GROW_NEXTLINE 3
+ GROW_HORIZ %1, %2
+ GROW_VERT xmm3, xmm0, xmm1, xmm2
+ movdqa %3,xmm3
+%endmacro
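+
+; Together these macros implement a 3x3 dilation of the 16-byte-wide mask;
+; in rough C (rows are 16 bytes apart, names illustrative):
+;   h[r][c]  = om[r][c-1] | om[r][c] | om[r][c+1];   // GROW_HORIZ
+;   nm[r][c] = h[r-1][c] | h[r][c] | h[r+1][c];      // GROW_VERT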
+
+
+;void vp8_growmaskmb_sse3(
+; unsigned char *om,
+; unsigned char *nm)
+global sym(vp8_growmaskmb_sse3) PRIVATE
+sym(vp8_growmaskmb_sse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 2
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src
+ mov rdi, arg(1) ;rst
+
+ GROW_HORIZ xmm0, [rsi]
+ GROW_HORIZ xmm1, [rsi+16]
+ GROW_HORIZ xmm2, [rsi+32]
+
+ GROW_VERT xmm3, xmm0, xmm1, xmm2
+ por xmm0,xmm1
+ movdqa [rdi], xmm0
+ movdqa [rdi+16],xmm3
+
+ GROW_NEXTLINE xmm0,[rsi+48],[rdi+32]
+ GROW_NEXTLINE xmm1,[rsi+64],[rdi+48]
+ GROW_NEXTLINE xmm2,[rsi+80],[rdi+64]
+ GROW_NEXTLINE xmm0,[rsi+96],[rdi+80]
+ GROW_NEXTLINE xmm1,[rsi+112],[rdi+96]
+ GROW_NEXTLINE xmm2,[rsi+128],[rdi+112]
+ GROW_NEXTLINE xmm0,[rsi+144],[rdi+128]
+ GROW_NEXTLINE xmm1,[rsi+160],[rdi+144]
+ GROW_NEXTLINE xmm2,[rsi+176],[rdi+160]
+ GROW_NEXTLINE xmm0,[rsi+192],[rdi+176]
+ GROW_NEXTLINE xmm1,[rsi+208],[rdi+192]
+ GROW_NEXTLINE xmm2,[rsi+224],[rdi+208]
+ GROW_NEXTLINE xmm0,[rsi+240],[rdi+224]
+
+ por xmm0,xmm2
+ movdqa [rdi+240], xmm0
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+
+;unsigned int vp8_sad16x16_masked_wmt(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride,
+; unsigned char *mask)
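+;
+; In rough C (a sketch; mask rows are 16 bytes apart):
+;   for (r = 0; r < 16; r++)
+;     for (c = 0; c < 16; c++)
+;       sad += abs((src[c] & m[c]) - (ref[c] & m[c]));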
+global sym(vp8_sad16x16_masked_wmt) PRIVATE
+sym(vp8_sad16x16_masked_wmt):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ push rsi
+ push rdi
+ ; end prolog
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ mov rbx, arg(4) ;mask
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ mov rcx, 16
+
+ pxor xmm3, xmm3
+
+NextSadRow:
+ movdqu xmm0, [rsi]
+ movdqu xmm1, [rdi]
+ movdqu xmm2, [rbx]
+ pand xmm0, xmm2
+ pand xmm1, xmm2
+
+ psadbw xmm0, xmm1
+ paddw xmm3, xmm0
+
+ add rsi, rax
+ add rdi, rdx
+ add rbx, 16
+
+ dec rcx
+ jnz NextSadRow
+
+ movdqa xmm4 , xmm3
+ psrldq xmm4, 8
+ paddw xmm3, xmm4
+ movq rax, xmm3
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;unsigned int vp8_sad16x16_unmasked_wmt(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride,
+; unsigned char *mask)
+global sym(vp8_sad16x16_unmasked_wmt) PRIVATE
+sym(vp8_sad16x16_unmasked_wmt):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ push rsi
+ push rdi
+ ; end prolog
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ mov rbx, arg(4) ;mask
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ mov rcx, 16
+
+ pxor xmm3, xmm3
+
+next_vp8_sad16x16_unmasked_wmt:
+ movdqu xmm0, [rsi]
+ movdqu xmm1, [rdi]
+ movdqu xmm2, [rbx]
+ por xmm0, xmm2
+ por xmm1, xmm2
+
+ psadbw xmm0, xmm1
+ paddw xmm3, xmm0
+
+ add rsi, rax
+ add rdi, rdx
+ add rbx, 16
+
+ dec rcx
+ jnz next_vp8_sad16x16_unmasked_wmt
+
+ movdqa xmm4 , xmm3
+ psrldq xmm4, 8
+ paddw xmm3, xmm4
+ movq rax, xmm3
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp8_masked_predictor_wmt(
+; unsigned char *masked,
+; unsigned char *unmasked,
+; int src_stride,
+; unsigned char *dst_ptr,
+; int dst_stride,
+; unsigned char *mask)
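+;
+; Per byte, selects the masked predictor where the mask is set and the
+; unmasked one elsewhere; in rough C (a sketch):
+;   dst[c] = (masked[c] & m[c]) | (unmasked[c] & ~m[c]);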
+global sym(vp8_masked_predictor_wmt) PRIVATE
+sym(vp8_masked_predictor_wmt):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ push rsi
+ push rdi
+ ; end prolog
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(1) ;ref_ptr
+
+ mov rbx, arg(5) ;mask
+ movsxd rax, dword ptr arg(2) ;src_stride
+ mov r11, arg(3) ; destination
+ movsxd rdx, dword ptr arg(4) ;dst_stride
+
+ mov rcx, 16
+
+ pxor xmm3, xmm3
+
+next_vp8_masked_predictor_wmt:
+ movdqu xmm0, [rsi]
+ movdqu xmm1, [rdi]
+ movdqu xmm2, [rbx]
+
+ pand xmm0, xmm2
+ pandn xmm2, xmm1
+ por xmm0, xmm2
+ movdqu [r11], xmm0
+
+ add r11, rdx
+ add rsi, rax
+ add rdi, rdx
+ add rbx, 16
+
+ dec rcx
+ jnz next_vp8_masked_predictor_wmt
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp8_masked_predictor_uv_wmt(
+; unsigned char *masked,
+; unsigned char *unmasked,
+; int src_stride,
+; unsigned char *dst_ptr,
+; int dst_stride,
+; unsigned char *mask)
+global sym(vp8_masked_predictor_uv_wmt) PRIVATE
+sym(vp8_masked_predictor_uv_wmt):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ push rsi
+ push rdi
+ ; end prolog
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(1) ;ref_ptr
+
+ mov rbx, arg(5) ;mask
+ movsxd rax, dword ptr arg(2) ;src_stride
+ mov r11, arg(3) ; destination
+ movsxd rdx, dword ptr arg(4) ;dst_stride
+
+ mov rcx, 8
+
+ pxor xmm3, xmm3
+
+next_vp8_masked_predictor_uv_wmt:
+ movq xmm0, [rsi]
+ movq xmm1, [rdi]
+ movq xmm2, [rbx]
+
+ pand xmm0, xmm2
+ pandn xmm2, xmm1
+ por xmm0, xmm2
+ movq [r11], xmm0
+
+ add r11, rdx
+ add rsi, rax
+ add rdi, rax
+ add rbx, 8
+
+ dec rcx
+ jnz next_vp8_masked_predictor_uv_wmt
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp8_uv_from_y_mask(
+; unsigned char *ymask,
+; unsigned char *uvmask)
+global sym(vp8_uv_from_y_mask) PRIVATE
+sym(vp8_uv_from_y_mask):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ push rsi
+ push rdi
+ ; end prolog
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(1) ;dst_ptr
+
+
+ mov rcx, 8
+
+ pxor xmm3, xmm3
+
+next_vp8_uv_from_y_mask:
+ movdqu xmm0, [rsi]
+ pshufb xmm0, [shuf1b] ;[GLOBAL(shuf1b)]
+ movq [rdi],xmm0
+ add rdi, 8
+ add rsi,32
+
+ dec rcx
+        jnz next_vp8_uv_from_y_mask
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+SECTION_RODATA
+align 16
+shuf1b:
+ db 0, 2, 4, 6, 8, 10, 12, 14, 0, 0, 0, 0, 0, 0, 0, 0
+
diff --git a/libvpx/vp9/common/x86/vp9_postproc_mmx.asm b/libvpx/vp9/common/x86/vp9_postproc_mmx.asm
new file mode 100644
index 0000000..c2118db
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_postproc_mmx.asm
@@ -0,0 +1,534 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+%define VP9_FILTER_WEIGHT 128
+%define VP9_FILTER_SHIFT 7
+
+;void vp9_post_proc_down_and_across_mmx
+;(
+; unsigned char *src_ptr,
+; unsigned char *dst_ptr,
+; int src_pixels_per_line,
+; int dst_pixels_per_line,
+; int rows,
+; int cols,
+; int flimit
+;)
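+;
+; Applies a 5-tap blur down (and then across, in place) but keeps the
+; original pixel wherever any tap differs from it by more than flimit;
+; in rough C for one output pixel (a sketch, weights from the Blur table):
+;   v = (16*p[-2] + 16*p[-1] + 64*p[0] + 16*p[1] + 16*p[2] + 64) >> 7;
+;   dst = too_different ? p[0] : v;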
+global sym(vp9_post_proc_down_and_across_mmx) PRIVATE
+sym(vp9_post_proc_down_and_across_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+%if ABI_IS_32BIT=1 && CONFIG_PIC=1
+ ; move the global rd onto the stack, since we don't have enough registers
+ ; to do PIC addressing
+ movq mm0, [GLOBAL(rd)]
+ sub rsp, 8
+ movq [rsp], mm0
+%define RD [rsp]
+%else
+%define RD [GLOBAL(rd)]
+%endif
+
+ push rbx
+ lea rbx, [GLOBAL(Blur)]
+ movd mm2, dword ptr arg(6) ;flimit
+ punpcklwd mm2, mm2
+ punpckldq mm2, mm2
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(1) ;dst_ptr
+
+ movsxd rcx, DWORD PTR arg(4) ;rows
+        movsxd rax, DWORD PTR arg(2) ;src_pixels_per_line ; source pitch
+ pxor mm0, mm0 ; mm0 = 00000000
+
+.nextrow:
+
+ xor rdx, rdx ; clear out rdx for use as loop counter
+.nextcol:
+
+ pxor mm7, mm7 ; mm7 = 00000000
+ movq mm6, [rbx + 32 ] ; mm6 = kernel 2 taps
+ movq mm3, [rsi] ; mm4 = r0 p0..p7
+ punpcklbw mm3, mm0 ; mm3 = p0..p3
+ movq mm1, mm3 ; mm1 = p0..p3
+ pmullw mm3, mm6 ; mm3 *= kernel 2 modifiers
+
+ movq mm6, [rbx + 48] ; mm6 = kernel 3 taps
+ movq mm5, [rsi + rax] ; mm4 = r1 p0..p7
+ punpcklbw mm5, mm0 ; mm5 = r1 p0..p3
+ pmullw mm6, mm5 ; mm6 *= p0..p3 * kernel 3 modifiers
+ paddusw mm3, mm6 ; mm3 += mm6
+
+ ; thresholding
+ movq mm7, mm1 ; mm7 = r0 p0..p3
+ psubusw mm7, mm5 ; mm7 = r0 p0..p3 - r1 p0..p3
+ psubusw mm5, mm1 ; mm5 = r1 p0..p3 - r0 p0..p3
+ paddusw mm7, mm5 ; mm7 = abs(r0 p0..p3 - r1 p0..p3)
+ pcmpgtw mm7, mm2
+
+ movq mm6, [rbx + 64 ] ; mm6 = kernel 4 modifiers
+ movq mm5, [rsi + 2*rax] ; mm4 = r2 p0..p7
+ punpcklbw mm5, mm0 ; mm5 = r2 p0..p3
+ pmullw mm6, mm5 ; mm5 *= kernel 4 modifiers
+ paddusw mm3, mm6 ; mm3 += mm5
+
+ ; thresholding
+ movq mm6, mm1 ; mm6 = r0 p0..p3
+ psubusw mm6, mm5 ; mm6 = r0 p0..p3 - r2 p0..p3
+        psubusw     mm5, mm1              ; mm5 = r2 p0..p3 - r0 p0..p3
+ paddusw mm6, mm5 ; mm6 = abs(r0 p0..p3 - r2 p0..p3)
+ pcmpgtw mm6, mm2
+ por mm7, mm6 ; accumulate thresholds
+
+
+ neg rax
+ movq mm6, [rbx ] ; kernel 0 taps
+ movq mm5, [rsi+2*rax] ; mm4 = r-2 p0..p7
+ punpcklbw mm5, mm0 ; mm5 = r-2 p0..p3
+ pmullw mm6, mm5 ; mm5 *= kernel 0 modifiers
+ paddusw mm3, mm6 ; mm3 += mm5
+
+ ; thresholding
+ movq mm6, mm1 ; mm6 = r0 p0..p3
+ psubusw mm6, mm5 ; mm6 = p0..p3 - r-2 p0..p3
+ psubusw mm5, mm1 ; mm5 = r-2 p0..p3 - p0..p3
+ paddusw mm6, mm5 ; mm6 = abs(r0 p0..p3 - r-2 p0..p3)
+ pcmpgtw mm6, mm2
+ por mm7, mm6 ; accumulate thresholds
+
+ movq mm6, [rbx + 16] ; kernel 1 taps
+ movq mm4, [rsi+rax] ; mm4 = r-1 p0..p7
+ punpcklbw mm4, mm0 ; mm4 = r-1 p0..p3
+ pmullw mm6, mm4 ; mm4 *= kernel 1 modifiers.
+ paddusw mm3, mm6 ; mm3 += mm5
+
+ ; thresholding
+ movq mm6, mm1 ; mm6 = r0 p0..p3
+ psubusw mm6, mm4 ; mm6 = p0..p3 - r-2 p0..p3
+ psubusw mm4, mm1 ; mm5 = r-1 p0..p3 - p0..p3
+ paddusw mm6, mm4 ; mm6 = abs(r0 p0..p3 - r-1 p0..p3)
+ pcmpgtw mm6, mm2
+ por mm7, mm6 ; accumulate thresholds
+
+
+ paddusw mm3, RD ; mm3 += round value
+ psraw mm3, VP9_FILTER_SHIFT ; mm3 /= 128
+
+ pand mm1, mm7 ; mm1 select vals > thresh from source
+ pandn mm7, mm3 ; mm7 select vals < thresh from blurred result
+ paddusw mm1, mm7 ; combination
+
+ packuswb mm1, mm0 ; pack to bytes
+
+ movd [rdi], mm1 ;
+ neg rax ; pitch is positive
+
+
+ add rsi, 4
+ add rdi, 4
+ add rdx, 4
+
+ cmp edx, dword ptr arg(5) ;cols
+ jl .nextcol
+        ; done with all the cols, start the across filtering in place
+ sub rsi, rdx
+ sub rdi, rdx
+
+
+ push rax
+ xor rdx, rdx
+ mov rax, [rdi-4];
+
+.acrossnextcol:
+ pxor mm7, mm7 ; mm7 = 00000000
+ movq mm6, [rbx + 32 ] ;
+ movq mm4, [rdi+rdx] ; mm4 = p0..p7
+ movq mm3, mm4 ; mm3 = p0..p7
+ punpcklbw mm3, mm0 ; mm3 = p0..p3
+ movq mm1, mm3 ; mm1 = p0..p3
+ pmullw mm3, mm6 ; mm3 *= kernel 2 modifiers
+
+ movq mm6, [rbx + 48]
+ psrlq mm4, 8 ; mm4 = p1..p7
+ movq mm5, mm4 ; mm5 = p1..p7
+ punpcklbw mm5, mm0 ; mm5 = p1..p4
+ pmullw mm6, mm5 ; mm6 *= p1..p4 * kernel 3 modifiers
+ paddusw mm3, mm6 ; mm3 += mm6
+
+ ; thresholding
+ movq mm7, mm1 ; mm7 = p0..p3
+ psubusw mm7, mm5 ; mm7 = p0..p3 - p1..p4
+ psubusw mm5, mm1 ; mm5 = p1..p4 - p0..p3
+ paddusw mm7, mm5 ; mm7 = abs(p0..p3 - p1..p4)
+ pcmpgtw mm7, mm2
+
+ movq mm6, [rbx + 64 ]
+ psrlq mm4, 8 ; mm4 = p2..p7
+ movq mm5, mm4 ; mm5 = p2..p7
+ punpcklbw mm5, mm0 ; mm5 = p2..p5
+ pmullw mm6, mm5 ; mm5 *= kernel 4 modifiers
+ paddusw mm3, mm6 ; mm3 += mm5
+
+ ; thresholding
+ movq mm6, mm1 ; mm6 = p0..p3
+ psubusw mm6, mm5 ; mm6 = p0..p3 - p1..p4
+ psubusw mm5, mm1 ; mm5 = p1..p4 - p0..p3
+ paddusw mm6, mm5 ; mm6 = abs(p0..p3 - p1..p4)
+ pcmpgtw mm6, mm2
+ por mm7, mm6 ; accumulate thresholds
+
+
+ movq mm6, [rbx ]
+ movq mm4, [rdi+rdx-2] ; mm4 = p-2..p5
+ movq mm5, mm4 ; mm5 = p-2..p5
+ punpcklbw mm5, mm0 ; mm5 = p-2..p1
+ pmullw mm6, mm5 ; mm5 *= kernel 0 modifiers
+ paddusw mm3, mm6 ; mm3 += mm5
+
+ ; thresholding
+ movq mm6, mm1 ; mm6 = p0..p3
+ psubusw mm6, mm5 ; mm6 = p0..p3 - p1..p4
+ psubusw mm5, mm1 ; mm5 = p1..p4 - p0..p3
+ paddusw mm6, mm5 ; mm6 = abs(p0..p3 - p1..p4)
+ pcmpgtw mm6, mm2
+ por mm7, mm6 ; accumulate thresholds
+
+ movq mm6, [rbx + 16]
+ psrlq mm4, 8 ; mm4 = p-1..p5
+ punpcklbw mm4, mm0 ; mm4 = p-1..p2
+ pmullw mm6, mm4 ; mm4 *= kernel 1 modifiers.
+ paddusw mm3, mm6 ; mm3 += mm5
+
+ ; thresholding
+ movq mm6, mm1 ; mm6 = p0..p3
+ psubusw mm6, mm4 ; mm6 = p0..p3 - p1..p4
+ psubusw mm4, mm1 ; mm5 = p1..p4 - p0..p3
+ paddusw mm6, mm4 ; mm6 = abs(p0..p3 - p1..p4)
+ pcmpgtw mm6, mm2
+ por mm7, mm6 ; accumulate thresholds
+
+ paddusw mm3, RD ; mm3 += round value
+ psraw mm3, VP9_FILTER_SHIFT ; mm3 /= 128
+
+ pand mm1, mm7 ; mm1 select vals > thresh from source
+ pandn mm7, mm3 ; mm7 select vals < thresh from blurred result
+ paddusw mm1, mm7 ; combination
+
+ packuswb mm1, mm0 ; pack to bytes
+ mov DWORD PTR [rdi+rdx-4], eax ; store previous four bytes
+ movd eax, mm1
+
+ add rdx, 4
+ cmp edx, dword ptr arg(5) ;cols
+ jl .acrossnextcol;
+
+ mov DWORD PTR [rdi+rdx-4], eax
+ pop rax
+
+        ; done with this row
+        add rsi,rax                  ; next line
+        movsxd rax, dword ptr arg(3) ;dst_pixels_per_line ; destination pitch
+        add rdi,rax                  ; next destination
+        movsxd rax, dword ptr arg(2) ;src_pixels_per_line ; source pitch
+
+ dec rcx ; decrement count
+ jnz .nextrow ; next row
+ pop rbx
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+%undef RD
+
+
+;void vp9_mbpost_proc_down_mmx(unsigned char *dst,
+;                              int pitch, int rows, int cols, int flimit)
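+;
+; Per column this keeps a running sum and sum of squares over a sliding
+; 16-row window and only filters where the window variance is small;
+; in rough C (a sketch matching the arithmetic below):
+;   if (sumsq * 15 - sum * sum < flimit)
+;     s[c] = (vp9_rv[r & 127] + sum + s[c]) >> 4;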
+extern sym(vp9_rv)
+global sym(vp9_mbpost_proc_down_mmx) PRIVATE
+sym(vp9_mbpost_proc_down_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 136
+
+ ; unsigned char d[16][8] at [rsp]
+ ; create flimit2 at [rsp+128]
+ mov eax, dword ptr arg(4) ;flimit
+ mov [rsp+128], eax
+ mov [rsp+128+4], eax
+%define flimit2 [rsp+128]
+
+%if ABI_IS_32BIT=0
+ lea r8, [GLOBAL(sym(vp9_rv))]
+%endif
+
+ ;rows +=8;
+ add dword ptr arg(2), 8
+
+ ;for(c=0; c<cols; c+=4)
+.loop_col:
+ mov rsi, arg(0) ;s
+ pxor mm0, mm0 ;
+
+ movsxd rax, dword ptr arg(1) ;pitch ;
+ neg rax ; rax = -pitch
+
+ lea rsi, [rsi + rax*8]; ; rdi = s[-pitch*8]
+ neg rax
+
+
+ pxor mm5, mm5
+ pxor mm6, mm6 ;
+
+ pxor mm7, mm7 ;
+ mov rdi, rsi
+
+ mov rcx, 15 ;
+
+.loop_initvar:
+ movd mm1, DWORD PTR [rdi];
+ punpcklbw mm1, mm0 ;
+
+ paddw mm5, mm1 ;
+ pmullw mm1, mm1 ;
+
+ movq mm2, mm1 ;
+ punpcklwd mm1, mm0 ;
+
+ punpckhwd mm2, mm0 ;
+ paddd mm6, mm1 ;
+
+ paddd mm7, mm2 ;
+ lea rdi, [rdi+rax] ;
+
+ dec rcx
+ jne .loop_initvar
+ ;save the var and sum
+ xor rdx, rdx
+.loop_row:
+ movd mm1, DWORD PTR [rsi] ; [s-pitch*8]
+ movd mm2, DWORD PTR [rdi] ; [s+pitch*7]
+
+ punpcklbw mm1, mm0
+ punpcklbw mm2, mm0
+
+ paddw mm5, mm2
+ psubw mm5, mm1
+
+ pmullw mm2, mm2
+ movq mm4, mm2
+
+ punpcklwd mm2, mm0
+ punpckhwd mm4, mm0
+
+ paddd mm6, mm2
+ paddd mm7, mm4
+
+ pmullw mm1, mm1
+ movq mm2, mm1
+
+ punpcklwd mm1, mm0
+ psubd mm6, mm1
+
+ punpckhwd mm2, mm0
+ psubd mm7, mm2
+
+
+ movq mm3, mm6
+ pslld mm3, 4
+
+ psubd mm3, mm6
+ movq mm1, mm5
+
+ movq mm4, mm5
+ pmullw mm1, mm1
+
+ pmulhw mm4, mm4
+ movq mm2, mm1
+
+ punpcklwd mm1, mm4
+ punpckhwd mm2, mm4
+
+ movq mm4, mm7
+ pslld mm4, 4
+
+ psubd mm4, mm7
+
+ psubd mm3, mm1
+ psubd mm4, mm2
+
+ psubd mm3, flimit2
+ psubd mm4, flimit2
+
+ psrad mm3, 31
+ psrad mm4, 31
+
+ packssdw mm3, mm4
+ packsswb mm3, mm0
+
+ movd mm1, DWORD PTR [rsi+rax*8]
+
+ movq mm2, mm1
+ punpcklbw mm1, mm0
+
+ paddw mm1, mm5
+ mov rcx, rdx
+
+ and rcx, 127
+%if ABI_IS_32BIT=1 && CONFIG_PIC=1
+ push rax
+ lea rax, [GLOBAL(sym(vp9_rv))]
+ movq mm4, [rax + rcx*2] ;vp9_rv[rcx*2]
+ pop rax
+%elif ABI_IS_32BIT=0
+ movq mm4, [r8 + rcx*2] ;vp9_rv[rcx*2]
+%else
+ movq mm4, [sym(vp9_rv) + rcx*2]
+%endif
+ paddw mm1, mm4
+ ;paddw xmm1, eight8s
+ psraw mm1, 4
+
+ packuswb mm1, mm0
+ pand mm1, mm3
+
+ pandn mm3, mm2
+ por mm1, mm3
+
+ and rcx, 15
+ movd DWORD PTR [rsp+rcx*4], mm1 ;d[rcx*4]
+
+ mov rcx, rdx
+ sub rcx, 8
+
+ and rcx, 15
+ movd mm1, DWORD PTR [rsp+rcx*4] ;d[rcx*4]
+
+ movd [rsi], mm1
+ lea rsi, [rsi+rax]
+
+ lea rdi, [rdi+rax]
+ add rdx, 1
+
+ cmp edx, dword arg(2) ;rows
+ jl .loop_row
+
+
+ add dword arg(0), 4 ; s += 4
+ sub dword arg(3), 4 ; cols -= 4
+ cmp dword arg(3), 0
+ jg .loop_col
+
+ add rsp, 136
+ pop rsp
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+%undef flimit2
+
+
+;void vp9_plane_add_noise_mmx (unsigned char *start, unsigned char *noise,
+; unsigned char blackclamp[16],
+; unsigned char whiteclamp[16],
+; unsigned char bothclamp[16],
+; unsigned int width, unsigned int height, int pitch)
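+;
+; In rough C for one pixel (a sketch; the saturating ops keep the result
+; of adding noise in range):
+;   p = sat_sub(sat_add(sat_sub(start[c], black), both), white);
+;   start[c] = p + noise[(rand() & 0xff) + c];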
+extern sym(rand)
+global sym(vp9_plane_add_noise_mmx) PRIVATE
+sym(vp9_plane_add_noise_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 8
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+.addnoise_loop:
+ call sym(rand) WRT_PLT
+ mov rcx, arg(1) ;noise
+ and rax, 0xff
+ add rcx, rax
+
+ ; we rely on the fact that the clamping vectors are stored contiguously
+ ; in black/white/both order. Note that we have to reload this here because
+ ; rdx could be trashed by rand()
+ mov rdx, arg(2) ; blackclamp
+
+
+ mov rdi, rcx
+ movsxd rcx, dword arg(5) ;[Width]
+ mov rsi, arg(0) ;Pos
+ xor rax,rax
+
+.addnoise_nextset:
+ movq mm1,[rsi+rax] ; get the source
+
+ psubusb mm1, [rdx] ;blackclamp ; clamp both sides so we don't outrange adding noise
+ paddusb mm1, [rdx+32] ;bothclamp
+ psubusb mm1, [rdx+16] ;whiteclamp
+
+ movq mm2,[rdi+rax] ; get the noise for this line
+ paddb mm1,mm2 ; add it in
+ movq [rsi+rax],mm1 ; store the result
+
+ add rax,8 ; move to the next line
+
+ cmp rax, rcx
+ jl .addnoise_nextset
+
+ movsxd rax, dword arg(7) ; Pitch
+ add arg(0), rax ; Start += Pitch
+ sub dword arg(6), 1 ; Height -= 1
+ jg .addnoise_loop
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+SECTION_RODATA
+align 16
+Blur:
+ times 16 dw 16
+ times 8 dw 64
+ times 16 dw 16
+ times 8 dw 0
+
+rd:
+ times 4 dw 0x40
diff --git a/libvpx/vp9/common/x86/vp9_postproc_sse2.asm b/libvpx/vp9/common/x86/vp9_postproc_sse2.asm
new file mode 100644
index 0000000..858fc99
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_postproc_sse2.asm
@@ -0,0 +1,695 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+;void vp9_post_proc_down_and_across_xmm
+;(
+; unsigned char *src_ptr,
+; unsigned char *dst_ptr,
+; int src_pixels_per_line,
+; int dst_pixels_per_line,
+; int rows,
+; int cols,
+; int flimit
+;)
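+;
+; Same structure as the mmx version above but with a lighter kernel;
+; in rough C (a sketch):
+;   v = (p[-2] + p[-1] + 4*p[0] + p[1] + p[2] + 4) >> 3;
+;   dst = too_different ? p[0] : v;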
+global sym(vp9_post_proc_down_and_across_xmm) PRIVATE
+sym(vp9_post_proc_down_and_across_xmm):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+%if ABI_IS_32BIT=1 && CONFIG_PIC=1
+ ALIGN_STACK 16, rax
+ ; move the global rd onto the stack, since we don't have enough registers
+ ; to do PIC addressing
+ movdqa xmm0, [GLOBAL(rd42)]
+ sub rsp, 16
+ movdqa [rsp], xmm0
+%define RD42 [rsp]
+%else
+%define RD42 [GLOBAL(rd42)]
+%endif
+
+
+ movd xmm2, dword ptr arg(6) ;flimit
+ punpcklwd xmm2, xmm2
+ punpckldq xmm2, xmm2
+ punpcklqdq xmm2, xmm2
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(1) ;dst_ptr
+
+ movsxd rcx, DWORD PTR arg(4) ;rows
+        movsxd      rax, DWORD PTR arg(2) ;src_pixels_per_line ; source pitch
+ pxor xmm0, xmm0 ; mm0 = 00000000
+
+.nextrow:
+
+ xor rdx, rdx ; clear out rdx for use as loop counter
+.nextcol:
+ movq xmm3, QWORD PTR [rsi] ; mm4 = r0 p0..p7
+ punpcklbw xmm3, xmm0 ; mm3 = p0..p3
+ movdqa xmm1, xmm3 ; mm1 = p0..p3
+ psllw xmm3, 2 ;
+
+ movq xmm5, QWORD PTR [rsi + rax] ; mm4 = r1 p0..p7
+ punpcklbw xmm5, xmm0 ; mm5 = r1 p0..p3
+ paddusw xmm3, xmm5 ; mm3 += mm6
+
+ ; thresholding
+ movdqa xmm7, xmm1 ; mm7 = r0 p0..p3
+ psubusw xmm7, xmm5 ; mm7 = r0 p0..p3 - r1 p0..p3
+ psubusw xmm5, xmm1 ; mm5 = r1 p0..p3 - r0 p0..p3
+ paddusw xmm7, xmm5 ; mm7 = abs(r0 p0..p3 - r1 p0..p3)
+ pcmpgtw xmm7, xmm2
+
+ movq xmm5, QWORD PTR [rsi + 2*rax] ; mm4 = r2 p0..p7
+ punpcklbw xmm5, xmm0 ; mm5 = r2 p0..p3
+ paddusw xmm3, xmm5 ; mm3 += mm5
+
+ ; thresholding
+ movdqa xmm6, xmm1 ; mm6 = r0 p0..p3
+ psubusw xmm6, xmm5 ; mm6 = r0 p0..p3 - r2 p0..p3
+        psubusw     xmm5, xmm1            ; mm5 = r2 p0..p3 - r0 p0..p3
+ paddusw xmm6, xmm5 ; mm6 = abs(r0 p0..p3 - r2 p0..p3)
+ pcmpgtw xmm6, xmm2
+ por xmm7, xmm6 ; accumulate thresholds
+
+
+ neg rax
+ movq xmm5, QWORD PTR [rsi+2*rax] ; mm4 = r-2 p0..p7
+ punpcklbw xmm5, xmm0 ; mm5 = r-2 p0..p3
+ paddusw xmm3, xmm5 ; mm3 += mm5
+
+ ; thresholding
+ movdqa xmm6, xmm1 ; mm6 = r0 p0..p3
+ psubusw xmm6, xmm5 ; mm6 = p0..p3 - r-2 p0..p3
+ psubusw xmm5, xmm1 ; mm5 = r-2 p0..p3 - p0..p3
+ paddusw xmm6, xmm5 ; mm6 = abs(r0 p0..p3 - r-2 p0..p3)
+ pcmpgtw xmm6, xmm2
+ por xmm7, xmm6 ; accumulate thresholds
+
+ movq xmm4, QWORD PTR [rsi+rax] ; mm4 = r-1 p0..p7
+ punpcklbw xmm4, xmm0 ; mm4 = r-1 p0..p3
+ paddusw xmm3, xmm4 ; mm3 += mm5
+
+ ; thresholding
+ movdqa xmm6, xmm1 ; mm6 = r0 p0..p3
+ psubusw xmm6, xmm4 ; mm6 = p0..p3 - r-2 p0..p3
+ psubusw xmm4, xmm1 ; mm5 = r-1 p0..p3 - p0..p3
+ paddusw xmm6, xmm4 ; mm6 = abs(r0 p0..p3 - r-1 p0..p3)
+ pcmpgtw xmm6, xmm2
+ por xmm7, xmm6 ; accumulate thresholds
+
+
+ paddusw xmm3, RD42 ; mm3 += round value
+ psraw xmm3, 3 ; mm3 /= 8
+
+ pand xmm1, xmm7 ; mm1 select vals > thresh from source
+ pandn xmm7, xmm3 ; mm7 select vals < thresh from blurred result
+ paddusw xmm1, xmm7 ; combination
+
+ packuswb xmm1, xmm0 ; pack to bytes
+ movq QWORD PTR [rdi], xmm1 ;
+
+ neg rax ; pitch is positive
+ add rsi, 8
+ add rdi, 8
+
+ add rdx, 8
+ cmp edx, dword arg(5) ;cols
+
+ jl .nextcol
+
+        ; done with all the cols, start the across filtering in place
+ sub rsi, rdx
+ sub rdi, rdx
+
+ xor rdx, rdx
+ movq mm0, QWORD PTR [rdi-8];
+
+.acrossnextcol:
+ movq xmm7, QWORD PTR [rdi +rdx -2]
+ movd xmm4, DWORD PTR [rdi +rdx +6]
+
+ pslldq xmm4, 8
+ por xmm4, xmm7
+
+ movdqa xmm3, xmm4
+ psrldq xmm3, 2
+ punpcklbw xmm3, xmm0 ; mm3 = p0..p3
+ movdqa xmm1, xmm3 ; mm1 = p0..p3
+ psllw xmm3, 2
+
+
+ movdqa xmm5, xmm4
+ psrldq xmm5, 3
+ punpcklbw xmm5, xmm0 ; mm5 = p1..p4
+ paddusw xmm3, xmm5 ; mm3 += mm6
+
+ ; thresholding
+ movdqa xmm7, xmm1 ; mm7 = p0..p3
+ psubusw xmm7, xmm5 ; mm7 = p0..p3 - p1..p4
+ psubusw xmm5, xmm1 ; mm5 = p1..p4 - p0..p3
+ paddusw xmm7, xmm5 ; mm7 = abs(p0..p3 - p1..p4)
+ pcmpgtw xmm7, xmm2
+
+ movdqa xmm5, xmm4
+ psrldq xmm5, 4
+ punpcklbw xmm5, xmm0 ; mm5 = p2..p5
+ paddusw xmm3, xmm5 ; mm3 += mm5
+
+ ; thresholding
+ movdqa xmm6, xmm1 ; mm6 = p0..p3
+ psubusw xmm6, xmm5 ; mm6 = p0..p3 - p1..p4
+ psubusw xmm5, xmm1 ; mm5 = p1..p4 - p0..p3
+ paddusw xmm6, xmm5 ; mm6 = abs(p0..p3 - p1..p4)
+ pcmpgtw xmm6, xmm2
+ por xmm7, xmm6 ; accumulate thresholds
+
+
+ movdqa xmm5, xmm4 ; mm5 = p-2..p5
+ punpcklbw xmm5, xmm0 ; mm5 = p-2..p1
+ paddusw xmm3, xmm5 ; mm3 += mm5
+
+ ; thresholding
+ movdqa xmm6, xmm1 ; mm6 = p0..p3
+ psubusw xmm6, xmm5 ; mm6 = p0..p3 - p1..p4
+ psubusw xmm5, xmm1 ; mm5 = p1..p4 - p0..p3
+ paddusw xmm6, xmm5 ; mm6 = abs(p0..p3 - p1..p4)
+ pcmpgtw xmm6, xmm2
+ por xmm7, xmm6 ; accumulate thresholds
+
+ psrldq xmm4, 1 ; mm4 = p-1..p5
+ punpcklbw xmm4, xmm0 ; mm4 = p-1..p2
+ paddusw xmm3, xmm4 ; mm3 += mm5
+
+ ; thresholding
+ movdqa xmm6, xmm1 ; mm6 = p0..p3
+ psubusw xmm6, xmm4 ; mm6 = p0..p3 - p1..p4
+ psubusw xmm4, xmm1 ; mm5 = p1..p4 - p0..p3
+ paddusw xmm6, xmm4 ; mm6 = abs(p0..p3 - p1..p4)
+ pcmpgtw xmm6, xmm2
+ por xmm7, xmm6 ; accumulate thresholds
+
+ paddusw xmm3, RD42 ; mm3 += round value
+ psraw xmm3, 3 ; mm3 /= 8
+
+ pand xmm1, xmm7 ; mm1 select vals > thresh from source
+ pandn xmm7, xmm3 ; mm7 select vals < thresh from blurred result
+ paddusw xmm1, xmm7 ; combination
+
+ packuswb xmm1, xmm0 ; pack to bytes
+ movq QWORD PTR [rdi+rdx-8], mm0 ; store previous four bytes
+ movdq2q mm0, xmm1
+
+ add rdx, 8
+ cmp edx, dword arg(5) ;cols
+ jl .acrossnextcol;
+
+ ; last 8 pixels
+ movq QWORD PTR [rdi+rdx-8], mm0
+
+        ; done with this row
+        add rsi,rax               ; next line
+        mov eax, dword arg(3)     ;dst_pixels_per_line ; destination pitch
+        add rdi,rax               ; next destination
+        mov eax, dword arg(2)     ;src_pixels_per_line ; source pitch
+
+ dec rcx ; decrement count
+ jnz .nextrow ; next row
+
+%if ABI_IS_32BIT=1 && CONFIG_PIC=1
+ add rsp,16
+ pop rsp
+%endif
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+%undef RD42
+
+
+;void vp9_mbpost_proc_down_xmm(unsigned char *dst,
+;                              int pitch, int rows, int cols, int flimit)
+extern sym(vp9_rv)
+global sym(vp9_mbpost_proc_down_xmm) PRIVATE
+sym(vp9_mbpost_proc_down_xmm):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 128+16
+
+ ; unsigned char d[16][8] at [rsp]
+ ; create flimit2 at [rsp+128]
+ mov eax, dword ptr arg(4) ;flimit
+ mov [rsp+128], eax
+ mov [rsp+128+4], eax
+ mov [rsp+128+8], eax
+ mov [rsp+128+12], eax
+%define flimit4 [rsp+128]
+
+%if ABI_IS_32BIT=0
+ lea r8, [GLOBAL(sym(vp9_rv))]
+%endif
+
+ ;rows +=8;
+ add dword arg(2), 8
+
+ ;for(c=0; c<cols; c+=8)
+.loop_col:
+ mov rsi, arg(0) ; s
+ pxor xmm0, xmm0 ;
+
+ movsxd rax, dword ptr arg(1) ;pitch ;
+ neg rax ; rax = -pitch
+
+ lea rsi, [rsi + rax*8]; ; rdi = s[-pitch*8]
+ neg rax
+
+
+ pxor xmm5, xmm5
+ pxor xmm6, xmm6 ;
+
+ pxor xmm7, xmm7 ;
+ mov rdi, rsi
+
+ mov rcx, 15 ;
+
+.loop_initvar:
+ movq xmm1, QWORD PTR [rdi];
+ punpcklbw xmm1, xmm0 ;
+
+ paddw xmm5, xmm1 ;
+ pmullw xmm1, xmm1 ;
+
+ movdqa xmm2, xmm1 ;
+ punpcklwd xmm1, xmm0 ;
+
+ punpckhwd xmm2, xmm0 ;
+ paddd xmm6, xmm1 ;
+
+ paddd xmm7, xmm2 ;
+ lea rdi, [rdi+rax] ;
+
+ dec rcx
+ jne .loop_initvar
+ ;save the var and sum
+ xor rdx, rdx
+.loop_row:
+ movq xmm1, QWORD PTR [rsi] ; [s-pitch*8]
+ movq xmm2, QWORD PTR [rdi] ; [s+pitch*7]
+
+ punpcklbw xmm1, xmm0
+ punpcklbw xmm2, xmm0
+
+ paddw xmm5, xmm2
+ psubw xmm5, xmm1
+
+ pmullw xmm2, xmm2
+ movdqa xmm4, xmm2
+
+ punpcklwd xmm2, xmm0
+ punpckhwd xmm4, xmm0
+
+ paddd xmm6, xmm2
+ paddd xmm7, xmm4
+
+ pmullw xmm1, xmm1
+ movdqa xmm2, xmm1
+
+ punpcklwd xmm1, xmm0
+ psubd xmm6, xmm1
+
+ punpckhwd xmm2, xmm0
+ psubd xmm7, xmm2
+
+
+ movdqa xmm3, xmm6
+ pslld xmm3, 4
+
+ psubd xmm3, xmm6
+ movdqa xmm1, xmm5
+
+ movdqa xmm4, xmm5
+ pmullw xmm1, xmm1
+
+ pmulhw xmm4, xmm4
+ movdqa xmm2, xmm1
+
+ punpcklwd xmm1, xmm4
+ punpckhwd xmm2, xmm4
+
+ movdqa xmm4, xmm7
+ pslld xmm4, 4
+
+ psubd xmm4, xmm7
+
+ psubd xmm3, xmm1
+ psubd xmm4, xmm2
+
+ psubd xmm3, flimit4
+ psubd xmm4, flimit4
+
+ psrad xmm3, 31
+ psrad xmm4, 31
+
+ packssdw xmm3, xmm4
+ packsswb xmm3, xmm0
+
+ movq xmm1, QWORD PTR [rsi+rax*8]
+
+ movq xmm2, xmm1
+ punpcklbw xmm1, xmm0
+
+ paddw xmm1, xmm5
+ mov rcx, rdx
+
+ and rcx, 127
+%if ABI_IS_32BIT=1 && CONFIG_PIC=1
+ push rax
+ lea rax, [GLOBAL(sym(vp9_rv))]
+ movdqu xmm4, [rax + rcx*2] ;vp9_rv[rcx*2]
+ pop rax
+%elif ABI_IS_32BIT=0
+ movdqu xmm4, [r8 + rcx*2] ;vp9_rv[rcx*2]
+%else
+ movdqu xmm4, [sym(vp9_rv) + rcx*2]
+%endif
+
+ paddw xmm1, xmm4
+ ;paddw xmm1, eight8s
+ psraw xmm1, 4
+
+ packuswb xmm1, xmm0
+ pand xmm1, xmm3
+
+ pandn xmm3, xmm2
+ por xmm1, xmm3
+
+ and rcx, 15
+ movq QWORD PTR [rsp + rcx*8], xmm1 ;d[rcx*8]
+
+ mov rcx, rdx
+ sub rcx, 8
+
+ and rcx, 15
+ movq mm0, [rsp + rcx*8] ;d[rcx*8]
+
+ movq [rsi], mm0
+ lea rsi, [rsi+rax]
+
+ lea rdi, [rdi+rax]
+ add rdx, 1
+
+ cmp edx, dword arg(2) ;rows
+ jl .loop_row
+
+ add dword arg(0), 8 ; s += 8
+ sub dword arg(3), 8 ; cols -= 8
+ cmp dword arg(3), 0
+ jg .loop_col
+
+ add rsp, 128+16
+ pop rsp
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+%undef flimit4
+
+
+;void vp9_mbpost_proc_across_ip_xmm(unsigned char *src,
+;                              int pitch, int rows, int cols, int flimit)
+global sym(vp9_mbpost_proc_across_ip_xmm) PRIVATE
+sym(vp9_mbpost_proc_across_ip_xmm):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16
+
+ ; create flimit4 at [rsp]
+ mov eax, dword ptr arg(4) ;flimit
+ mov [rsp], eax
+ mov [rsp+4], eax
+ mov [rsp+8], eax
+ mov [rsp+12], eax
+%define flimit4 [rsp]
+
+
+ ;for(r=0;r<rows;r++)
+.ip_row_loop:
+
+ xor rdx, rdx ;sumsq=0;
+ xor rcx, rcx ;sum=0;
+ mov rsi, arg(0); s
+ mov rdi, -8
+.ip_var_loop:
+ ;for(i=-8;i<=6;i++)
+ ;{
+ ; sumsq += s[i]*s[i];
+ ; sum += s[i];
+ ;}
+ movzx eax, byte [rsi+rdi]
+ add ecx, eax
+ mul al
+ add edx, eax
+ add rdi, 1
+ cmp rdi, 6
+ jle .ip_var_loop
+
+
+ ;mov rax, sumsq
+ ;movd xmm7, rax
+ movd xmm7, edx
+
+ ;mov rax, sum
+ ;movd xmm6, rax
+ movd xmm6, ecx
+
+ mov rsi, arg(0) ;s
+ xor rcx, rcx
+
+ movsxd rdx, dword arg(3) ;cols
+ add rdx, 8
+ pxor mm0, mm0
+ pxor mm1, mm1
+
+ pxor xmm0, xmm0
+.nextcol4:
+
+ movd xmm1, DWORD PTR [rsi+rcx-8] ; -8 -7 -6 -5
+ movd xmm2, DWORD PTR [rsi+rcx+7] ; +7 +8 +9 +10
+
+ punpcklbw xmm1, xmm0 ; expanding
+ punpcklbw xmm2, xmm0 ; expanding
+
+ punpcklwd xmm1, xmm0 ; expanding to dwords
+ punpcklwd xmm2, xmm0 ; expanding to dwords
+
+ psubd xmm2, xmm1 ; 7--8 8--7 9--6 10--5
+ paddd xmm1, xmm1 ; -8*2 -7*2 -6*2 -5*2
+
+ paddd xmm1, xmm2 ; 7+-8 8+-7 9+-6 10+-5
+ pmaddwd xmm1, xmm2 ; squared of 7+-8 8+-7 9+-6 10+-5
+
+ paddd xmm6, xmm2
+ paddd xmm7, xmm1
+
+ pshufd xmm6, xmm6, 0 ; duplicate the last ones
+ pshufd xmm7, xmm7, 0 ; duplicate the last ones
+
+ psrldq xmm1, 4 ; 8--7 9--6 10--5 0000
+ psrldq xmm2, 4 ; 8--7 9--6 10--5 0000
+
+ pshufd xmm3, xmm1, 3 ; 0000 8--7 8--7 8--7 squared
+ pshufd xmm4, xmm2, 3 ; 0000 8--7 8--7 8--7 squared
+
+ paddd xmm6, xmm4
+ paddd xmm7, xmm3
+
+ pshufd xmm3, xmm1, 01011111b ; 0000 0000 9--6 9--6 squared
+ pshufd xmm4, xmm2, 01011111b ; 0000 0000 9--6 9--6 squared
+
+ paddd xmm7, xmm3
+ paddd xmm6, xmm4
+
+ pshufd xmm3, xmm1, 10111111b ; 0000 0000 8--7 8--7 squared
+ pshufd xmm4, xmm2, 10111111b ; 0000 0000 8--7 8--7 squared
+
+ paddd xmm7, xmm3
+ paddd xmm6, xmm4
+
+ movdqa xmm3, xmm6
+ pmaddwd xmm3, xmm3
+
+ movdqa xmm5, xmm7
+ pslld xmm5, 4
+
+ psubd xmm5, xmm7
+ psubd xmm5, xmm3
+
+ psubd xmm5, flimit4
+ psrad xmm5, 31
+
+ packssdw xmm5, xmm0
+ packsswb xmm5, xmm0
+
+ movd xmm1, DWORD PTR [rsi+rcx]
+ movq xmm2, xmm1
+
+ punpcklbw xmm1, xmm0
+ punpcklwd xmm1, xmm0
+
+ paddd xmm1, xmm6
+ paddd xmm1, [GLOBAL(four8s)]
+
+ psrad xmm1, 4
+ packssdw xmm1, xmm0
+
+ packuswb xmm1, xmm0
+ pand xmm1, xmm5
+
+ pandn xmm5, xmm2
+ por xmm5, xmm1
+
+ movd [rsi+rcx-8], mm0
+ movq mm0, mm1
+
+ movdq2q mm1, xmm5
+ psrldq xmm7, 12
+
+ psrldq xmm6, 12
+ add rcx, 4
+
+ cmp rcx, rdx
+ jl .nextcol4
+
+ ;s+=pitch;
+ movsxd rax, dword arg(1)
+ add arg(0), rax
+
+ sub dword arg(2), 1 ;rows-=1
+ cmp dword arg(2), 0
+ jg .ip_row_loop
+
+ add rsp, 16
+ pop rsp
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+%undef flimit4
+
+
+;void vp9_plane_add_noise_wmt (unsigned char *start, unsigned char *noise,
+; unsigned char blackclamp[16],
+; unsigned char whiteclamp[16],
+; unsigned char bothclamp[16],
+; unsigned int width, unsigned int height, int pitch)
+extern sym(rand)
+global sym(vp9_plane_add_noise_wmt) PRIVATE
+sym(vp9_plane_add_noise_wmt):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 8
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+.addnoise_loop:
+ call sym(rand) WRT_PLT
+ mov rcx, arg(1) ;noise
+ and rax, 0xff
+ add rcx, rax
+
+ ; we rely on the fact that the clamping vectors are stored contiguously
+ ; in black/white/both order. Note that we have to reload this here because
+ ; rdx could be trashed by rand()
+ mov rdx, arg(2) ; blackclamp
+
+
+ mov rdi, rcx
+ movsxd rcx, dword arg(5) ;[Width]
+ mov rsi, arg(0) ;Pos
+ xor rax,rax
+
+.addnoise_nextset:
+ movdqu xmm1,[rsi+rax] ; get the source
+
+ psubusb xmm1, [rdx] ;blackclamp ; clamp both sides so we don't outrange adding noise
+ paddusb xmm1, [rdx+32] ;bothclamp
+ psubusb xmm1, [rdx+16] ;whiteclamp
+
+ movdqu xmm2,[rdi+rax] ; get the noise for this line
+ paddb xmm1,xmm2 ; add it in
+ movdqu [rsi+rax],xmm1 ; store the result
+
+ add rax,16 ; move to the next line
+
+ cmp rax, rcx
+ jl .addnoise_nextset
+
+ movsxd rax, dword arg(7) ; Pitch
+ add arg(0), rax ; Start += Pitch
+ sub dword arg(6), 1 ; Height -= 1
+ jg .addnoise_loop
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+SECTION_RODATA
+align 16
+rd42:
+ times 8 dw 0x04
+four8s:
+ times 4 dd 8
diff --git a/libvpx/vp9/common/x86/vp9_postproc_x86.h b/libvpx/vp9/common/x86/vp9_postproc_x86.h
new file mode 100644
index 0000000..b0e8b18
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_postproc_x86.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_COMMON_X86_VP9_POSTPROC_X86_H_
+#define VP9_COMMON_X86_VP9_POSTPROC_X86_H_
+
+/* Note:
+ *
+ * This platform is commonly built for runtime CPU detection. If you modify
+ * any of the function mappings present in this file, be sure to also update
+ * them in the function pointer initialization code
+ */
+
+#if HAVE_MMX
+extern prototype_postproc_inplace(vp9_mbpost_proc_down_mmx);
+extern prototype_postproc(vp9_post_proc_down_and_across_mmx);
+extern prototype_postproc_addnoise(vp9_plane_add_noise_mmx);
+
+#if !CONFIG_RUNTIME_CPU_DETECT
+#undef vp9_postproc_down
+#define vp9_postproc_down vp9_mbpost_proc_down_mmx
+
+#undef vp9_postproc_downacross
+#define vp9_postproc_downacross vp9_post_proc_down_and_across_mmx
+
+#undef vp9_postproc_addnoise
+#define vp9_postproc_addnoise vp9_plane_add_noise_mmx
+
+#endif
+#endif
+
+
+#if HAVE_SSE2
+extern prototype_postproc_inplace(vp9_mbpost_proc_down_xmm);
+extern prototype_postproc_inplace(vp9_mbpost_proc_across_ip_xmm);
+extern prototype_postproc(vp9_post_proc_down_and_across_xmm);
+extern prototype_postproc_addnoise(vp9_plane_add_noise_wmt);
+
+#if !CONFIG_RUNTIME_CPU_DETECT
+#undef vp9_postproc_down
+#define vp9_postproc_down vp9_mbpost_proc_down_xmm
+
+#undef vp9_postproc_across
+#define vp9_postproc_across vp9_mbpost_proc_across_ip_xmm
+
+#undef vp9_postproc_downacross
+#define vp9_postproc_downacross vp9_post_proc_down_and_across_xmm
+
+#undef vp9_postproc_addnoise
+#define vp9_postproc_addnoise vp9_plane_add_noise_wmt
+
+
+#endif
+#endif
+
+#endif  // VP9_COMMON_X86_VP9_POSTPROC_X86_H_
diff --git a/libvpx/vp9/common/x86/vp9_recon_mmx.asm b/libvpx/vp9/common/x86/vp9_recon_mmx.asm
new file mode 100644
index 0000000..6fbbe48
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_recon_mmx.asm
@@ -0,0 +1,272 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+;void copy_mem8x8_mmx(
+; unsigned char *src,
+; int src_stride,
+; unsigned char *dst,
+; int dst_stride
+; )
+global sym(vp9_copy_mem8x8_mmx) PRIVATE
+sym(vp9_copy_mem8x8_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src;
+ movq mm0, [rsi]
+
+ movsxd rax, dword ptr arg(1) ;src_stride;
+ mov rdi, arg(2) ;dst;
+
+ movq mm1, [rsi+rax]
+ movq mm2, [rsi+rax*2]
+
+ movsxd rcx, dword ptr arg(3) ;dst_stride
+ lea rsi, [rsi+rax*2]
+
+ movq [rdi], mm0
+ add rsi, rax
+
+ movq [rdi+rcx], mm1
+ movq [rdi+rcx*2], mm2
+
+
+ lea rdi, [rdi+rcx*2]
+ movq mm3, [rsi]
+
+ add rdi, rcx
+ movq mm4, [rsi+rax]
+
+ movq mm5, [rsi+rax*2]
+ movq [rdi], mm3
+
+ lea rsi, [rsi+rax*2]
+ movq [rdi+rcx], mm4
+
+ movq [rdi+rcx*2], mm5
+ lea rdi, [rdi+rcx*2]
+
+ movq mm0, [rsi+rax]
+ movq mm1, [rsi+rax*2]
+
+ movq [rdi+rcx], mm0
+ movq [rdi+rcx*2],mm1
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void copy_mem8x4_mmx(
+; unsigned char *src,
+; int src_stride,
+; unsigned char *dst,
+; int dst_stride
+; )
+global sym(vp9_copy_mem8x4_mmx) PRIVATE
+sym(vp9_copy_mem8x4_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src;
+ movq mm0, [rsi]
+
+ movsxd rax, dword ptr arg(1) ;src_stride;
+ mov rdi, arg(2) ;dst;
+
+ movq mm1, [rsi+rax]
+ movq mm2, [rsi+rax*2]
+
+ movsxd rcx, dword ptr arg(3) ;dst_stride
+ lea rsi, [rsi+rax*2]
+
+ movq [rdi], mm0
+ movq [rdi+rcx], mm1
+
+ movq [rdi+rcx*2], mm2
+ lea rdi, [rdi+rcx*2]
+
+ movq mm3, [rsi+rax]
+ movq [rdi+rcx], mm3
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void copy_mem16x16_mmx(
+; unsigned char *src,
+; int src_stride,
+; unsigned char *dst,
+; int dst_stride
+; )
+global sym(vp9_copy_mem16x16_mmx) PRIVATE
+sym(vp9_copy_mem16x16_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src;
+ movsxd rax, dword ptr arg(1) ;src_stride;
+
+ mov rdi, arg(2) ;dst;
+ movsxd rcx, dword ptr arg(3) ;dst_stride
+
+ movq mm0, [rsi]
+ movq mm3, [rsi+8];
+
+ movq mm1, [rsi+rax]
+ movq mm4, [rsi+rax+8]
+
+ movq mm2, [rsi+rax*2]
+ movq mm5, [rsi+rax*2+8]
+
+ lea rsi, [rsi+rax*2]
+ add rsi, rax
+
+ movq [rdi], mm0
+ movq [rdi+8], mm3
+
+ movq [rdi+rcx], mm1
+ movq [rdi+rcx+8], mm4
+
+ movq [rdi+rcx*2], mm2
+ movq [rdi+rcx*2+8], mm5
+
+ lea rdi, [rdi+rcx*2]
+ add rdi, rcx
+
+ movq mm0, [rsi]
+ movq mm3, [rsi+8];
+
+ movq mm1, [rsi+rax]
+ movq mm4, [rsi+rax+8]
+
+ movq mm2, [rsi+rax*2]
+ movq mm5, [rsi+rax*2+8]
+
+ lea rsi, [rsi+rax*2]
+ add rsi, rax
+
+ movq [rdi], mm0
+ movq [rdi+8], mm3
+
+ movq [rdi+rcx], mm1
+ movq [rdi+rcx+8], mm4
+
+ movq [rdi+rcx*2], mm2
+ movq [rdi+rcx*2+8], mm5
+
+ lea rdi, [rdi+rcx*2]
+ add rdi, rcx
+
+ movq mm0, [rsi]
+ movq mm3, [rsi+8];
+
+ movq mm1, [rsi+rax]
+ movq mm4, [rsi+rax+8]
+
+ movq mm2, [rsi+rax*2]
+ movq mm5, [rsi+rax*2+8]
+
+ lea rsi, [rsi+rax*2]
+ add rsi, rax
+
+ movq [rdi], mm0
+ movq [rdi+8], mm3
+
+ movq [rdi+rcx], mm1
+ movq [rdi+rcx+8], mm4
+
+ movq [rdi+rcx*2], mm2
+ movq [rdi+rcx*2+8], mm5
+
+ lea rdi, [rdi+rcx*2]
+ add rdi, rcx
+
+ movq mm0, [rsi]
+ movq mm3, [rsi+8];
+
+ movq mm1, [rsi+rax]
+ movq mm4, [rsi+rax+8]
+
+ movq mm2, [rsi+rax*2]
+ movq mm5, [rsi+rax*2+8]
+
+ lea rsi, [rsi+rax*2]
+ add rsi, rax
+
+ movq [rdi], mm0
+ movq [rdi+8], mm3
+
+ movq [rdi+rcx], mm1
+ movq [rdi+rcx+8], mm4
+
+ movq [rdi+rcx*2], mm2
+ movq [rdi+rcx*2+8], mm5
+
+ lea rdi, [rdi+rcx*2]
+ add rdi, rcx
+
+ movq mm0, [rsi]
+ movq mm3, [rsi+8];
+
+ movq mm1, [rsi+rax]
+ movq mm4, [rsi+rax+8]
+
+ movq mm2, [rsi+rax*2]
+ movq mm5, [rsi+rax*2+8]
+
+ lea rsi, [rsi+rax*2]
+ add rsi, rax
+
+ movq [rdi], mm0
+ movq [rdi+8], mm3
+
+ movq [rdi+rcx], mm1
+ movq [rdi+rcx+8], mm4
+
+ movq [rdi+rcx*2], mm2
+ movq [rdi+rcx*2+8], mm5
+
+ lea rdi, [rdi+rcx*2]
+ add rdi, rcx
+
+ movq mm0, [rsi]
+ movq mm3, [rsi+8];
+
+ movq [rdi], mm0
+ movq [rdi+8], mm3
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
diff --git a/libvpx/vp9/common/x86/vp9_recon_sse2.asm b/libvpx/vp9/common/x86/vp9_recon_sse2.asm
new file mode 100644
index 0000000..9ee3043
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_recon_sse2.asm
@@ -0,0 +1,572 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+;void copy_mem16x16_sse2(
+; unsigned char *src,
+; int src_stride,
+; unsigned char *dst,
+; int dst_stride
+; )
+global sym(vp9_copy_mem16x16_sse2) PRIVATE
+sym(vp9_copy_mem16x16_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src;
+ movdqu xmm0, [rsi]
+
+ movsxd rax, dword ptr arg(1) ;src_stride;
+ mov rdi, arg(2) ;dst;
+
+ movdqu xmm1, [rsi+rax]
+ movdqu xmm2, [rsi+rax*2]
+
+ movsxd rcx, dword ptr arg(3) ;dst_stride
+ lea rsi, [rsi+rax*2]
+
+ movdqa [rdi], xmm0
+ add rsi, rax
+
+ movdqa [rdi+rcx], xmm1
+ movdqa [rdi+rcx*2],xmm2
+
+ lea rdi, [rdi+rcx*2]
+ movdqu xmm3, [rsi]
+
+ add rdi, rcx
+ movdqu xmm4, [rsi+rax]
+
+ movdqu xmm5, [rsi+rax*2]
+ lea rsi, [rsi+rax*2]
+
+ movdqa [rdi], xmm3
+ add rsi, rax
+
+ movdqa [rdi+rcx], xmm4
+ movdqa [rdi+rcx*2],xmm5
+
+ lea rdi, [rdi+rcx*2]
+ movdqu xmm0, [rsi]
+
+ add rdi, rcx
+ movdqu xmm1, [rsi+rax]
+
+ movdqu xmm2, [rsi+rax*2]
+ lea rsi, [rsi+rax*2]
+
+ movdqa [rdi], xmm0
+ add rsi, rax
+
+ movdqa [rdi+rcx], xmm1
+
+ movdqa [rdi+rcx*2], xmm2
+ movdqu xmm3, [rsi]
+
+ movdqu xmm4, [rsi+rax]
+ lea rdi, [rdi+rcx*2]
+
+ add rdi, rcx
+ movdqu xmm5, [rsi+rax*2]
+
+ lea rsi, [rsi+rax*2]
+ movdqa [rdi], xmm3
+
+ add rsi, rax
+ movdqa [rdi+rcx], xmm4
+
+ movdqa [rdi+rcx*2],xmm5
+ movdqu xmm0, [rsi]
+
+ lea rdi, [rdi+rcx*2]
+ movdqu xmm1, [rsi+rax]
+
+ add rdi, rcx
+ movdqu xmm2, [rsi+rax*2]
+
+ lea rsi, [rsi+rax*2]
+ movdqa [rdi], xmm0
+
+ movdqa [rdi+rcx], xmm1
+ movdqa [rdi+rcx*2],xmm2
+
+ movdqu xmm3, [rsi+rax]
+ lea rdi, [rdi+rcx*2]
+
+ movdqa [rdi+rcx], xmm3
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp9_intra_pred_uv_dc_mmx2(
+; unsigned char *dst,
+; int dst_stride,
+; unsigned char *src,
+; int src_stride
+; )
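+;
+; DC prediction: fills the 8x8 block with the rounded average of the eight
+; pixels above and the eight to the left; in rough C (a sketch):
+;   dc = (sum(top[0..7]) + sum(left[0..7]) + 8) >> 4;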
+global sym(vp9_intra_pred_uv_dc_mmx2) PRIVATE
+sym(vp9_intra_pred_uv_dc_mmx2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ ; from top
+ mov rsi, arg(2) ;src;
+ movsxd rax, dword ptr arg(3) ;src_stride;
+ sub rsi, rax
+ pxor mm0, mm0
+ movq mm1, [rsi]
+ psadbw mm1, mm0
+
+ ; from left
+ dec rsi
+ lea rdi, [rax*3]
+ movzx ecx, byte [rsi+rax]
+ movzx edx, byte [rsi+rax*2]
+ add ecx, edx
+ movzx edx, byte [rsi+rdi]
+ add ecx, edx
+ lea rsi, [rsi+rax*4]
+ movzx edx, byte [rsi]
+ add ecx, edx
+ movzx edx, byte [rsi+rax]
+ add ecx, edx
+ movzx edx, byte [rsi+rax*2]
+ add ecx, edx
+ movzx edx, byte [rsi+rdi]
+ add ecx, edx
+ movzx edx, byte [rsi+rax*4]
+ add ecx, edx
+
+ ; add up
+ pextrw edx, mm1, 0x0
+ lea edx, [edx+ecx+8]
+ sar edx, 4
+ movd mm1, edx
+ pshufw mm1, mm1, 0x0
+ packuswb mm1, mm1
+
+ ; write out
+ mov rdi, arg(0) ;dst;
+ movsxd rcx, dword ptr arg(1) ;dst_stride
+ lea rax, [rcx*3]
+
+ movq [rdi ], mm1
+ movq [rdi+rcx ], mm1
+ movq [rdi+rcx*2], mm1
+ movq [rdi+rax ], mm1
+ lea rdi, [rdi+rcx*4]
+ movq [rdi ], mm1
+ movq [rdi+rcx ], mm1
+ movq [rdi+rcx*2], mm1
+ movq [rdi+rax ], mm1
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_intra_pred_uv_dctop_mmx2(
+; unsigned char *dst,
+; int dst_stride,
+; unsigned char *src,
+; int src_stride
+; )
+global sym(vp9_intra_pred_uv_dctop_mmx2) PRIVATE
+sym(vp9_intra_pred_uv_dctop_mmx2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ; from top
+ mov rsi, arg(2) ;src;
+ movsxd rax, dword ptr arg(3) ;src_stride;
+ sub rsi, rax
+ pxor mm0, mm0
+ movq mm1, [rsi]
+ psadbw mm1, mm0
+
+ ; add up
+ paddw mm1, [GLOBAL(dc_4)]
+ psraw mm1, 3
+ pshufw mm1, mm1, 0x0
+ packuswb mm1, mm1
+
+ ; write out
+ mov rdi, arg(0) ;dst;
+ movsxd rcx, dword ptr arg(1) ;dst_stride
+ lea rax, [rcx*3]
+
+ movq [rdi ], mm1
+ movq [rdi+rcx ], mm1
+ movq [rdi+rcx*2], mm1
+ movq [rdi+rax ], mm1
+ lea rdi, [rdi+rcx*4]
+ movq [rdi ], mm1
+ movq [rdi+rcx ], mm1
+ movq [rdi+rcx*2], mm1
+ movq [rdi+rax ], mm1
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_intra_pred_uv_dcleft_mmx2(
+; unsigned char *dst,
+; int dst_stride,
+; unsigned char *src,
+; int src_stride
+; )
+global sym(vp9_intra_pred_uv_dcleft_mmx2) PRIVATE
+sym(vp9_intra_pred_uv_dcleft_mmx2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ ; from left
+ mov rsi, arg(2) ;src;
+ movsxd rax, dword ptr arg(3) ;src_stride;
+ dec rsi
+ lea rdi, [rax*3]
+ movzx ecx, byte [rsi]
+ movzx edx, byte [rsi+rax]
+ add ecx, edx
+ movzx edx, byte [rsi+rax*2]
+ add ecx, edx
+ movzx edx, byte [rsi+rdi]
+ add ecx, edx
+ lea rsi, [rsi+rax*4]
+ movzx edx, byte [rsi]
+ add ecx, edx
+ movzx edx, byte [rsi+rax]
+ add ecx, edx
+ movzx edx, byte [rsi+rax*2]
+ add ecx, edx
+ movzx edx, byte [rsi+rdi]
+ lea edx, [ecx+edx+4]
+
+ ; add up
+ shr edx, 3
+ movd mm1, edx
+ pshufw mm1, mm1, 0x0
+ packuswb mm1, mm1
+
+ ; write out
+ mov rdi, arg(0) ;dst;
+ movsxd rcx, dword ptr arg(1) ;dst_stride
+ lea rax, [rcx*3]
+
+ movq [rdi ], mm1
+ movq [rdi+rcx ], mm1
+ movq [rdi+rcx*2], mm1
+ movq [rdi+rax ], mm1
+ lea rdi, [rdi+rcx*4]
+ movq [rdi ], mm1
+ movq [rdi+rcx ], mm1
+ movq [rdi+rcx*2], mm1
+ movq [rdi+rax ], mm1
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_intra_pred_uv_dc128_mmx(
+; unsigned char *dst,
+; int dst_stride,
+; unsigned char *src,
+; int src_stride
+; )
+global sym(vp9_intra_pred_uv_dc128_mmx) PRIVATE
+sym(vp9_intra_pred_uv_dc128_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ GET_GOT rbx
+ ; end prolog
+
+ ; write out
+ movq mm1, [GLOBAL(dc_128)]
+ mov rax, arg(0) ;dst;
+ movsxd rdx, dword ptr arg(1) ;dst_stride
+ lea rcx, [rdx*3]
+
+ movq [rax ], mm1
+ movq [rax+rdx ], mm1
+ movq [rax+rdx*2], mm1
+ movq [rax+rcx ], mm1
+ lea rax, [rax+rdx*4]
+ movq [rax ], mm1
+ movq [rax+rdx ], mm1
+ movq [rax+rdx*2], mm1
+ movq [rax+rcx ], mm1
+
+ ; begin epilog
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_intra_pred_uv_tm_sse2(
+; unsigned char *dst,
+; int dst_stride,
+; unsigned char *src,
+; int src_stride
+; )
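+;
+; TM ("TrueMotion") prediction; in rough C (a sketch, clipped to 0..255):
+;   dst[r][c] = clip(left[r] + top[c] - topleft);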
+%macro vp9_intra_pred_uv_tm 1
+global sym(vp9_intra_pred_uv_tm_%1) PRIVATE
+sym(vp9_intra_pred_uv_tm_%1):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ; read top row
+ mov edx, 4
+ mov rsi, arg(2) ;src;
+ movsxd rax, dword ptr arg(3) ;src_stride;
+ sub rsi, rax
+ pxor xmm0, xmm0
+%ifidn %1, ssse3
+ movdqa xmm2, [GLOBAL(dc_1024)]
+%endif
+ movq xmm1, [rsi]
+ punpcklbw xmm1, xmm0
+
+        ; set up left ptrs and subtract topleft
+ movd xmm3, [rsi-1]
+ lea rsi, [rsi+rax-1]
+%ifidn %1, sse2
+ punpcklbw xmm3, xmm0
+ pshuflw xmm3, xmm3, 0x0
+ punpcklqdq xmm3, xmm3
+%else
+ pshufb xmm3, xmm2
+%endif
+ psubw xmm1, xmm3
+
+ ; set up dest ptrs
+ mov rdi, arg(0) ;dst;
+ movsxd rcx, dword ptr arg(1) ;dst_stride
+
+.vp9_intra_pred_uv_tm_%1_loop:
+ movd xmm3, [rsi]
+ movd xmm5, [rsi+rax]
+%ifidn %1, sse2
+ punpcklbw xmm3, xmm0
+ punpcklbw xmm5, xmm0
+ pshuflw xmm3, xmm3, 0x0
+ pshuflw xmm5, xmm5, 0x0
+ punpcklqdq xmm3, xmm3
+ punpcklqdq xmm5, xmm5
+%else
+ pshufb xmm3, xmm2
+ pshufb xmm5, xmm2
+%endif
+ paddw xmm3, xmm1
+ paddw xmm5, xmm1
+ packuswb xmm3, xmm5
+ movq [rdi ], xmm3
+        movhps [rdi+rcx], xmm3
+ lea rsi, [rsi+rax*2]
+ lea rdi, [rdi+rcx*2]
+ dec edx
+ jnz .vp9_intra_pred_uv_tm_%1_loop
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+%endmacro
+
+vp9_intra_pred_uv_tm sse2
+vp9_intra_pred_uv_tm ssse3
+
+;void vp9_intra_pred_uv_ve_mmx(
+; unsigned char *dst,
+; int dst_stride,
+; unsigned char *src,
+; int src_stride
+; )
+global sym(vp9_intra_pred_uv_ve_mmx) PRIVATE
+sym(vp9_intra_pred_uv_ve_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ ; end prolog
+
+ ; read from top
+ mov rax, arg(2) ;src;
+ movsxd rdx, dword ptr arg(3) ;src_stride;
+ sub rax, rdx
+ movq mm1, [rax]
+
+ ; write out
+ mov rax, arg(0) ;dst;
+ movsxd rdx, dword ptr arg(1) ;dst_stride
+ lea rcx, [rdx*3]
+
+ movq [rax ], mm1
+ movq [rax+rdx ], mm1
+ movq [rax+rdx*2], mm1
+ movq [rax+rcx ], mm1
+ lea rax, [rax+rdx*4]
+ movq [rax ], mm1
+ movq [rax+rdx ], mm1
+ movq [rax+rdx*2], mm1
+ movq [rax+rcx ], mm1
+
+ ; begin epilog
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_intra_pred_uv_ho_mmx2(
+; unsigned char *dst,
+; int dst_stride,
+; unsigned char *src,
+; int src_stride
+; )
+%macro vp9_intra_pred_uv_ho 1
+global sym(vp9_intra_pred_uv_ho_%1) PRIVATE
+sym(vp9_intra_pred_uv_ho_%1):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+%ifidn %1, ssse3
+%ifndef GET_GOT_SAVE_ARG
+ push rbx
+%endif
+ GET_GOT rbx
+%endif
+ ; end prolog
+
+ ; read from left and write out
+%ifidn %1, mmx2
+ mov edx, 4
+%endif
+ mov rsi, arg(2) ;src;
+ movsxd rax, dword ptr arg(3) ;src_stride;
+ mov rdi, arg(0) ;dst;
+ movsxd rcx, dword ptr arg(1) ;dst_stride
+%ifidn %1, ssse3
+ lea rdx, [rcx*3]
+ movdqa xmm2, [GLOBAL(dc_00001111)]
+ lea rbx, [rax*3]
+%endif
+ dec rsi
+%ifidn %1, mmx2
+.vp9_intra_pred_uv_ho_%1_loop:
+ movd mm0, [rsi]
+ movd mm1, [rsi+rax]
+ punpcklbw mm0, mm0
+ punpcklbw mm1, mm1
+ pshufw mm0, mm0, 0x0
+ pshufw mm1, mm1, 0x0
+ movq [rdi ], mm0
+ movq [rdi+rcx], mm1
+ lea rsi, [rsi+rax*2]
+ lea rdi, [rdi+rcx*2]
+ dec edx
+ jnz .vp9_intra_pred_uv_ho_%1_loop
+%else
+ movd xmm0, [rsi]
+ movd xmm3, [rsi+rax]
+ movd xmm1, [rsi+rax*2]
+ movd xmm4, [rsi+rbx]
+ punpcklbw xmm0, xmm3
+ punpcklbw xmm1, xmm4
+ pshufb xmm0, xmm2
+ pshufb xmm1, xmm2
+ movq [rdi ], xmm0
+ movhps [rdi+rcx], xmm0
+ movq [rdi+rcx*2], xmm1
+ movhps [rdi+rdx], xmm1
+ lea rsi, [rsi+rax*4]
+ lea rdi, [rdi+rcx*4]
+ movd xmm0, [rsi]
+ movd xmm3, [rsi+rax]
+ movd xmm1, [rsi+rax*2]
+ movd xmm4, [rsi+rbx]
+ punpcklbw xmm0, xmm3
+ punpcklbw xmm1, xmm4
+ pshufb xmm0, xmm2
+ pshufb xmm1, xmm2
+ movq [rdi ], xmm0
+ movhps [rdi+rcx], xmm0
+ movq [rdi+rcx*2], xmm1
+ movhps [rdi+rdx], xmm1
+%endif
+
+ ; begin epilog
+%ifidn %1, ssse3
+ RESTORE_GOT
+%ifndef GET_GOT_SAVE_ARG
+ pop rbx
+%endif
+%endif
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+%endmacro
+
+vp9_intra_pred_uv_ho mmx2
+vp9_intra_pred_uv_ho ssse3
+
+SECTION_RODATA
+dc_128:
+ times 8 db 128
+dc_4:
+ times 4 dw 4
+align 16
+dc_1024:
+ times 8 dw 0x400
+align 16
+dc_00001111:
+ times 8 db 0
+ times 8 db 1
diff --git a/libvpx/vp9/common/x86/vp9_recon_wrapper_sse2.c b/libvpx/vp9/common/x86/vp9_recon_wrapper_sse2.c
new file mode 100644
index 0000000..97148fb
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_recon_wrapper_sse2.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vpx_config.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp9/common/vp9_blockd.h"
+
+#define build_intra_predictors_mbuv_prototype(sym) \
+ void sym(unsigned char *dst, int dst_stride, \
+ const unsigned char *src, int src_stride)
+typedef build_intra_predictors_mbuv_prototype((*build_intra_pred_mbuv_fn_t));
+
+extern build_intra_predictors_mbuv_prototype(vp9_intra_pred_uv_dc_mmx2);
+extern build_intra_predictors_mbuv_prototype(vp9_intra_pred_uv_dctop_mmx2);
+extern build_intra_predictors_mbuv_prototype(vp9_intra_pred_uv_dcleft_mmx2);
+extern build_intra_predictors_mbuv_prototype(vp9_intra_pred_uv_dc128_mmx);
+extern build_intra_predictors_mbuv_prototype(vp9_intra_pred_uv_ho_mmx2);
+extern build_intra_predictors_mbuv_prototype(vp9_intra_pred_uv_ho_ssse3);
+extern build_intra_predictors_mbuv_prototype(vp9_intra_pred_uv_ve_mmx);
+extern build_intra_predictors_mbuv_prototype(vp9_intra_pred_uv_tm_sse2);
+extern build_intra_predictors_mbuv_prototype(vp9_intra_pred_uv_tm_ssse3);
+
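+/* Dispatch on the macroblock's UV prediction mode and run the matching
+ * assembly predictor over both chroma planes. The TM and H predictors are
+ * passed in by the callers below so that the SSE2 and SSSE3 entry points can
+ * select different specializations; V and the DC variants each have a single
+ * implementation. */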
+static void build_intra_predictors_mbuv_x86(MACROBLOCKD *xd,
+ unsigned char *dst_u,
+ unsigned char *dst_v,
+ int dst_stride,
+ build_intra_pred_mbuv_fn_t tm_fn,
+ build_intra_pred_mbuv_fn_t ho_fn) {
+ int mode = xd->mode_info_context->mbmi.uv_mode;
+ build_intra_pred_mbuv_fn_t fn;
+ int src_stride = xd->plane[1].dst.stride;
+
+ switch (mode) {
+ case V_PRED:
+ fn = vp9_intra_pred_uv_ve_mmx;
+ break;
+ case H_PRED:
+ fn = ho_fn;
+ break;
+ case TM_PRED:
+ fn = tm_fn;
+ break;
+ case DC_PRED:
+ if (xd->up_available) {
+ if (xd->left_available) {
+ fn = vp9_intra_pred_uv_dc_mmx2;
+ break;
+ } else {
+ fn = vp9_intra_pred_uv_dctop_mmx2;
+ break;
+ }
+ } else if (xd->left_available) {
+ fn = vp9_intra_pred_uv_dcleft_mmx2;
+ break;
+ } else {
+ fn = vp9_intra_pred_uv_dc128_mmx;
+ break;
+ }
+ break;
+ default:
+ return;
+ }
+
+ fn(dst_u, dst_stride, xd->plane[1].dst.buf, src_stride);
+ fn(dst_v, dst_stride, xd->plane[2].dst.buf, src_stride);
+}
+
+void vp9_build_intra_predictors_mbuv_sse2(MACROBLOCKD *xd) {
+ build_intra_predictors_mbuv_x86(xd, xd->plane[1].dst.buf,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
+ vp9_intra_pred_uv_tm_sse2,
+ vp9_intra_pred_uv_ho_mmx2);
+}
+
+void vp9_build_intra_predictors_mbuv_ssse3(MACROBLOCKD *xd) {
+ build_intra_predictors_mbuv_x86(xd, xd->plane[1].dst.buf,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
+ vp9_intra_pred_uv_tm_ssse3,
+ vp9_intra_pred_uv_ho_ssse3);
+}
+
+void vp9_build_intra_predictors_mbuv_s_sse2(MACROBLOCKD *xd) {
+ build_intra_predictors_mbuv_x86(xd, xd->plane[1].dst.buf,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
+ vp9_intra_pred_uv_tm_sse2,
+ vp9_intra_pred_uv_ho_mmx2);
+}
+
+void vp9_build_intra_predictors_mbuv_s_ssse3(MACROBLOCKD *xd) {
+ build_intra_predictors_mbuv_x86(xd, xd->plane[1].dst.buf,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
+ vp9_intra_pred_uv_tm_ssse3,
+ vp9_intra_pred_uv_ho_ssse3);
+}
diff --git a/libvpx/vp9/common/x86/vp9_sadmxn_sse2.c b/libvpx/vp9/common/x86/vp9_sadmxn_sse2.c
new file mode 100644
index 0000000..ed873a5
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_sadmxn_sse2.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <emmintrin.h> /* SSE2 */
+#include "vpx/vpx_integer.h"
+#include "vpx_ports/emmintrin_compat.h"
+
+unsigned int vp9_sad16x3_sse2(
+ const unsigned char *src_ptr,
+ int src_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride) {
+ __m128i s0, s1, s2;
+ __m128i r0, r1, r2;
+ __m128i sad;
+
+ s0 = _mm_loadu_si128((const __m128i *)(src_ptr + 0 * src_stride));
+ s1 = _mm_loadu_si128((const __m128i *)(src_ptr + 1 * src_stride));
+ s2 = _mm_loadu_si128((const __m128i *)(src_ptr + 2 * src_stride));
+
+ r0 = _mm_loadu_si128((const __m128i *)(ref_ptr + 0 * ref_stride));
+ r1 = _mm_loadu_si128((const __m128i *)(ref_ptr + 1 * ref_stride));
+ r2 = _mm_loadu_si128((const __m128i *)(ref_ptr + 2 * ref_stride));
+
+ sad = _mm_sad_epu8(s0, r0);
+ sad = _mm_add_epi16(sad, _mm_sad_epu8(s1, r1));
+ sad = _mm_add_epi16(sad, _mm_sad_epu8(s2, r2));
+ sad = _mm_add_epi16(sad, _mm_srli_si128(sad, 8));
+
+ return _mm_cvtsi128_si32(sad);
+}
+
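+/* SAD over a 3-pixel-wide, 16-row block. A rough scalar equivalent of what
+ * the SSE2 code below computes (illustrative sketch only):
+ *
+ *   unsigned int sad = 0;
+ *   for (r = 0; r < 16; r++)
+ *     for (c = 0; c < 3; c++)
+ *       sad += abs(src_ptr[r * src_stride + c] - ref_ptr[r * ref_stride + c]);
+ *
+ * The vector version loads 4 bytes per row, interleaves pairs of rows, and
+ * masks/shifts away the unused 4th byte before _mm_sad_epu8. */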
+unsigned int vp9_sad3x16_sse2(
+ const unsigned char *src_ptr,
+ int src_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride) {
+ int r;
+ __m128i s0, s1, s2, s3;
+ __m128i r0, r1, r2, r3;
+ __m128i sad = _mm_setzero_si128();
+ __m128i mask;
+ const int offset = (uintptr_t)src_ptr & 3;
+
+  /* In the current use case, the offset is 1 if CONFIG_SUBPELREFMV is off.
+   * Here, for offset == 1, we adjust src_ptr to be 4-byte aligned. Then the
+   * movd takes much less time.
+   */
+ if (offset == 1)
+ src_ptr -= 1;
+
+ /* mask = 0xffffffffffff0000ffffffffffff0000 */
+ mask = _mm_cmpeq_epi32(sad, sad);
+ mask = _mm_slli_epi64(mask, 16);
+
+ for (r = 0; r < 16; r += 4) {
+ s0 = _mm_cvtsi32_si128 (*(const int *)(src_ptr + 0 * src_stride));
+ s1 = _mm_cvtsi32_si128 (*(const int *)(src_ptr + 1 * src_stride));
+ s2 = _mm_cvtsi32_si128 (*(const int *)(src_ptr + 2 * src_stride));
+ s3 = _mm_cvtsi32_si128 (*(const int *)(src_ptr + 3 * src_stride));
+ r0 = _mm_cvtsi32_si128 (*(const int *)(ref_ptr + 0 * ref_stride));
+ r1 = _mm_cvtsi32_si128 (*(const int *)(ref_ptr + 1 * ref_stride));
+ r2 = _mm_cvtsi32_si128 (*(const int *)(ref_ptr + 2 * ref_stride));
+ r3 = _mm_cvtsi32_si128 (*(const int *)(ref_ptr + 3 * ref_stride));
+
+ s0 = _mm_unpacklo_epi8(s0, s1);
+ r0 = _mm_unpacklo_epi8(r0, r1);
+ s2 = _mm_unpacklo_epi8(s2, s3);
+ r2 = _mm_unpacklo_epi8(r2, r3);
+ s0 = _mm_unpacklo_epi64(s0, s2);
+ r0 = _mm_unpacklo_epi64(r0, r2);
+
+    /* Throw out the extra byte: for offset == 1, zero the low interleaved
+     * byte pair of the source; otherwise shift out its high pair. The ref
+     * rows are always shifted so that the three valid bytes line up. */
+    if (offset == 1)
+      s0 = _mm_and_si128(s0, mask);
+    else
+      s0 = _mm_slli_epi64(s0, 16);
+    r0 = _mm_slli_epi64(r0, 16);
+
+ sad = _mm_add_epi16(sad, _mm_sad_epu8(s0, r0));
+
+ src_ptr += src_stride*4;
+ ref_ptr += ref_stride*4;
+ }
+
+ sad = _mm_add_epi16(sad, _mm_srli_si128(sad, 8));
+ return _mm_cvtsi128_si32(sad);
+}
diff --git a/libvpx/vp9/common/x86/vp9_subpixel_8t_ssse3.asm b/libvpx/vp9/common/x86/vp9_subpixel_8t_ssse3.asm
new file mode 100644
index 0000000..bbf9888
--- /dev/null
+++ b/libvpx/vp9/common/x86/vp9_subpixel_8t_ssse3.asm
@@ -0,0 +1,1011 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+;/************************************************************************************
+; Notes: The routines in this file apply an 8 tap filter to a block of input
+; pixels, one row at a time. The input pixel array has output_height rows. Each
+; iteration filters 4, 8 or 16 pixels, taking advantage of the 128-bit SSE
+; operations.
+;
+; This is an implementation of some of the SSE optimizations first seen in ffvp8.
+;
+;*************************************************************************************/
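+; A rough C equivalent of one output pixel produced by the VERTx* macros
+; below (illustrative sketch only; krd holds the rounding constant 64):
+;
+;   sum = 64;
+;   for (k = 0; k < 8; k++)
+;     sum += src_ptr[k * src_pitch] * filter[k];
+;   output = clamp(sum >> 7, 0, 255);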
+
+
+%macro VERTx4 1
+ mov rdx, arg(5) ;filter ptr
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;output_ptr
+ mov rcx, 0x0400040
+
+ movdqa xmm4, [rdx] ;load filters
+ movd xmm5, rcx
+ packsswb xmm4, xmm4
+ pshuflw xmm0, xmm4, 0b ;k0_k1
+ pshuflw xmm1, xmm4, 01010101b ;k2_k3
+ pshuflw xmm2, xmm4, 10101010b ;k4_k5
+ pshuflw xmm3, xmm4, 11111111b ;k6_k7
+
+ punpcklqdq xmm0, xmm0
+ punpcklqdq xmm1, xmm1
+ punpcklqdq xmm2, xmm2
+ punpcklqdq xmm3, xmm3
+
+ movdqa k0k1, xmm0
+ movdqa k2k3, xmm1
+ pshufd xmm5, xmm5, 0
+ movdqa k4k5, xmm2
+ movdqa k6k7, xmm3
+ movdqa krd, xmm5
+
+ movsxd rdx, DWORD PTR arg(1) ;pixels_per_line
+
+%if ABI_IS_32BIT=0
+ movsxd r8, DWORD PTR arg(3) ;out_pitch
+%endif
+ mov rax, rsi
+ movsxd rcx, DWORD PTR arg(4) ;output_height
+ add rax, rdx
+
+ lea rbx, [rdx + rdx*4]
+ add rbx, rdx ;pitch * 6
+
+.loop:
+ movd xmm0, [rsi] ;A
+ movd xmm1, [rsi + rdx] ;B
+ movd xmm2, [rsi + rdx * 2] ;C
+ movd xmm3, [rax + rdx * 2] ;D
+ movd xmm4, [rsi + rdx * 4] ;E
+ movd xmm5, [rax + rdx * 4] ;F
+
+ punpcklbw xmm0, xmm1 ;A B
+ punpcklbw xmm2, xmm3 ;C D
+ punpcklbw xmm4, xmm5 ;E F
+
+ movd xmm6, [rsi + rbx] ;G
+ movd xmm7, [rax + rbx] ;H
+
+ pmaddubsw xmm0, k0k1
+ pmaddubsw xmm2, k2k3
+ punpcklbw xmm6, xmm7 ;G H
+ pmaddubsw xmm4, k4k5
+ pmaddubsw xmm6, k6k7
+
+ paddsw xmm0, xmm6
+ paddsw xmm0, xmm2
+ paddsw xmm0, xmm4
+ paddsw xmm0, krd
+
+ psraw xmm0, 7
+ packuswb xmm0, xmm0
+
+ add rsi, rdx
+ add rax, rdx
+%if %1
+ movd xmm1, [rdi]
+ pavgb xmm0, xmm1
+%endif
+ movd [rdi], xmm0
+
+%if ABI_IS_32BIT
+ add rdi, DWORD PTR arg(3) ;out_pitch
+%else
+ add rdi, r8
+%endif
+ dec rcx
+ jnz .loop
+%endm
+
+%macro VERTx8 1
+ mov rdx, arg(5) ;filter ptr
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;output_ptr
+ mov rcx, 0x0400040
+
+ movdqa xmm4, [rdx] ;load filters
+ movq xmm5, rcx
+ packsswb xmm4, xmm4
+ pshuflw xmm0, xmm4, 0b ;k0_k1
+ pshuflw xmm1, xmm4, 01010101b ;k2_k3
+ pshuflw xmm2, xmm4, 10101010b ;k4_k5
+ pshuflw xmm3, xmm4, 11111111b ;k6_k7
+
+ punpcklqdq xmm0, xmm0
+ punpcklqdq xmm1, xmm1
+ punpcklqdq xmm2, xmm2
+ punpcklqdq xmm3, xmm3
+
+ movdqa k0k1, xmm0
+ movdqa k2k3, xmm1
+ pshufd xmm5, xmm5, 0
+ movdqa k4k5, xmm2
+ movdqa k6k7, xmm3
+ movdqa krd, xmm5
+
+ movsxd rdx, DWORD PTR arg(1) ;pixels_per_line
+
+%if ABI_IS_32BIT=0
+ movsxd r8, DWORD PTR arg(3) ;out_pitch
+%endif
+ mov rax, rsi
+ movsxd rcx, DWORD PTR arg(4) ;output_height
+ add rax, rdx
+
+ lea rbx, [rdx + rdx*4]
+ add rbx, rdx ;pitch * 6
+
+.loop:
+ movq xmm0, [rsi] ;A
+ movq xmm1, [rsi + rdx] ;B
+ movq xmm2, [rsi + rdx * 2] ;C
+ movq xmm3, [rax + rdx * 2] ;D
+ movq xmm4, [rsi + rdx * 4] ;E
+ movq xmm5, [rax + rdx * 4] ;F
+
+ punpcklbw xmm0, xmm1 ;A B
+ punpcklbw xmm2, xmm3 ;C D
+ punpcklbw xmm4, xmm5 ;E F
+
+ movq xmm6, [rsi + rbx] ;G
+ movq xmm7, [rax + rbx] ;H
+
+ pmaddubsw xmm0, k0k1
+ pmaddubsw xmm2, k2k3
+ punpcklbw xmm6, xmm7 ;G H
+ pmaddubsw xmm4, k4k5
+ pmaddubsw xmm6, k6k7
+
+ paddsw xmm0, xmm6
+ paddsw xmm0, xmm2
+ paddsw xmm0, xmm4
+ paddsw xmm0, krd
+
+ psraw xmm0, 7
+ packuswb xmm0, xmm0
+
+ add rsi, rdx
+ add rax, rdx
+%if %1
+ movq xmm1, [rdi]
+ pavgb xmm0, xmm1
+%endif
+ movq [rdi], xmm0
+
+%if ABI_IS_32BIT
+ add rdi, DWORD PTR arg(3) ;out_pitch
+%else
+ add rdi, r8
+%endif
+ dec rcx
+ jnz .loop
+%endm
+
+
+%macro VERTx16 1
+ mov rdx, arg(5) ;filter ptr
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;output_ptr
+ mov rcx, 0x0400040
+
+ movdqa xmm4, [rdx] ;load filters
+ movq xmm5, rcx
+ packsswb xmm4, xmm4
+ pshuflw xmm0, xmm4, 0b ;k0_k1
+ pshuflw xmm1, xmm4, 01010101b ;k2_k3
+ pshuflw xmm2, xmm4, 10101010b ;k4_k5
+ pshuflw xmm3, xmm4, 11111111b ;k6_k7
+
+ punpcklqdq xmm0, xmm0
+ punpcklqdq xmm1, xmm1
+ punpcklqdq xmm2, xmm2
+ punpcklqdq xmm3, xmm3
+
+ movdqa k0k1, xmm0
+ movdqa k2k3, xmm1
+ pshufd xmm5, xmm5, 0
+ movdqa k4k5, xmm2
+ movdqa k6k7, xmm3
+ movdqa krd, xmm5
+
+ movsxd rdx, DWORD PTR arg(1) ;pixels_per_line
+
+%if ABI_IS_32BIT=0
+ movsxd r8, DWORD PTR arg(3) ;out_pitch
+%endif
+ mov rax, rsi
+ movsxd rcx, DWORD PTR arg(4) ;output_height
+ add rax, rdx
+
+ lea rbx, [rdx + rdx*4]
+ add rbx, rdx ;pitch * 6
+
+.loop:
+ movq xmm0, [rsi] ;A
+ movq xmm1, [rsi + rdx] ;B
+ movq xmm2, [rsi + rdx * 2] ;C
+ movq xmm3, [rax + rdx * 2] ;D
+ movq xmm4, [rsi + rdx * 4] ;E
+ movq xmm5, [rax + rdx * 4] ;F
+
+ punpcklbw xmm0, xmm1 ;A B
+ punpcklbw xmm2, xmm3 ;C D
+ punpcklbw xmm4, xmm5 ;E F
+
+ movq xmm6, [rsi + rbx] ;G
+ movq xmm7, [rax + rbx] ;H
+
+ pmaddubsw xmm0, k0k1
+ pmaddubsw xmm2, k2k3
+ punpcklbw xmm6, xmm7 ;G H
+ pmaddubsw xmm4, k4k5
+ pmaddubsw xmm6, k6k7
+
+ paddsw xmm0, xmm6
+ paddsw xmm0, xmm2
+ paddsw xmm0, xmm4
+ paddsw xmm0, krd
+
+ psraw xmm0, 7
+ packuswb xmm0, xmm0
+%if %1
+ movq xmm1, [rdi]
+ pavgb xmm0, xmm1
+%endif
+ movq [rdi], xmm0
+
+ movq xmm0, [rsi + 8] ;A
+ movq xmm1, [rsi + rdx + 8] ;B
+ movq xmm2, [rsi + rdx * 2 + 8] ;C
+ movq xmm3, [rax + rdx * 2 + 8] ;D
+ movq xmm4, [rsi + rdx * 4 + 8] ;E
+ movq xmm5, [rax + rdx * 4 + 8] ;F
+
+ punpcklbw xmm0, xmm1 ;A B
+ punpcklbw xmm2, xmm3 ;C D
+ punpcklbw xmm4, xmm5 ;E F
+
+
+ movq xmm6, [rsi + rbx + 8] ;G
+ movq xmm7, [rax + rbx + 8] ;H
+ punpcklbw xmm6, xmm7 ;G H
+
+
+ pmaddubsw xmm0, k0k1
+ pmaddubsw xmm2, k2k3
+ pmaddubsw xmm4, k4k5
+ pmaddubsw xmm6, k6k7
+
+ paddsw xmm0, xmm6
+ paddsw xmm0, xmm2
+ paddsw xmm0, xmm4
+ paddsw xmm0, krd
+
+ psraw xmm0, 7
+ packuswb xmm0, xmm0
+
+ add rsi, rdx
+ add rax, rdx
+%if %1
+ movq xmm1, [rdi+8]
+ pavgb xmm0, xmm1
+%endif
+
+ movq [rdi+8], xmm0
+
+%if ABI_IS_32BIT
+ add rdi, DWORD PTR arg(3) ;out_pitch
+%else
+ add rdi, r8
+%endif
+ dec rcx
+ jnz .loop
+%endm
+
+;void vp9_filter_block1d4_v8_ssse3
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pitch,
+; unsigned char *output_ptr,
+; unsigned int out_pitch,
+; unsigned int output_height,
+; short *filter
+;)
+global sym(vp9_filter_block1d4_v8_ssse3) PRIVATE
+sym(vp9_filter_block1d4_v8_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ push rsi
+ push rdi
+ push rbx
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ VERTx4 0
+
+ add rsp, 16*5
+ pop rsp
+ pop rbx
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_filter_block1d8_v8_ssse3
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pitch,
+; unsigned char *output_ptr,
+; unsigned int out_pitch,
+; unsigned int output_height,
+; short *filter
+;)
+global sym(vp9_filter_block1d8_v8_ssse3) PRIVATE
+sym(vp9_filter_block1d8_v8_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ push rsi
+ push rdi
+ push rbx
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ VERTx8 0
+
+ add rsp, 16*5
+ pop rsp
+ pop rbx
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_filter_block1d16_v8_ssse3
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pitch,
+; unsigned char *output_ptr,
+; unsigned int out_pitch,
+; unsigned int output_height,
+; short *filter
+;)
+global sym(vp9_filter_block1d16_v8_ssse3) PRIVATE
+sym(vp9_filter_block1d16_v8_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ push rsi
+ push rdi
+ push rbx
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ VERTx16 0
+
+ add rsp, 16*5
+ pop rsp
+ pop rbx
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+global sym(vp9_filter_block1d4_v8_avg_ssse3) PRIVATE
+sym(vp9_filter_block1d4_v8_avg_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ push rsi
+ push rdi
+ push rbx
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ VERTx4 1
+
+ add rsp, 16*5
+ pop rsp
+ pop rbx
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+global sym(vp9_filter_block1d8_v8_avg_ssse3) PRIVATE
+sym(vp9_filter_block1d8_v8_avg_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ push rsi
+ push rdi
+ push rbx
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ VERTx8 1
+
+ add rsp, 16*5
+ pop rsp
+ pop rbx
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+global sym(vp9_filter_block1d16_v8_avg_ssse3) PRIVATE
+sym(vp9_filter_block1d16_v8_avg_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ push rsi
+ push rdi
+ push rbx
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ VERTx16 1
+
+ add rsp, 16*5
+ pop rsp
+ pop rbx
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+%macro HORIZx4 1
+ mov rdx, arg(5) ;filter ptr
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;output_ptr
+ mov rcx, 0x0400040
+
+ movdqa xmm4, [rdx] ;load filters
+ movq xmm5, rcx
+ packsswb xmm4, xmm4
+ pshuflw xmm0, xmm4, 0b ;k0_k1
+ pshuflw xmm1, xmm4, 01010101b ;k2_k3
+ pshuflw xmm2, xmm4, 10101010b ;k4_k5
+ pshuflw xmm3, xmm4, 11111111b ;k6_k7
+
+ punpcklqdq xmm0, xmm0
+ punpcklqdq xmm1, xmm1
+ punpcklqdq xmm2, xmm2
+ punpcklqdq xmm3, xmm3
+
+ movdqa k0k1, xmm0
+ movdqa k2k3, xmm1
+ pshufd xmm5, xmm5, 0
+ movdqa k4k5, xmm2
+ movdqa k6k7, xmm3
+ movdqa krd, xmm5
+
+ movsxd rax, dword ptr arg(1) ;src_pixels_per_line
+ movsxd rdx, dword ptr arg(3) ;output_pitch
+ movsxd rcx, dword ptr arg(4) ;output_height
+
+.loop:
+ movq xmm0, [rsi - 3] ; -3 -2 -1 0 1 2 3 4
+
+ movq xmm3, [rsi + 5] ; 5 6 7 8 9 10 11 12
+ punpcklqdq xmm0, xmm3
+
+ movdqa xmm1, xmm0
+ pshufb xmm0, [GLOBAL(shuf_t0t1)]
+ pmaddubsw xmm0, k0k1
+
+ movdqa xmm2, xmm1
+ pshufb xmm1, [GLOBAL(shuf_t2t3)]
+ pmaddubsw xmm1, k2k3
+
+ movdqa xmm4, xmm2
+ pshufb xmm2, [GLOBAL(shuf_t4t5)]
+ pmaddubsw xmm2, k4k5
+
+ pshufb xmm4, [GLOBAL(shuf_t6t7)]
+ pmaddubsw xmm4, k6k7
+
+ paddsw xmm0, xmm1
+ paddsw xmm0, xmm4
+ paddsw xmm0, xmm2
+ paddsw xmm0, krd
+ psraw xmm0, 7
+ packuswb xmm0, xmm0
+%if %1
+ movd xmm1, [rdi]
+ pavgb xmm0, xmm1
+%endif
+ lea rsi, [rsi + rax]
+ movd [rdi], xmm0
+
+ lea rdi, [rdi + rdx]
+ dec rcx
+ jnz .loop
+%endm
+
+%macro HORIZx8 1
+ mov rdx, arg(5) ;filter ptr
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;output_ptr
+ mov rcx, 0x0400040
+
+ movdqa xmm4, [rdx] ;load filters
+ movd xmm5, rcx
+ packsswb xmm4, xmm4
+ pshuflw xmm0, xmm4, 0b ;k0_k1
+ pshuflw xmm1, xmm4, 01010101b ;k2_k3
+ pshuflw xmm2, xmm4, 10101010b ;k4_k5
+ pshuflw xmm3, xmm4, 11111111b ;k6_k7
+
+ punpcklqdq xmm0, xmm0
+ punpcklqdq xmm1, xmm1
+ punpcklqdq xmm2, xmm2
+ punpcklqdq xmm3, xmm3
+
+ movdqa k0k1, xmm0
+ movdqa k2k3, xmm1
+ pshufd xmm5, xmm5, 0
+ movdqa k4k5, xmm2
+ movdqa k6k7, xmm3
+ movdqa krd, xmm5
+
+ movsxd rax, dword ptr arg(1) ;src_pixels_per_line
+ movsxd rdx, dword ptr arg(3) ;output_pitch
+ movsxd rcx, dword ptr arg(4) ;output_height
+
+.loop:
+ movq xmm0, [rsi - 3] ; -3 -2 -1 0 1 2 3 4
+
+ movq xmm3, [rsi + 5] ; 5 6 7 8 9 10 11 12
+ punpcklqdq xmm0, xmm3
+
+ movdqa xmm1, xmm0
+ pshufb xmm0, [GLOBAL(shuf_t0t1)]
+ pmaddubsw xmm0, k0k1
+
+ movdqa xmm2, xmm1
+ pshufb xmm1, [GLOBAL(shuf_t2t3)]
+ pmaddubsw xmm1, k2k3
+
+ movdqa xmm4, xmm2
+ pshufb xmm2, [GLOBAL(shuf_t4t5)]
+ pmaddubsw xmm2, k4k5
+
+ pshufb xmm4, [GLOBAL(shuf_t6t7)]
+ pmaddubsw xmm4, k6k7
+
+ paddsw xmm0, xmm1
+ paddsw xmm0, xmm4
+ paddsw xmm0, xmm2
+ paddsw xmm0, krd
+ psraw xmm0, 7
+ packuswb xmm0, xmm0
+%if %1
+ movq xmm1, [rdi]
+ pavgb xmm0, xmm1
+%endif
+
+ lea rsi, [rsi + rax]
+ movq [rdi], xmm0
+
+ lea rdi, [rdi + rdx]
+ dec rcx
+ jnz .loop
+%endm
+
+%macro HORIZx16 1
+ mov rdx, arg(5) ;filter ptr
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;output_ptr
+ mov rcx, 0x0400040
+
+ movdqa xmm4, [rdx] ;load filters
+ movq xmm5, rcx
+ packsswb xmm4, xmm4
+ pshuflw xmm0, xmm4, 0b ;k0_k1
+ pshuflw xmm1, xmm4, 01010101b ;k2_k3
+ pshuflw xmm2, xmm4, 10101010b ;k4_k5
+ pshuflw xmm3, xmm4, 11111111b ;k6_k7
+
+ punpcklqdq xmm0, xmm0
+ punpcklqdq xmm1, xmm1
+ punpcklqdq xmm2, xmm2
+ punpcklqdq xmm3, xmm3
+
+ movdqa k0k1, xmm0
+ movdqa k2k3, xmm1
+ pshufd xmm5, xmm5, 0
+ movdqa k4k5, xmm2
+ movdqa k6k7, xmm3
+ movdqa krd, xmm5
+
+ movsxd rax, dword ptr arg(1) ;src_pixels_per_line
+ movsxd rdx, dword ptr arg(3) ;output_pitch
+ movsxd rcx, dword ptr arg(4) ;output_height
+
+.loop:
+ movq xmm0, [rsi - 3] ; -3 -2 -1 0 1 2 3 4
+
+ movq xmm3, [rsi + 5] ; 5 6 7 8 9 10 11 12
+ punpcklqdq xmm0, xmm3
+
+ movdqa xmm1, xmm0
+ pshufb xmm0, [GLOBAL(shuf_t0t1)]
+ pmaddubsw xmm0, k0k1
+
+ movdqa xmm2, xmm1
+ pshufb xmm1, [GLOBAL(shuf_t2t3)]
+ pmaddubsw xmm1, k2k3
+
+ movdqa xmm4, xmm2
+ pshufb xmm2, [GLOBAL(shuf_t4t5)]
+ pmaddubsw xmm2, k4k5
+
+ pshufb xmm4, [GLOBAL(shuf_t6t7)]
+ pmaddubsw xmm4, k6k7
+
+ paddsw xmm0, xmm1
+ paddsw xmm0, xmm4
+ paddsw xmm0, xmm2
+ paddsw xmm0, krd
+ psraw xmm0, 7
+ packuswb xmm0, xmm0
+
+
+ movq xmm3, [rsi + 5]
+ movq xmm7, [rsi + 13]
+ punpcklqdq xmm3, xmm7
+
+ movdqa xmm1, xmm3
+ pshufb xmm3, [GLOBAL(shuf_t0t1)]
+ pmaddubsw xmm3, k0k1
+
+ movdqa xmm2, xmm1
+ pshufb xmm1, [GLOBAL(shuf_t2t3)]
+ pmaddubsw xmm1, k2k3
+
+ movdqa xmm4, xmm2
+ pshufb xmm2, [GLOBAL(shuf_t4t5)]
+ pmaddubsw xmm2, k4k5
+
+ pshufb xmm4, [GLOBAL(shuf_t6t7)]
+ pmaddubsw xmm4, k6k7
+
+ paddsw xmm3, xmm1
+ paddsw xmm3, xmm4
+ paddsw xmm3, xmm2
+ paddsw xmm3, krd
+ psraw xmm3, 7
+ packuswb xmm3, xmm3
+ punpcklqdq xmm0, xmm3
+%if %1
+ movdqa xmm1, [rdi]
+ pavgb xmm0, xmm1
+%endif
+
+ lea rsi, [rsi + rax]
+ movdqa [rdi], xmm0
+
+ lea rdi, [rdi + rdx]
+ dec rcx
+ jnz .loop
+%endm
+
+;void vp9_filter_block1d4_h8_ssse3
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pixels_per_line,
+; unsigned char *output_ptr,
+; unsigned int output_pitch,
+; unsigned int output_height,
+; short *filter
+;)
+global sym(vp9_filter_block1d4_h8_ssse3) PRIVATE
+sym(vp9_filter_block1d4_h8_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ HORIZx4 0
+
+ add rsp, 16*5
+ pop rsp
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_filter_block1d8_h8_ssse3
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pixels_per_line,
+; unsigned char *output_ptr,
+; unsigned int output_pitch,
+; unsigned int output_height,
+; short *filter
+;)
+global sym(vp9_filter_block1d8_h8_ssse3) PRIVATE
+sym(vp9_filter_block1d8_h8_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ HORIZx8 0
+
+ add rsp, 16*5
+ pop rsp
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_filter_block1d16_h8_ssse3
+;(
+; unsigned char *src_ptr,
+; unsigned int src_pixels_per_line,
+; unsigned char *output_ptr,
+; unsigned int output_pitch,
+; unsigned int output_height,
+; short *filter
+;)
+global sym(vp9_filter_block1d16_h8_ssse3) PRIVATE
+sym(vp9_filter_block1d16_h8_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ HORIZx16 0
+
+ add rsp, 16*5
+ pop rsp
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+global sym(vp9_filter_block1d4_h8_avg_ssse3) PRIVATE
+sym(vp9_filter_block1d4_h8_avg_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ HORIZx4 1
+
+ add rsp, 16*5
+ pop rsp
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+global sym(vp9_filter_block1d8_h8_avg_ssse3) PRIVATE
+sym(vp9_filter_block1d8_h8_avg_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ HORIZx8 1
+
+ add rsp, 16*5
+ pop rsp
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+global sym(vp9_filter_block1d16_h8_avg_ssse3) PRIVATE
+sym(vp9_filter_block1d16_h8_avg_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ ALIGN_STACK 16, rax
+ sub rsp, 16*5
+ %define k0k1 [rsp + 16*0]
+ %define k2k3 [rsp + 16*1]
+ %define k4k5 [rsp + 16*2]
+ %define k6k7 [rsp + 16*3]
+ %define krd [rsp + 16*4]
+
+ HORIZx16 1
+
+ add rsp, 16*5
+ pop rsp
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+SECTION_RODATA
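+; The shuf_t* tables below pair each input pixel with its right-hand
+; neighbour at increasing offsets so that a single pshufb + pmaddubsw
+; applies two filter taps to eight output pixels at once.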
+align 16
+shuf_t0t1:
+ db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
+align 16
+shuf_t2t3:
+ db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10
+align 16
+shuf_t4t5:
+ db 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12
+align 16
+shuf_t6t7:
+ db 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14
diff --git a/libvpx/vp9/decoder/vp9_asm_dec_offsets.c b/libvpx/vp9/decoder/vp9_asm_dec_offsets.c
new file mode 100644
index 0000000..e4b9c97
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_asm_dec_offsets.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vpx_ports/asm_offsets.h"
+#include "vp9/decoder/vp9_onyxd_int.h"
+
+BEGIN
+
+END
+
+/* add asserts for any offset that is not supported by assembly code */
+/* add asserts for any size that is not supported by assembly code */
diff --git a/libvpx/vp9/decoder/vp9_dboolhuff.c b/libvpx/vp9/decoder/vp9_dboolhuff.c
new file mode 100644
index 0000000..df77d65
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_dboolhuff.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_ports/mem.h"
+#include "vpx_mem/vpx_mem.h"
+
+#include "vp9/decoder/vp9_dboolhuff.h"
+
+int vp9_reader_init(vp9_reader *r, const uint8_t *buffer, size_t size) {
+ int marker_bit;
+
+ r->buffer_end = buffer + size;
+ r->buffer = buffer;
+ r->value = 0;
+ r->count = -8;
+ r->range = 255;
+
+ if (size && !buffer)
+ return 1;
+
+ vp9_reader_fill(r);
+ marker_bit = vp9_read_bit(r);
+ return marker_bit != 0;
+}
+
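+// Refill the big-endian bit window, shifting in whole bytes from the buffer
+// until the window is full or the data runs out. Once the end of the buffer
+// is reached, 'count' is padded with VP9_LOTS_OF_BITS so that later reads
+// can detect that they ran past the end of the stream.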
+void vp9_reader_fill(vp9_reader *r) {
+ const uint8_t *const buffer_end = r->buffer_end;
+ const uint8_t *buffer = r->buffer;
+ VP9_BD_VALUE value = r->value;
+ int count = r->count;
+ int shift = VP9_BD_VALUE_SIZE - 8 - (count + 8);
+ int loop_end = 0;
+ const int bits_left = (int)((buffer_end - buffer)*CHAR_BIT);
+ const int x = shift + CHAR_BIT - bits_left;
+
+ if (x >= 0) {
+ count += VP9_LOTS_OF_BITS;
+ loop_end = x;
+ }
+
+ if (x < 0 || bits_left) {
+ while (shift >= loop_end) {
+ count += CHAR_BIT;
+ value |= (VP9_BD_VALUE)*buffer++ << shift;
+ shift -= CHAR_BIT;
+ }
+ }
+
+ r->buffer = buffer;
+ r->value = value;
+ r->count = count;
+}
+
+const uint8_t *vp9_reader_find_end(vp9_reader *r) {
+ // Find the end of the coded buffer
+ while (r->count > CHAR_BIT && r->count < VP9_BD_VALUE_SIZE) {
+ r->count -= CHAR_BIT;
+ r->buffer--;
+ }
+ return r->buffer;
+}
+
diff --git a/libvpx/vp9/decoder/vp9_dboolhuff.h b/libvpx/vp9/decoder/vp9_dboolhuff.h
new file mode 100644
index 0000000..b50aa35
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_dboolhuff.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_DECODER_VP9_DBOOLHUFF_H_
+#define VP9_DECODER_VP9_DBOOLHUFF_H_
+
+#include <stddef.h>
+#include <limits.h>
+
+#include "./vpx_config.h"
+#include "vpx_ports/mem.h"
+#include "vpx/vpx_integer.h"
+
+typedef size_t VP9_BD_VALUE;
+
+#define VP9_BD_VALUE_SIZE ((int)sizeof(VP9_BD_VALUE)*CHAR_BIT)
+
+// This is meant to be a large, positive constant that can still be efficiently
+// loaded as an immediate (on platforms like ARM, for example).
+// Even relatively modest values like 100 would work fine.
+#define VP9_LOTS_OF_BITS 0x40000000
+
+typedef struct {
+ const uint8_t *buffer_end;
+ const uint8_t *buffer;
+ VP9_BD_VALUE value;
+ int count;
+ unsigned int range;
+} vp9_reader;
+
+DECLARE_ALIGNED(16, extern const uint8_t, vp9_norm[256]);
+
+int vp9_reader_init(vp9_reader *r, const uint8_t *buffer, size_t size);
+
+void vp9_reader_fill(vp9_reader *r);
+
+const uint8_t *vp9_reader_find_end(vp9_reader *r);
+
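+// Decode one bool whose 8-bit probability gives the chance of a zero. A
+// sketch of the arithmetic (illustrative only):
+//
+//   split = 1 + (((range - 1) * probability) >> 8)
+//   bit   = (value >= split), compared in the top bits of the bit window
+//
+// The decoded bit selects the matching sub-range, which is then renormalized
+// using the vp9_norm shift table.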
+static int vp9_read(vp9_reader *br, int probability) {
+ unsigned int bit = 0;
+ VP9_BD_VALUE value;
+ VP9_BD_VALUE bigsplit;
+ int count;
+ unsigned int range;
+ unsigned int split = 1 + (((br->range - 1) * probability) >> 8);
+
+ if (br->count < 0)
+ vp9_reader_fill(br);
+
+ value = br->value;
+ count = br->count;
+
+ bigsplit = (VP9_BD_VALUE)split << (VP9_BD_VALUE_SIZE - 8);
+
+ range = split;
+
+ if (value >= bigsplit) {
+ range = br->range - split;
+ value = value - bigsplit;
+ bit = 1;
+ }
+
+ {
+ register unsigned int shift = vp9_norm[range];
+ range <<= shift;
+ value <<= shift;
+ count -= shift;
+ }
+ br->value = value;
+ br->count = count;
+ br->range = range;
+
+ return bit;
+}
+
+static int vp9_read_bit(vp9_reader *r) {
+ return vp9_read(r, 128); // vp9_prob_half
+}
+
+static int vp9_read_literal(vp9_reader *br, int bits) {
+ int z = 0, bit;
+
+ for (bit = bits - 1; bit >= 0; bit--)
+ z |= vp9_read_bit(br) << bit;
+
+ return z;
+}
+
+static int vp9_reader_has_error(vp9_reader *r) {
+ // Check if we have reached the end of the buffer.
+ //
+ // Variable 'count' stores the number of bits in the 'value' buffer, minus
+ // 8. The top byte is part of the algorithm, and the remainder is buffered
+ // to be shifted into it. So if count == 8, the top 16 bits of 'value' are
+ // occupied, 8 for the algorithm and 8 in the buffer.
+ //
+ // When reading a byte from the user's buffer, count is filled with 8 and
+ // one byte is filled into the value buffer. When we reach the end of the
+ // data, count is additionally filled with VP9_LOTS_OF_BITS. So when
+ // count == VP9_LOTS_OF_BITS - 1, the user's data has been exhausted.
+ //
+ // 1 if we have tried to decode bits after the end of stream was encountered.
+ // 0 No error.
+ return r->count > VP9_BD_VALUE_SIZE && r->count < VP9_LOTS_OF_BITS;
+}
+
+#endif // VP9_DECODER_VP9_DBOOLHUFF_H_
diff --git a/libvpx/vp9/decoder/vp9_decodemv.c b/libvpx/vp9/decoder/vp9_decodemv.c
new file mode 100644
index 0000000..b3d41be
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_decodemv.c
@@ -0,0 +1,823 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vp9/decoder/vp9_treereader.h"
+#include "vp9/common/vp9_entropymv.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/decoder/vp9_onyxd_int.h"
+#include "vp9/common/vp9_findnearmv.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/decoder/vp9_decodemv.h"
+#include "vp9/decoder/vp9_decodframe.h"
+#include "vp9/common/vp9_mvref_common.h"
+#if CONFIG_DEBUG
+#include <assert.h>
+#endif
+
+// #define DEBUG_DEC_MV
+#ifdef DEBUG_DEC_MV
+int dec_mvcount = 0;
+#endif
+
+// #define DEC_DEBUG
+#ifdef DEC_DEBUG
+extern int dec_debug;
+#endif
+
+static MB_PREDICTION_MODE read_intra_mode(vp9_reader *r, const vp9_prob *p) {
+ MB_PREDICTION_MODE m = treed_read(r, vp9_intra_mode_tree, p);
+ return m;
+}
+
+static int read_mb_segid(vp9_reader *r, MACROBLOCKD *xd) {
+ return treed_read(r, vp9_segment_tree, xd->mb_segment_tree_probs);
+}
+
+static void set_segment_id(VP9_COMMON *cm, MB_MODE_INFO *mbmi,
+ int mi_row, int mi_col, int segment_id) {
+ const int mi_index = mi_row * cm->mi_cols + mi_col;
+ const BLOCK_SIZE_TYPE sb_type = mbmi->sb_type;
+ const int bw = 1 << mi_width_log2(sb_type);
+ const int bh = 1 << mi_height_log2(sb_type);
+ const int ymis = MIN(cm->mi_rows - mi_row, bh);
+ const int xmis = MIN(cm->mi_cols - mi_col, bw);
+ int x, y;
+
+ for (y = 0; y < ymis; y++) {
+ for (x = 0; x < xmis; x++) {
+ const int index = mi_index + (y * cm->mi_cols + x);
+ cm->last_frame_seg_map[index] = segment_id;
+ }
+ }
+}
+
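+// Read the transform size as a unary code: each additional bit (allowed only
+// if the block is large enough) bumps the size one step from TX_4X4 towards
+// TX_32X32, and the per-context count is updated for backward adaptation.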
+static TX_SIZE select_txfm_size(VP9_COMMON *cm, MACROBLOCKD *xd,
+ vp9_reader *r, BLOCK_SIZE_TYPE bsize) {
+ const int context = vp9_get_pred_context(cm, xd, PRED_TX_SIZE);
+ const vp9_prob *tx_probs = vp9_get_pred_probs(cm, xd, PRED_TX_SIZE);
+ TX_SIZE txfm_size = vp9_read(r, tx_probs[0]);
+ if (txfm_size != TX_4X4 && bsize >= BLOCK_SIZE_MB16X16) {
+ txfm_size += vp9_read(r, tx_probs[1]);
+ if (txfm_size != TX_8X8 && bsize >= BLOCK_SIZE_SB32X32)
+ txfm_size += vp9_read(r, tx_probs[2]);
+ }
+ if (bsize >= BLOCK_SIZE_SB32X32) {
+ cm->fc.tx_count_32x32p[context][txfm_size]++;
+ } else if (bsize >= BLOCK_SIZE_MB16X16) {
+ cm->fc.tx_count_16x16p[context][txfm_size]++;
+ } else {
+ cm->fc.tx_count_8x8p[context][txfm_size]++;
+ }
+ return txfm_size;
+}
+
+
+static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
+ int mi_row, int mi_col,
+ vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ const int mis = cm->mode_info_stride;
+
+ // Read segmentation map if it is being updated explicitly this frame
+ m->mbmi.segment_id = 0;
+ if (xd->segmentation_enabled && xd->update_mb_segmentation_map) {
+ m->mbmi.segment_id = read_mb_segid(r, xd);
+ set_segment_id(cm, &m->mbmi, mi_row, mi_col, m->mbmi.segment_id);
+ }
+
+ m->mbmi.mb_skip_coeff = vp9_segfeature_active(xd, m->mbmi.segment_id,
+ SEG_LVL_SKIP);
+ if (!m->mbmi.mb_skip_coeff) {
+ m->mbmi.mb_skip_coeff = vp9_read(r, vp9_get_pred_prob(cm, xd, PRED_MBSKIP));
+ cm->fc.mbskip_count[vp9_get_pred_context(cm, xd, PRED_MBSKIP)]
+ [m->mbmi.mb_skip_coeff]++;
+ }
+
+ if (cm->txfm_mode == TX_MODE_SELECT &&
+ m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
+ m->mbmi.txfm_size = select_txfm_size(cm, xd, r, m->mbmi.sb_type);
+ } else if (cm->txfm_mode >= ALLOW_32X32 &&
+ m->mbmi.sb_type >= BLOCK_SIZE_SB32X32) {
+ m->mbmi.txfm_size = TX_32X32;
+ } else if (cm->txfm_mode >= ALLOW_16X16 &&
+ m->mbmi.sb_type >= BLOCK_SIZE_MB16X16) {
+ m->mbmi.txfm_size = TX_16X16;
+ } else if (cm->txfm_mode >= ALLOW_8X8 &&
+ m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
+ m->mbmi.txfm_size = TX_8X8;
+ } else {
+ m->mbmi.txfm_size = TX_4X4;
+ }
+
+ // luma mode
+ m->mbmi.ref_frame[0] = INTRA_FRAME;
+ if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
+ const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis);
+ const MB_PREDICTION_MODE L = xd->left_available ?
+ left_block_mode(m, 0) : DC_PRED;
+ m->mbmi.mode = read_intra_mode(r, cm->kf_y_mode_prob[A][L]);
+ } else {
+ int idx, idy;
+ int bw = 1 << b_width_log2(m->mbmi.sb_type);
+ int bh = 1 << b_height_log2(m->mbmi.sb_type);
+
+ for (idy = 0; idy < 2; idy += bh) {
+ for (idx = 0; idx < 2; idx += bw) {
+ int ib = idy * 2 + idx;
+ int k;
+ const MB_PREDICTION_MODE A = above_block_mode(m, ib, mis);
+ const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
+ left_block_mode(m, ib) : DC_PRED;
+ m->bmi[ib].as_mode.first =
+ read_intra_mode(r, cm->kf_y_mode_prob[A][L]);
+ for (k = 1; k < bh; ++k)
+ m->bmi[ib + k * 2].as_mode.first = m->bmi[ib].as_mode.first;
+ for (k = 1; k < bw; ++k)
+ m->bmi[ib + k].as_mode.first = m->bmi[ib].as_mode.first;
+ }
+ }
+ m->mbmi.mode = m->bmi[3].as_mode.first;
+ }
+
+ m->mbmi.uv_mode = read_intra_mode(r, cm->kf_uv_mode_prob[m->mbmi.mode]);
+}
+
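+// Decode one MV component as sign, magnitude class, integer offset bits,
+// fractional (1/2, 1/4, 1/8 pel) part and an optional high-precision bit,
+// recombined as (d << 3) | (fr << 1) | hp on top of the class magnitude.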
+static int read_mv_component(vp9_reader *r,
+ const nmv_component *mvcomp, int usehp) {
+ int mag, d, fr, hp;
+ const int sign = vp9_read(r, mvcomp->sign);
+ const int mv_class = treed_read(r, vp9_mv_class_tree, mvcomp->classes);
+
+ // Integer part
+ if (mv_class == MV_CLASS_0) {
+ d = treed_read(r, vp9_mv_class0_tree, mvcomp->class0);
+ } else {
+ int i;
+ const int n = mv_class + CLASS0_BITS - 1; // number of bits
+
+ d = 0;
+ for (i = 0; i < n; ++i)
+ d |= vp9_read(r, mvcomp->bits[i]) << i;
+ }
+
+ // Fractional part
+ fr = treed_read(r, vp9_mv_fp_tree,
+ mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp);
+
+  // High precision part (if hp is not used, the default value is 1)
+ hp = usehp ? vp9_read(r,
+ mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp)
+ : 1;
+
+ // result
+ mag = vp9_get_mv_mag(mv_class, (d << 3) | (fr << 1) | hp) + 1;
+ return sign ? -mag : mag;
+}
+
+static void update_nmv(vp9_reader *r, vp9_prob *const p,
+ const vp9_prob upd_p) {
+ if (vp9_read(r, upd_p)) {
+#ifdef LOW_PRECISION_MV_UPDATE
+ *p = (vp9_read_literal(r, 7) << 1) | 1;
+#else
+ *p = (vp9_read_literal(r, 8));
+#endif
+ }
+}
+
+static void read_nmvprobs(vp9_reader *r, nmv_context *mvctx,
+ int usehp) {
+ int i, j, k;
+
+#ifdef MV_GROUP_UPDATE
+ if (!vp9_read_bit(r))
+ return;
+#endif
+ for (j = 0; j < MV_JOINTS - 1; ++j)
+ update_nmv(r, &mvctx->joints[j], VP9_NMV_UPDATE_PROB);
+
+ for (i = 0; i < 2; ++i) {
+ update_nmv(r, &mvctx->comps[i].sign, VP9_NMV_UPDATE_PROB);
+ for (j = 0; j < MV_CLASSES - 1; ++j)
+ update_nmv(r, &mvctx->comps[i].classes[j], VP9_NMV_UPDATE_PROB);
+
+ for (j = 0; j < CLASS0_SIZE - 1; ++j)
+ update_nmv(r, &mvctx->comps[i].class0[j], VP9_NMV_UPDATE_PROB);
+
+ for (j = 0; j < MV_OFFSET_BITS; ++j)
+ update_nmv(r, &mvctx->comps[i].bits[j], VP9_NMV_UPDATE_PROB);
+ }
+
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < CLASS0_SIZE; ++j)
+ for (k = 0; k < 3; ++k)
+ update_nmv(r, &mvctx->comps[i].class0_fp[j][k], VP9_NMV_UPDATE_PROB);
+
+ for (j = 0; j < 3; ++j)
+ update_nmv(r, &mvctx->comps[i].fp[j], VP9_NMV_UPDATE_PROB);
+ }
+
+ if (usehp) {
+ for (i = 0; i < 2; ++i) {
+ update_nmv(r, &mvctx->comps[i].class0_hp, VP9_NMV_UPDATE_PROB);
+ update_nmv(r, &mvctx->comps[i].hp, VP9_NMV_UPDATE_PROB);
+ }
+ }
+}
+
+// Read the reference frame
+static void read_ref_frame(VP9D_COMP *pbi, vp9_reader *r,
+ int segment_id, MV_REFERENCE_FRAME ref_frame[2]) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ const int seg_ref_active = vp9_segfeature_active(xd, segment_id,
+ SEG_LVL_REF_FRAME);
+
+ // Segment reference frame features not available.
+ if (!seg_ref_active) {
+ int is_comp;
+ int comp_ctx = vp9_get_pred_context(cm, xd, PRED_COMP_INTER_INTER);
+
+ if (cm->comp_pred_mode == HYBRID_PREDICTION) {
+ is_comp = vp9_read(r, cm->fc.comp_inter_prob[comp_ctx]);
+ cm->fc.comp_inter_count[comp_ctx][is_comp]++;
+ } else {
+ is_comp = cm->comp_pred_mode == COMP_PREDICTION_ONLY;
+ }
+
+ // FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding
+ if (is_comp) {
+ int b, fix_ref_idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
+ int ref_ctx = vp9_get_pred_context(cm, xd, PRED_COMP_REF_P);
+
+ ref_frame[fix_ref_idx] = cm->comp_fixed_ref;
+ b = vp9_read(r, cm->fc.comp_ref_prob[ref_ctx]);
+ cm->fc.comp_ref_count[ref_ctx][b]++;
+ ref_frame[!fix_ref_idx] = cm->comp_var_ref[b];
+ } else {
+ int ref1_ctx = vp9_get_pred_context(cm, xd, PRED_SINGLE_REF_P1);
+ ref_frame[1] = NONE;
+ if (vp9_read(r, cm->fc.single_ref_prob[ref1_ctx][0])) {
+ int ref2_ctx = vp9_get_pred_context(cm, xd, PRED_SINGLE_REF_P2);
+ int b2 = vp9_read(r, cm->fc.single_ref_prob[ref2_ctx][1]);
+ ref_frame[0] = b2 ? ALTREF_FRAME : GOLDEN_FRAME;
+ cm->fc.single_ref_count[ref1_ctx][0][1]++;
+ cm->fc.single_ref_count[ref2_ctx][1][b2]++;
+ } else {
+ ref_frame[0] = LAST_FRAME;
+ cm->fc.single_ref_count[ref1_ctx][0][0]++;
+ }
+ }
+ } else {
+ ref_frame[0] = vp9_get_segdata(xd, segment_id, SEG_LVL_REF_FRAME);
+ ref_frame[1] = NONE;
+ }
+}
+
+static MB_PREDICTION_MODE read_sb_mv_ref(vp9_reader *r, const vp9_prob *p) {
+ return (MB_PREDICTION_MODE) treed_read(r, vp9_sb_mv_ref_tree, p);
+}
+
+#ifdef VPX_MODE_COUNT
+unsigned int vp9_mv_cont_count[5][4] = {
+ { 0, 0, 0, 0 },
+ { 0, 0, 0, 0 },
+ { 0, 0, 0, 0 },
+ { 0, 0, 0, 0 },
+ { 0, 0, 0, 0 }
+};
+#endif
+
+static void read_switchable_interp_probs(VP9_COMMON* const cm, vp9_reader *r) {
+ int i, j;
+ for (j = 0; j <= VP9_SWITCHABLE_FILTERS; ++j)
+ for (i = 0; i < VP9_SWITCHABLE_FILTERS - 1; ++i) {
+ if (vp9_read(r, VP9_MODE_UPDATE_PROB)) {
+ cm->fc.switchable_interp_prob[j][i] =
+ // vp9_read_prob(r);
+ vp9_read_prob_diff_update(r, cm->fc.switchable_interp_prob[j][i]);
+ }
+ }
+}
+
+static void read_inter_mode_probs(VP9_COMMON *const cm, vp9_reader *r) {
+ int i, j;
+ for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
+ for (j = 0; j < VP9_INTER_MODES - 1; ++j) {
+ if (vp9_read(r, VP9_MODE_UPDATE_PROB)) {
+ // cm->fc.inter_mode_probs[i][j] = vp9_read_prob(r);
+ cm->fc.inter_mode_probs[i][j] =
+ vp9_read_prob_diff_update(r, cm->fc.inter_mode_probs[i][j]);
+ }
+ }
+}
+
+static INLINE COMPPREDMODE_TYPE read_comp_pred_mode(vp9_reader *r) {
+ COMPPREDMODE_TYPE mode = vp9_read_bit(r);
+ if (mode)
+ mode += vp9_read_bit(r);
+ return mode;
+}
+
+static void mb_mode_mv_init(VP9D_COMP *pbi, vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+
+ if ((cm->frame_type != KEY_FRAME) && (!cm->intra_only)) {
+ nmv_context *const nmvc = &pbi->common.fc.nmvc;
+ MACROBLOCKD *const xd = &pbi->mb;
+ int i, j;
+
+ read_inter_mode_probs(cm, r);
+
+ if (cm->mcomp_filter_type == SWITCHABLE)
+ read_switchable_interp_probs(cm, r);
+
+ for (i = 0; i < INTRA_INTER_CONTEXTS; i++) {
+ if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ cm->fc.intra_inter_prob[i] =
+ vp9_read_prob_diff_update(r, cm->fc.intra_inter_prob[i]);
+ }
+
+ if (cm->allow_comp_inter_inter) {
+ cm->comp_pred_mode = read_comp_pred_mode(r);
+ if (cm->comp_pred_mode == HYBRID_PREDICTION)
+ for (i = 0; i < COMP_INTER_CONTEXTS; i++)
+ if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ cm->fc.comp_inter_prob[i] =
+ vp9_read_prob_diff_update(r, cm->fc.comp_inter_prob[i]);
+ } else {
+ cm->comp_pred_mode = SINGLE_PREDICTION_ONLY;
+ }
+
+ if (cm->comp_pred_mode != COMP_PREDICTION_ONLY)
+ for (i = 0; i < REF_CONTEXTS; i++) {
+ if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ cm->fc.single_ref_prob[i][0] =
+ vp9_read_prob_diff_update(r, cm->fc.single_ref_prob[i][0]);
+ if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ cm->fc.single_ref_prob[i][1] =
+ vp9_read_prob_diff_update(r, cm->fc.single_ref_prob[i][1]);
+ }
+
+ if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY)
+ for (i = 0; i < REF_CONTEXTS; i++)
+ if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ cm->fc.comp_ref_prob[i] =
+ vp9_read_prob_diff_update(r, cm->fc.comp_ref_prob[i]);
+
+ // VP9_INTRA_MODES
+ for (j = 0; j < BLOCK_SIZE_GROUPS; j++) {
+ for (i = 0; i < VP9_INTRA_MODES - 1; ++i) {
+ if (vp9_read(r, VP9_MODE_UPDATE_PROB)) {
+ cm->fc.y_mode_prob[j][i] =
+ vp9_read_prob_diff_update(r, cm->fc.y_mode_prob[j][i]);
+ }
+ }
+ }
+ for (j = 0; j < NUM_PARTITION_CONTEXTS; ++j) {
+ for (i = 0; i < PARTITION_TYPES - 1; ++i) {
+ if (vp9_read(r, VP9_MODE_UPDATE_PROB)) {
+ cm->fc.partition_prob[INTER_FRAME][j][i] =
+ vp9_read_prob_diff_update(r,
+ cm->fc.partition_prob[INTER_FRAME][j][i]);
+ }
+ }
+ }
+
+ read_nmvprobs(r, nmvc, xd->allow_high_precision_mv);
+ }
+}
+
+// This function either reads the segment id for the current macroblock from
+// the bitstream or, if the value is temporally predicted, uses the
+// predicted value.
+static int read_mb_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col,
+ vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ MODE_INFO *const mi = xd->mode_info_context;
+ MB_MODE_INFO *const mbmi = &mi->mbmi;
+
+ if (!xd->segmentation_enabled)
+ return 0; // Default for disabled segmentation
+
+ if (xd->update_mb_segmentation_map) {
+ int segment_id;
+
+ if (cm->temporal_update) {
+ // Temporal coding of the segment id for this mb is enabled.
+ // Get the context based probability for reading the
+ // prediction status flag
+ const vp9_prob pred_prob = vp9_get_pred_prob(cm, xd, PRED_SEG_ID);
+ const int pred_flag = vp9_read(r, pred_prob);
+ vp9_set_pred_flag(xd, PRED_SEG_ID, pred_flag);
+
+ // If the value is flagged as correctly predicted
+ // then use the predicted value, otherwise decode it explicitly
+ segment_id = pred_flag ? vp9_get_pred_mi_segid(cm, mbmi->sb_type,
+ mi_row, mi_col)
+ : read_mb_segid(r, xd);
+ } else {
+ segment_id = read_mb_segid(r, xd); // Normal unpredicted coding mode
+ }
+
+ set_segment_id(cm, mbmi, mi_row, mi_col, segment_id); // Side effect
+ return segment_id;
+ } else {
+ return vp9_get_pred_mi_segid(cm, mbmi->sb_type, mi_row, mi_col);
+ }
+}
+
+
+static INLINE void assign_and_clamp_mv(int_mv *dst, const int_mv *src,
+ int mb_to_left_edge,
+ int mb_to_right_edge,
+ int mb_to_top_edge,
+ int mb_to_bottom_edge) {
+ dst->as_int = src->as_int;
+ clamp_mv(dst, mb_to_left_edge, mb_to_right_edge, mb_to_top_edge,
+ mb_to_bottom_edge);
+}
+
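+// Decode an MV as a difference from the reference MV: the joint type says
+// which of the row/col components are nonzero, each nonzero component is
+// read with read_mv_component(), and the NMV counts are updated for
+// backward adaptation.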
+static INLINE void decode_mv(vp9_reader *r, MV *mv, const MV *ref,
+ const nmv_context *ctx,
+ nmv_context_counts *counts,
+ int usehp) {
+ const MV_JOINT_TYPE j = treed_read(r, vp9_mv_joint_tree, ctx->joints);
+ MV diff = {0, 0};
+
+ usehp = usehp && vp9_use_nmv_hp(ref);
+ if (mv_joint_vertical(j))
+ diff.row = read_mv_component(r, &ctx->comps[0], usehp);
+
+ if (mv_joint_horizontal(j))
+ diff.col = read_mv_component(r, &ctx->comps[1], usehp);
+
+ vp9_increment_nmv(&diff, ref, counts, usehp);
+
+ mv->row = diff.row + ref->row;
+ mv->col = diff.col + ref->col;
+}
+
+static INLINE INTERPOLATIONFILTERTYPE read_switchable_filter_type(
+ VP9D_COMP *pbi, vp9_reader *r) {
+ const int index = treed_read(r, vp9_switchable_interp_tree,
+ vp9_get_pred_probs(&pbi->common, &pbi->mb,
+ PRED_SWITCHABLE_INTERP));
+ ++pbi->common.fc.switchable_interp_count
+ [vp9_get_pred_context(
+ &pbi->common, &pbi->mb, PRED_SWITCHABLE_INTERP)][index];
+ return vp9_switchable_interp[index];
+}
+
+static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
+ int mi_row, int mi_col,
+ vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ nmv_context *const nmvc = &cm->fc.nmvc;
+ MACROBLOCKD *const xd = &pbi->mb;
+
+ int_mv *const mv0 = &mbmi->mv[0];
+ int_mv *const mv1 = &mbmi->mv[1];
+ BLOCK_SIZE_TYPE bsize = mi->mbmi.sb_type;
+ int bw = 1 << b_width_log2(bsize);
+ int bh = 1 << b_height_log2(bsize);
+
+ int mb_to_left_edge, mb_to_right_edge, mb_to_top_edge, mb_to_bottom_edge;
+ int j, idx, idy;
+
+ mbmi->ref_frame[1] = NONE;
+
+  // Make sure the MACROBLOCKD mode info pointer points at the correct entry
+  // for the current macroblock.
+ xd->mode_info_context = mi;
+
+  // Distance of MB to the various image edges.
+  // These are specified to 1/8th pel as they are always compared to MV
+  // values that are in 1/8th pel units.
+ set_mi_row_col(cm, xd, mi_row, 1 << mi_height_log2(bsize),
+ mi_col, 1 << mi_width_log2(bsize));
+
+ mb_to_top_edge = xd->mb_to_top_edge - LEFT_TOP_MARGIN;
+ mb_to_bottom_edge = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN;
+ mb_to_left_edge = xd->mb_to_left_edge - LEFT_TOP_MARGIN;
+ mb_to_right_edge = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN;
+
+ // Read the macroblock segment id.
+ mbmi->segment_id = read_mb_segment_id(pbi, mi_row, mi_col, r);
+
+ mbmi->mb_skip_coeff = vp9_segfeature_active(xd, mbmi->segment_id,
+ SEG_LVL_SKIP);
+ if (!mbmi->mb_skip_coeff) {
+ mbmi->mb_skip_coeff = vp9_read(r, vp9_get_pred_prob(cm, xd, PRED_MBSKIP));
+ cm->fc.mbskip_count[vp9_get_pred_context(cm, xd, PRED_MBSKIP)]
+ [mbmi->mb_skip_coeff]++;
+ }
+
+ // Read the reference frame
+ if (!vp9_segfeature_active(xd, mbmi->segment_id, SEG_LVL_REF_FRAME)) {
+ mbmi->ref_frame[0] =
+ vp9_read(r, vp9_get_pred_prob(cm, xd, PRED_INTRA_INTER));
+ cm->fc.intra_inter_count[vp9_get_pred_context(cm, xd, PRED_INTRA_INTER)]
+ [mbmi->ref_frame[0] != INTRA_FRAME]++;
+ } else {
+ mbmi->ref_frame[0] =
+ vp9_get_segdata(xd, mbmi->segment_id, SEG_LVL_REF_FRAME) != INTRA_FRAME;
+ }
+
+ if (cm->txfm_mode == TX_MODE_SELECT &&
+ (mbmi->mb_skip_coeff == 0 || mbmi->ref_frame[0] == INTRA_FRAME) &&
+ bsize >= BLOCK_SIZE_SB8X8) {
+ mbmi->txfm_size = select_txfm_size(cm, xd, r, bsize);
+ } else if (bsize >= BLOCK_SIZE_SB32X32 &&
+ cm->txfm_mode >= ALLOW_32X32) {
+ mbmi->txfm_size = TX_32X32;
+ } else if (cm->txfm_mode >= ALLOW_16X16 &&
+ bsize >= BLOCK_SIZE_MB16X16) {
+ mbmi->txfm_size = TX_16X16;
+ } else if (cm->txfm_mode >= ALLOW_8X8 && (bsize >= BLOCK_SIZE_SB8X8)) {
+ mbmi->txfm_size = TX_8X8;
+ } else {
+ mbmi->txfm_size = TX_4X4;
+ }
+
+  // If the reference frame is an inter frame
+ if (mbmi->ref_frame[0] != INTRA_FRAME) {
+ int_mv nearest, nearby, best_mv;
+ int_mv nearest_second, nearby_second, best_mv_second;
+ vp9_prob *mv_ref_p;
+
+ read_ref_frame(pbi, r, mbmi->segment_id, mbmi->ref_frame);
+
+ {
+#ifdef DEC_DEBUG
+ if (dec_debug)
+ printf("%d %d\n", xd->mode_info_context->mbmi.mv[0].as_mv.row,
+ xd->mode_info_context->mbmi.mv[0].as_mv.col);
+#endif
+ vp9_find_mv_refs(cm, xd, mi, xd->prev_mode_info_context,
+ mbmi->ref_frame[0], mbmi->ref_mvs[mbmi->ref_frame[0]],
+ cm->ref_frame_sign_bias);
+
+ mv_ref_p = cm->fc.inter_mode_probs[
+ mbmi->mb_mode_context[mbmi->ref_frame[0]]];
+
+      // If the segment level skip mode is enabled
+ if (vp9_segfeature_active(xd, mbmi->segment_id, SEG_LVL_SKIP)) {
+ mbmi->mode = ZEROMV;
+ } else if (bsize >= BLOCK_SIZE_SB8X8) {
+ mbmi->mode = read_sb_mv_ref(r, mv_ref_p);
+ vp9_accum_mv_refs(cm, mbmi->mode,
+ mbmi->mb_mode_context[mbmi->ref_frame[0]]);
+ }
+
+ if (bsize < BLOCK_SIZE_SB8X8 || mbmi->mode != ZEROMV) {
+ vp9_find_best_ref_mvs(xd,
+ mbmi->ref_mvs[mbmi->ref_frame[0]],
+ &nearest, &nearby);
+
+ best_mv.as_int = mbmi->ref_mvs[mbmi->ref_frame[0]][0].as_int;
+ }
+
+#ifdef DEC_DEBUG
+ if (dec_debug)
+ printf("[D %d %d] %d %d %d %d\n", ref_frame,
+ mbmi->mb_mode_context[ref_frame],
+ mv_ref_p[0], mv_ref_p[1], mv_ref_p[2], mv_ref_p[3]);
+#endif
+ }
+
+ mbmi->interp_filter = cm->mcomp_filter_type == SWITCHABLE
+ ? read_switchable_filter_type(pbi, r)
+ : cm->mcomp_filter_type;
+
+ if (mbmi->ref_frame[1] > INTRA_FRAME) {
+ vp9_find_mv_refs(cm, xd, mi, xd->prev_mode_info_context,
+ mbmi->ref_frame[1],
+ mbmi->ref_mvs[mbmi->ref_frame[1]],
+ cm->ref_frame_sign_bias);
+
+ if (bsize < BLOCK_SIZE_SB8X8 || mbmi->mode != ZEROMV) {
+ vp9_find_best_ref_mvs(xd,
+ mbmi->ref_mvs[mbmi->ref_frame[1]],
+ &nearest_second,
+ &nearby_second);
+ best_mv_second.as_int = mbmi->ref_mvs[mbmi->ref_frame[1]][0].as_int;
+ }
+ }
+
+ mbmi->uv_mode = DC_PRED;
+ if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
+ for (idy = 0; idy < 2; idy += bh) {
+ for (idx = 0; idx < 2; idx += bw) {
+ int_mv blockmv, secondmv;
+ int blockmode;
+ int i;
+ j = idy * 2 + idx;
+
+ blockmode = read_sb_mv_ref(r, mv_ref_p);
+ vp9_accum_mv_refs(cm, blockmode,
+ mbmi->mb_mode_context[mbmi->ref_frame[0]]);
+ if (blockmode == NEARESTMV || blockmode == NEARMV) {
+ MV_REFERENCE_FRAME rf2 = mbmi->ref_frame[1];
+ vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest, &nearby, j, 0);
+ if (rf2 > 0) {
+ vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest_second,
+ &nearby_second, j, 1);
+ }
+ }
+
+ switch (blockmode) {
+ case NEWMV:
+ decode_mv(r, &blockmv.as_mv, &best_mv.as_mv, nmvc,
+ &cm->fc.NMVcount, xd->allow_high_precision_mv);
+
+ if (mbmi->ref_frame[1] > 0)
+ decode_mv(r, &secondmv.as_mv, &best_mv_second.as_mv, nmvc,
+ &cm->fc.NMVcount, xd->allow_high_precision_mv);
+
+#ifdef VPX_MODE_COUNT
+ vp9_mv_cont_count[mv_contz][3]++;
+#endif
+ break;
+ case NEARESTMV:
+ blockmv.as_int = nearest.as_int;
+ if (mbmi->ref_frame[1] > 0)
+ secondmv.as_int = nearest_second.as_int;
+#ifdef VPX_MODE_COUNT
+ vp9_mv_cont_count[mv_contz][0]++;
+#endif
+ break;
+ case NEARMV:
+ blockmv.as_int = nearby.as_int;
+ if (mbmi->ref_frame[1] > 0)
+ secondmv.as_int = nearby_second.as_int;
+#ifdef VPX_MODE_COUNT
+ vp9_mv_cont_count[mv_contz][1]++;
+#endif
+ break;
+ case ZEROMV:
+ blockmv.as_int = 0;
+ if (mbmi->ref_frame[1] > 0)
+ secondmv.as_int = 0;
+#ifdef VPX_MODE_COUNT
+ vp9_mv_cont_count[mv_contz][2]++;
+#endif
+ break;
+ default:
+ break;
+ }
+ mi->bmi[j].as_mv[0].as_int = blockmv.as_int;
+ if (mbmi->ref_frame[1] > 0)
+ mi->bmi[j].as_mv[1].as_int = secondmv.as_int;
+
+ for (i = 1; i < bh; ++i)
+ vpx_memcpy(&mi->bmi[j + i * 2], &mi->bmi[j], sizeof(mi->bmi[j]));
+ for (i = 1; i < bw; ++i)
+ vpx_memcpy(&mi->bmi[j + i], &mi->bmi[j], sizeof(mi->bmi[j]));
+ mi->mbmi.mode = blockmode;
+ }
+ }
+
+ mv0->as_int = mi->bmi[3].as_mv[0].as_int;
+ mv1->as_int = mi->bmi[3].as_mv[1].as_int;
+ } else {
+ switch (mbmi->mode) {
+ case NEARMV:
+        // Clip "next_nearest" so that it does not extend too far out of the image
+ assign_and_clamp_mv(mv0, &nearby, mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
+ if (mbmi->ref_frame[1] > 0)
+ assign_and_clamp_mv(mv1, &nearby_second, mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
+ break;
+
+ case NEARESTMV:
+        // Clip "next_nearest" so that it does not extend too far out of the image
+ assign_and_clamp_mv(mv0, &nearest, mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
+ if (mbmi->ref_frame[1] > 0)
+ assign_and_clamp_mv(mv1, &nearest_second, mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
+ break;
+
+ case ZEROMV:
+ mv0->as_int = 0;
+ if (mbmi->ref_frame[1] > 0)
+ mv1->as_int = 0;
+ break;
+
+ case NEWMV:
+ decode_mv(r, &mv0->as_mv, &best_mv.as_mv, nmvc, &cm->fc.NMVcount,
+ xd->allow_high_precision_mv);
+ if (mbmi->ref_frame[1] > 0)
+ decode_mv(r, &mv1->as_mv, &best_mv_second.as_mv, nmvc,
+ &cm->fc.NMVcount, xd->allow_high_precision_mv);
+ break;
+ default:
+#if CONFIG_DEBUG
+ assert(0);
+#endif
+ break;
+ }
+ }
+ } else {
+ // required for left and above block mv
+ mv0->as_int = 0;
+
+ if (bsize >= BLOCK_SIZE_SB8X8) {
+ const BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
+ const int bwl = b_width_log2(bsize), bhl = b_height_log2(bsize);
+ const int bsl = MIN(bwl, bhl);
+ mbmi->mode = read_intra_mode(r, cm->fc.y_mode_prob[MIN(3, bsl)]);
+ cm->fc.y_mode_counts[MIN(3, bsl)][mbmi->mode]++;
+ } else {
+ int idx, idy;
+ for (idy = 0; idy < 2; idy += bh) {
+ for (idx = 0; idx < 2; idx += bw) {
+ int ib = idy * 2 + idx, k;
+ int m = read_intra_mode(r, cm->fc.y_mode_prob[0]);
+ mi->bmi[ib].as_mode.first = m;
+ cm->fc.y_mode_counts[0][m]++;
+ for (k = 1; k < bh; ++k)
+ mi->bmi[ib + k * 2].as_mode.first = m;
+ for (k = 1; k < bw; ++k)
+ mi->bmi[ib + k].as_mode.first = m;
+ }
+ }
+ mbmi->mode = mi->bmi[3].as_mode.first;
+ }
+
+ mbmi->uv_mode = read_intra_mode(r, cm->fc.uv_mode_prob[mbmi->mode]);
+ cm->fc.uv_mode_counts[mbmi->mode][mbmi->uv_mode]++;
+ }
+}
+
+void vp9_decode_mode_mvs_init(VP9D_COMP* const pbi, vp9_reader *r) {
+ VP9_COMMON *cm = &pbi->common;
+ int k;
+
+ // TODO(jkoleszar): does this clear more than MBSKIP_CONTEXTS? Maybe remove.
+ // vpx_memset(cm->fc.mbskip_probs, 0, sizeof(cm->fc.mbskip_probs));
+ for (k = 0; k < MBSKIP_CONTEXTS; ++k) {
+ if (vp9_read(r, VP9_MODE_UPDATE_PROB)) {
+ cm->fc.mbskip_probs[k] =
+ vp9_read_prob_diff_update(r, cm->fc.mbskip_probs[k]);
+ }
+ // cm->fc.mbskip_probs[k] = vp9_read_prob(r);
+ }
+
+ mb_mode_mv_init(pbi, r);
+}
+
+void vp9_decode_mb_mode_mv(VP9D_COMP* const pbi,
+ MACROBLOCKD* const xd,
+ int mi_row,
+ int mi_col,
+ vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ MODE_INFO *mi = xd->mode_info_context;
+ MB_MODE_INFO *const mbmi = &mi->mbmi;
+
+ if ((cm->frame_type == KEY_FRAME) || cm->intra_only) {
+ kfread_modes(pbi, mi, mi_row, mi_col, r);
+ } else {
+ read_mb_modes_mv(pbi, mi, &mi->mbmi, mi_row, mi_col, r);
+ }
+
+ if (1) {
+ const int bw = 1 << mi_width_log2(mbmi->sb_type);
+ const int bh = 1 << mi_height_log2(mbmi->sb_type);
+ const int y_mis = MIN(bh, cm->mi_rows - mi_row);
+ const int x_mis = MIN(bw, cm->mi_cols - mi_col);
+ const int mis = cm->mode_info_stride;
+ int x, y;
+
+ for (y = 0; y < y_mis; y++)
+ for (x = !y; x < x_mis; x++)
+ mi[y * mis + x] = *mi;
+ }
+}
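The tail of vp9_decode_mb_mode_mv() broadcasts the freshly decoded mode info to every mode-info unit the block covers; note that the inner loop starts at x = !y so the source element at (0, 0) is not copied onto itself. A minimal standalone sketch of that broadcast, with Info and broadcast_info() as illustrative stand-ins rather than libvpx names:

    typedef struct { int mode; } Info;  /* illustrative stand-in for MODE_INFO */

    /* Copy grid[0] into every other unit of a rows-by-cols block of a
     * grid with the given stride; x starts at !y so (0, 0) is skipped. */
    static void broadcast_info(Info *grid, int stride, int rows, int cols) {
      int x, y;
      for (y = 0; y < rows; y++)
        for (x = !y; x < cols; x++)
          grid[y * stride + x] = grid[0];
    }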
diff --git a/libvpx/vp9/decoder/vp9_decodemv.h b/libvpx/vp9/decoder/vp9_decodemv.h
new file mode 100644
index 0000000..bf5e83c
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_decodemv.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_DECODER_VP9_DECODEMV_H_
+#define VP9_DECODER_VP9_DECODEMV_H_
+
+#include "vp9/decoder/vp9_onyxd_int.h"
+
+void vp9_decode_mb_mode_mv(VP9D_COMP* const pbi,
+ MACROBLOCKD* const xd,
+ int mi_row,
+ int mi_col,
+ vp9_reader *r);
+void vp9_decode_mode_mvs_init(VP9D_COMP* const pbi, vp9_reader *r);
+
+#endif // VP9_DECODER_VP9_DECODEMV_H_
diff --git a/libvpx/vp9/decoder/vp9_decodframe.c b/libvpx/vp9/decoder/vp9_decodframe.c
new file mode 100644
index 0000000..49b181d
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_decodframe.c
@@ -0,0 +1,1199 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "./vp9_rtcd.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_scale/vpx_scale.h"
+
+#include "vp9/common/vp9_extend.h"
+#include "vp9/common/vp9_modecont.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_reconintra.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_alloccommon.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_tile_common.h"
+
+#include "vp9/decoder/vp9_dboolhuff.h"
+#include "vp9/decoder/vp9_decodframe.h"
+#include "vp9/decoder/vp9_detokenize.h"
+#include "vp9/decoder/vp9_decodemv.h"
+#include "vp9/decoder/vp9_onyxd_int.h"
+#include "vp9/decoder/vp9_read_bit_buffer.h"
+
+
+// #define DEC_DEBUG
+#ifdef DEC_DEBUG
+int dec_debug = 0;
+#endif
+
+static int read_be32(const uint8_t *p) {
+ return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
+}
+
+// len == 0 is not allowed
+static int read_is_valid(const uint8_t *start, size_t len,
+ const uint8_t *end) {
+ return start + len > start && start + len <= end;
+}
+
+static void setup_txfm_mode(VP9_COMMON *pc, int lossless, vp9_reader *r) {
+ if (lossless) {
+ pc->txfm_mode = ONLY_4X4;
+ } else {
+ pc->txfm_mode = vp9_read_literal(r, 2);
+ if (pc->txfm_mode == ALLOW_32X32)
+ pc->txfm_mode += vp9_read_bit(r);
+ if (pc->txfm_mode == TX_MODE_SELECT) {
+ int i, j;
+ for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
+ for (j = 0; j < TX_SIZE_MAX_SB - 3; ++j) {
+ if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ pc->fc.tx_probs_8x8p[i][j] =
+ vp9_read_prob_diff_update(r, pc->fc.tx_probs_8x8p[i][j]);
+ }
+ }
+ for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
+ for (j = 0; j < TX_SIZE_MAX_SB - 2; ++j) {
+ if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ pc->fc.tx_probs_16x16p[i][j] =
+ vp9_read_prob_diff_update(r, pc->fc.tx_probs_16x16p[i][j]);
+ }
+ }
+ for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
+ for (j = 0; j < TX_SIZE_MAX_SB - 1; ++j) {
+ if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ pc->fc.tx_probs_32x32p[i][j] =
+ vp9_read_prob_diff_update(r, pc->fc.tx_probs_32x32p[i][j]);
+ }
+ }
+ }
+ }
+}
+
+static int get_unsigned_bits(unsigned int num_values) {
+ int cat = 0;
+ if (num_values <= 1)
+ return 0;
+ num_values--;
+ while (num_values > 0) {
+ cat++;
+ num_values >>= 1;
+ }
+ return cat;
+}
+
+static int inv_recenter_nonneg(int v, int m) {
+ if (v > 2 * m)
+ return v;
+
+ return v % 2 ? m - (v + 1) / 2 : m + v / 2;
+}
+
+static int decode_uniform(vp9_reader *r, int n) {
+ int v;
+ const int l = get_unsigned_bits(n);
+ const int m = (1 << l) - n;
+ if (!l)
+ return 0;
+
+ v = vp9_read_literal(r, l - 1);
+ return v < m ? v : (v << 1) - m + vp9_read_bit(r);
+}
+
+static int decode_term_subexp(vp9_reader *r, int k, int num_syms) {
+ int i = 0, mk = 0, word;
+ while (1) {
+ const int b = i ? k + i - 1 : k;
+ const int a = 1 << b;
+ if (num_syms <= mk + 3 * a) {
+ word = decode_uniform(r, num_syms - mk) + mk;
+ break;
+ } else {
+ if (vp9_read_bit(r)) {
+ i++;
+ mk += a;
+ } else {
+ word = vp9_read_literal(r, b) + mk;
+ break;
+ }
+ }
+ }
+ return word;
+}
+
+static int decode_unsigned_max(struct vp9_read_bit_buffer *rb, int max) {
+ const int data = vp9_rb_read_literal(rb, get_unsigned_bits(max));
+ return data > max ? max : data;
+}
+
+static int merge_index(int v, int n, int modulus) {
+ int max1 = (n - 1 - modulus / 2) / modulus + 1;
+ if (v < max1) {
+ v = v * modulus + modulus / 2;
+ } else {
+ int w;
+ v -= max1;
+ w = v;
+ v += (v + modulus - modulus / 2) / modulus;
+ while (v % modulus == modulus / 2 ||
+ w != v - (v + modulus - modulus / 2) / modulus) v++;
+ }
+ return v;
+}
+
+static int inv_remap_prob(int v, int m) {
+ const int n = 255;
+
+ v = merge_index(v, n - 1, MODULUS_PARAM);
+ m--;
+ if ((m << 1) <= n) {
+ return 1 + inv_recenter_nonneg(v + 1, m);
+ } else {
+ return n - inv_recenter_nonneg(v + 1, n - 1 - m);
+ }
+}
+
+vp9_prob vp9_read_prob_diff_update(vp9_reader *r, int oldp) {
+ int delp = decode_term_subexp(r, SUBEXP_PARAM, 255);
+ return (vp9_prob)inv_remap_prob(delp, oldp);
+}
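+
+/* Note on decode_term_subexp(): assuming SUBEXP_PARAM == 4 and
+ * num_syms == 255 as used above, the unary prefix of 1-bits selects the
+ * range [0, 16), [16, 32), [32, 64) or [64, 255); the suffix is a 4- or
+ * 5-bit literal for the first three ranges and a decode_uniform() code
+ * for the last. Worked example under that assumption: prefix bits "10"
+ * followed by the 4-bit literal 0101 decode to 16 + 5 = 21. */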
+
+void vp9_init_dequantizer(VP9_COMMON *pc) {
+ int q;
+
+ for (q = 0; q < QINDEX_RANGE; q++) {
+ // DC value
+ pc->y_dequant[q][0] = vp9_dc_quant(q, pc->y_dc_delta_q);
+ pc->uv_dequant[q][0] = vp9_dc_quant(q, pc->uv_dc_delta_q);
+
+ // AC values
+ pc->y_dequant[q][1] = vp9_ac_quant(q, 0);
+ pc->uv_dequant[q][1] = vp9_ac_quant(q, pc->uv_ac_delta_q);
+ }
+}
+
+static void mb_init_dequantizer(VP9_COMMON *pc, MACROBLOCKD *xd) {
+ int i;
+ const int segment_id = xd->mode_info_context->mbmi.segment_id;
+ xd->q_index = vp9_get_qindex(xd, segment_id, pc->base_qindex);
+
+ xd->plane[0].dequant = pc->y_dequant[xd->q_index];
+ for (i = 1; i < MAX_MB_PLANE; i++)
+ xd->plane[i].dequant = pc->uv_dequant[xd->q_index];
+}
+
+static void decode_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
+ int ss_txfrm_size, void *arg) {
+ MACROBLOCKD* const xd = arg;
+ struct macroblockd_plane *pd = &xd->plane[plane];
+ int16_t* const qcoeff = BLOCK_OFFSET(pd->qcoeff, block, 16);
+ const int stride = pd->dst.stride;
+ const int raster_block = txfrm_block_to_raster_block(xd, bsize, plane,
+ block, ss_txfrm_size);
+ uint8_t* const dst = raster_block_offset_uint8(xd, bsize, plane,
+ raster_block,
+ pd->dst.buf, stride);
+
+ TX_TYPE tx_type;
+
+ switch (ss_txfrm_size / 2) {
+ case TX_4X4:
+ tx_type = plane == 0 ? get_tx_type_4x4(xd, raster_block) : DCT_DCT;
+ if (tx_type == DCT_DCT)
+ xd->itxm_add(qcoeff, dst, stride, pd->eobs[block]);
+ else
+ vp9_iht_add_c(tx_type, qcoeff, dst, stride, pd->eobs[block]);
+ break;
+ case TX_8X8:
+ tx_type = plane == 0 ? get_tx_type_8x8(xd, raster_block) : DCT_DCT;
+ vp9_iht_add_8x8_c(tx_type, qcoeff, dst, stride, pd->eobs[block]);
+ break;
+ case TX_16X16:
+ tx_type = plane == 0 ? get_tx_type_16x16(xd, raster_block) : DCT_DCT;
+ vp9_iht_add_16x16_c(tx_type, qcoeff, dst, stride, pd->eobs[block]);
+ break;
+ case TX_32X32:
+ vp9_idct_add_32x32(qcoeff, dst, stride, pd->eobs[block]);
+ break;
+ }
+}
+
+static void decode_block_intra(int plane, int block, BLOCK_SIZE_TYPE bsize,
+ int ss_txfrm_size, void *arg) {
+ MACROBLOCKD* const xd = arg;
+ struct macroblockd_plane *pd = &xd->plane[plane];
+
+ const int raster_block = txfrm_block_to_raster_block(xd, bsize, plane,
+ block, ss_txfrm_size);
+ uint8_t* const dst = raster_block_offset_uint8(xd, bsize, plane,
+ raster_block,
+ pd->dst.buf, pd->dst.stride);
+ const TX_SIZE tx_size = (TX_SIZE)(ss_txfrm_size / 2);
+ int b_mode;
+ int plane_b_size;
+ const int tx_ib = raster_block >> tx_size;
+ const int mode = plane == 0 ? xd->mode_info_context->mbmi.mode
+ : xd->mode_info_context->mbmi.uv_mode;
+
+ if (plane == 0 && xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
+ assert(bsize == BLOCK_SIZE_SB8X8);
+ b_mode = xd->mode_info_context->bmi[raster_block].as_mode.first;
+ } else {
+ b_mode = mode;
+ }
+
+ if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0)
+ extend_for_intra(xd, plane, block, bsize, ss_txfrm_size);
+
+ plane_b_size = b_width_log2(bsize) - pd->subsampling_x;
+ vp9_predict_intra_block(xd, tx_ib, plane_b_size, tx_size, b_mode,
+ dst, pd->dst.stride);
+
+ // Early exit if there are no coefficients
+ if (xd->mode_info_context->mbmi.mb_skip_coeff)
+ return;
+
+ decode_block(plane, block, bsize, ss_txfrm_size, arg);
+}
+
+static void decode_atom(VP9D_COMP *pbi, MACROBLOCKD *xd,
+ int mi_row, int mi_col,
+ vp9_reader *r, BLOCK_SIZE_TYPE bsize) {
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+
+ assert(mbmi->ref_frame[0] != INTRA_FRAME);
+
+ if ((pbi->common.frame_type != KEY_FRAME) && (!pbi->common.intra_only))
+ vp9_setup_interp_filters(xd, mbmi->interp_filter, &pbi->common);
+
+ // prediction
+ vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+
+ if (mbmi->mb_skip_coeff) {
+ vp9_reset_sb_tokens_context(xd, bsize);
+ } else {
+ if (xd->segmentation_enabled)
+ mb_init_dequantizer(&pbi->common, xd);
+
+ if (!vp9_reader_has_error(r))
+ vp9_decode_tokens(pbi, r, bsize);
+
+ foreach_transformed_block(xd, bsize, decode_block, xd);
+ }
+}
+
+static void decode_sb_intra(VP9D_COMP *pbi, MACROBLOCKD *xd,
+ int mi_row, int mi_col,
+ vp9_reader *r, BLOCK_SIZE_TYPE bsize) {
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+ if (mbmi->mb_skip_coeff) {
+ vp9_reset_sb_tokens_context(xd, bsize);
+ } else {
+ if (xd->segmentation_enabled)
+ mb_init_dequantizer(&pbi->common, xd);
+
+ if (!vp9_reader_has_error(r))
+ vp9_decode_tokens(pbi, r, bsize);
+ }
+
+ foreach_transformed_block(xd, bsize, decode_block_intra, xd);
+}
+
+static void decode_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, int mi_row, int mi_col,
+ vp9_reader *r, BLOCK_SIZE_TYPE bsize) {
+ const int bwl = mi_width_log2(bsize), bhl = mi_height_log2(bsize);
+ const int bw = 1 << bwl, bh = 1 << bhl;
+ int n, eobtotal;
+ VP9_COMMON *const pc = &pbi->common;
+ MODE_INFO *const mi = xd->mode_info_context;
+ MB_MODE_INFO *const mbmi = &mi->mbmi;
+ const int mis = pc->mode_info_stride;
+
+ assert(mbmi->sb_type == bsize);
+ assert(mbmi->ref_frame[0] != INTRA_FRAME);
+
+ if (pbi->common.frame_type != KEY_FRAME)
+ vp9_setup_interp_filters(xd, mbmi->interp_filter, pc);
+
+ // generate prediction
+ vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+
+ if (mbmi->mb_skip_coeff) {
+ vp9_reset_sb_tokens_context(xd, bsize);
+ } else {
+ // re-initialize macroblock dequantizer before detokenization
+ if (xd->segmentation_enabled)
+ mb_init_dequantizer(pc, xd);
+
+ // dequantization and idct
+ eobtotal = vp9_decode_tokens(pbi, r, bsize);
+ if (eobtotal == 0) { // skip loopfilter
+ for (n = 0; n < bw * bh; n++) {
+ const int x_idx = n & (bw - 1), y_idx = n >> bwl;
+
+ if (mi_col + x_idx < pc->mi_cols && mi_row + y_idx < pc->mi_rows)
+ mi[y_idx * mis + x_idx].mbmi.mb_skip_coeff = 1;
+ }
+ } else {
+ foreach_transformed_block(xd, bsize, decode_block, xd);
+ }
+ }
+}
+
+static void set_offsets(VP9D_COMP *pbi, BLOCK_SIZE_TYPE bsize,
+ int mi_row, int mi_col) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ const int bh = 1 << mi_height_log2(bsize);
+ const int bw = 1 << mi_width_log2(bsize);
+ const int mi_idx = mi_row * cm->mode_info_stride + mi_col;
+ int i;
+
+ xd->mode_info_context = cm->mi + mi_idx;
+ xd->mode_info_context->mbmi.sb_type = bsize;
+ // Special case: if prev_mi is NULL, the previous mode info context
+ // cannot be used.
+ xd->prev_mode_info_context = cm->prev_mi ? cm->prev_mi + mi_idx : NULL;
+
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ struct macroblockd_plane *pd = &xd->plane[i];
+ pd->above_context = cm->above_context[i] +
+ (mi_col * 2 >> pd->subsampling_x);
+ pd->left_context = cm->left_context[i] +
+ (((mi_row * 2) & 15) >> pd->subsampling_y);
+ }
+ xd->above_seg_context = cm->above_seg_context + mi_col;
+ xd->left_seg_context = cm->left_seg_context + (mi_row & MI_MASK);
+
+ // Distances of the MB from the various image edges, specified in 1/8th-pel
+ // units, since they are always compared against values in 1/8th-pel units.
+ set_mi_row_col(cm, xd, mi_row, bh, mi_col, bw);
+
+ setup_dst_planes(xd, &cm->yv12_fb[cm->new_fb_idx], mi_row, mi_col);
+}
+
+static void set_refs(VP9D_COMP *pbi, int mi_row, int mi_col) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+
+ if (mbmi->ref_frame[0] > INTRA_FRAME) {
+ // Select the appropriate reference frame for this MB
+ const int fb_idx = cm->active_ref_idx[mbmi->ref_frame[0] - 1];
+ const YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[fb_idx];
+ xd->scale_factor[0] = cm->active_ref_scale[mbmi->ref_frame[0] - 1];
+ xd->scale_factor_uv[0] = cm->active_ref_scale[mbmi->ref_frame[0] - 1];
+ setup_pre_planes(xd, cfg, NULL, mi_row, mi_col,
+ xd->scale_factor, xd->scale_factor_uv);
+ xd->corrupted |= cfg->corrupted;
+
+ if (mbmi->ref_frame[1] > INTRA_FRAME) {
+ // Select the appropriate reference frame for this MB
+ const int second_fb_idx = cm->active_ref_idx[mbmi->ref_frame[1] - 1];
+ const YV12_BUFFER_CONFIG *second_cfg = &cm->yv12_fb[second_fb_idx];
+ xd->scale_factor[1] = cm->active_ref_scale[mbmi->ref_frame[1] - 1];
+ xd->scale_factor_uv[1] = cm->active_ref_scale[mbmi->ref_frame[1] - 1];
+ setup_pre_planes(xd, NULL, second_cfg, mi_row, mi_col,
+ xd->scale_factor, xd->scale_factor_uv);
+ xd->corrupted |= second_cfg->corrupted;
+ }
+ }
+}
+
+static void decode_modes_b(VP9D_COMP *pbi, int mi_row, int mi_col,
+ vp9_reader *r, BLOCK_SIZE_TYPE bsize) {
+ MACROBLOCKD *const xd = &pbi->mb;
+
+ if (bsize < BLOCK_SIZE_SB8X8)
+ if (xd->ab_index > 0)
+ return;
+ set_offsets(pbi, bsize, mi_row, mi_col);
+ vp9_decode_mb_mode_mv(pbi, xd, mi_row, mi_col, r);
+ set_refs(pbi, mi_row, mi_col);
+
+ if (xd->mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME)
+ decode_sb_intra(pbi, xd, mi_row, mi_col, r, (bsize < BLOCK_SIZE_SB8X8) ?
+ BLOCK_SIZE_SB8X8 : bsize);
+ else if (bsize < BLOCK_SIZE_SB8X8)
+ decode_atom(pbi, xd, mi_row, mi_col, r, BLOCK_SIZE_SB8X8);
+ else
+ decode_sb(pbi, xd, mi_row, mi_col, r, bsize);
+
+ xd->corrupted |= vp9_reader_has_error(r);
+}
+
+static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
+ vp9_reader* r, BLOCK_SIZE_TYPE bsize) {
+ VP9_COMMON *const pc = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ int bs = (1 << mi_width_log2(bsize)) / 2, n;
+ PARTITION_TYPE partition = PARTITION_NONE;
+ BLOCK_SIZE_TYPE subsize;
+
+ if (mi_row >= pc->mi_rows || mi_col >= pc->mi_cols)
+ return;
+
+ if (bsize < BLOCK_SIZE_SB8X8)
+ if (xd->ab_index != 0)
+ return;
+
+ if (bsize >= BLOCK_SIZE_SB8X8) {
+ int pl;
+ int idx = check_bsize_coverage(pc, xd, mi_row, mi_col, bsize);
+ // read the partition information
+ xd->left_seg_context = pc->left_seg_context + (mi_row & MI_MASK);
+ xd->above_seg_context = pc->above_seg_context + mi_col;
+ pl = partition_plane_context(xd, bsize);
+
+ if (idx == 0)
+ partition = treed_read(r, vp9_partition_tree,
+ pc->fc.partition_prob[pc->frame_type][pl]);
+ else if (idx > 0 &&
+ !vp9_read(r, pc->fc.partition_prob[pc->frame_type][pl][idx]))
+ partition = (idx == 1) ? PARTITION_HORZ : PARTITION_VERT;
+ else
+ partition = PARTITION_SPLIT;
+
+ pc->fc.partition_counts[pl][partition]++;
+ }
+
+ subsize = get_subsize(bsize, partition);
+ *(get_sb_index(xd, subsize)) = 0;
+
+ switch (partition) {
+ case PARTITION_NONE:
+ decode_modes_b(pbi, mi_row, mi_col, r, subsize);
+ break;
+ case PARTITION_HORZ:
+ decode_modes_b(pbi, mi_row, mi_col, r, subsize);
+ *(get_sb_index(xd, subsize)) = 1;
+ if (mi_row + bs < pc->mi_rows)
+ decode_modes_b(pbi, mi_row + bs, mi_col, r, subsize);
+ break;
+ case PARTITION_VERT:
+ decode_modes_b(pbi, mi_row, mi_col, r, subsize);
+ *(get_sb_index(xd, subsize)) = 1;
+ if (mi_col + bs < pc->mi_cols)
+ decode_modes_b(pbi, mi_row, mi_col + bs, r, subsize);
+ break;
+ case PARTITION_SPLIT:
+ for (n = 0; n < 4; n++) {
+ int j = n >> 1, i = n & 0x01;
+ *(get_sb_index(xd, subsize)) = n;
+ decode_modes_sb(pbi, mi_row + j * bs, mi_col + i * bs, r, subsize);
+ }
+ break;
+ default:
+ assert(0);
+ }
+ // update partition context
+ if (bsize >= BLOCK_SIZE_SB8X8 &&
+ (bsize == BLOCK_SIZE_SB8X8 || partition != PARTITION_SPLIT)) {
+ set_partition_seg_context(pc, xd, mi_row, mi_col);
+ update_partition_context(xd, subsize, bsize);
+ }
+}
+
+static void setup_token_decoder(VP9D_COMP *pbi,
+ const uint8_t *data, size_t read_size,
+ vp9_reader *r) {
+ VP9_COMMON *pc = &pbi->common;
+ const uint8_t *data_end = pbi->source + pbi->source_sz;
+
+ // Validate the calculated partition length. If the buffer
+ // described by the partition can't be fully read, then restrict
+ // it to the portion that can be (for EC mode) or throw an error.
+ if (!read_is_valid(data, read_size, data_end))
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt tile length");
+
+ if (vp9_reader_init(r, data, read_size))
+ vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate bool decoder %d", 1);
+}
+
+static void read_coef_probs_common(FRAME_CONTEXT *fc, TX_SIZE tx_size,
+ vp9_reader *r) {
+ vp9_coeff_probs_model *coef_probs = fc->coef_probs[tx_size];
+
+ if (vp9_read_bit(r)) {
+ int i, j, k, l, m;
+ for (i = 0; i < BLOCK_TYPES; i++) {
+ for (j = 0; j < REF_TYPES; j++) {
+ for (k = 0; k < COEF_BANDS; k++) {
+ for (l = 0; l < PREV_COEF_CONTEXTS; l++) {
+ if (l >= 3 && k == 0)
+ continue;
+
+ for (m = 0; m < UNCONSTRAINED_NODES; m++) {
+ vp9_prob *const p = coef_probs[i][j][k][l] + m;
+
+ if (vp9_read(r, VP9_COEF_UPDATE_PROB))
+ *p = vp9_read_prob_diff_update(r, *p);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+static void read_coef_probs(VP9D_COMP *pbi, vp9_reader *r) {
+ const TXFM_MODE txfm_mode = pbi->common.txfm_mode;
+ FRAME_CONTEXT *const fc = &pbi->common.fc;
+
+ read_coef_probs_common(fc, TX_4X4, r);
+
+ if (txfm_mode > ONLY_4X4)
+ read_coef_probs_common(fc, TX_8X8, r);
+
+ if (txfm_mode > ALLOW_8X8)
+ read_coef_probs_common(fc, TX_16X16, r);
+
+ if (txfm_mode > ALLOW_16X16)
+ read_coef_probs_common(fc, TX_32X32, r);
+}
+
+static void setup_segmentation(VP9D_COMP *pbi, struct vp9_read_bit_buffer *rb) {
+ int i, j;
+
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+
+ xd->update_mb_segmentation_map = 0;
+ xd->update_mb_segmentation_data = 0;
+
+ xd->segmentation_enabled = vp9_rb_read_bit(rb);
+ if (!xd->segmentation_enabled)
+ return;
+
+ // Segmentation map update
+ xd->update_mb_segmentation_map = vp9_rb_read_bit(rb);
+ if (xd->update_mb_segmentation_map) {
+ for (i = 0; i < MB_SEG_TREE_PROBS; i++)
+ xd->mb_segment_tree_probs[i] = vp9_rb_read_bit(rb) ?
+ vp9_rb_read_literal(rb, 8) : MAX_PROB;
+
+ cm->temporal_update = vp9_rb_read_bit(rb);
+ if (cm->temporal_update) {
+ for (i = 0; i < PREDICTION_PROBS; i++)
+ cm->segment_pred_probs[i] = vp9_rb_read_bit(rb) ?
+ vp9_rb_read_literal(rb, 8) : MAX_PROB;
+ } else {
+ for (i = 0; i < PREDICTION_PROBS; i++)
+ cm->segment_pred_probs[i] = MAX_PROB;
+ }
+ }
+
+ // Segmentation data update
+ xd->update_mb_segmentation_data = vp9_rb_read_bit(rb);
+ if (xd->update_mb_segmentation_data) {
+ xd->mb_segment_abs_delta = vp9_rb_read_bit(rb);
+
+ vp9_clearall_segfeatures(xd);
+
+ for (i = 0; i < MAX_MB_SEGMENTS; i++) {
+ for (j = 0; j < SEG_LVL_MAX; j++) {
+ int data = 0;
+ const int feature_enabled = vp9_rb_read_bit(rb);
+ if (feature_enabled) {
+ vp9_enable_segfeature(xd, i, j);
+ data = decode_unsigned_max(rb, vp9_seg_feature_data_max(j));
+ if (vp9_is_segfeature_signed(j))
+ data = vp9_rb_read_bit(rb) ? -data : data;
+ }
+ vp9_set_segdata(xd, i, j, data);
+ }
+ }
+ }
+}
+
+static void setup_loopfilter(VP9D_COMP *pbi, struct vp9_read_bit_buffer *rb) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+
+ cm->filter_level = vp9_rb_read_literal(rb, 6);
+ cm->sharpness_level = vp9_rb_read_literal(rb, 3);
+
+ // Read in loop filter deltas applied at the MB level based on mode or ref
+ // frame.
+ xd->mode_ref_lf_delta_update = 0;
+
+ xd->mode_ref_lf_delta_enabled = vp9_rb_read_bit(rb);
+ if (xd->mode_ref_lf_delta_enabled) {
+ xd->mode_ref_lf_delta_update = vp9_rb_read_bit(rb);
+ if (xd->mode_ref_lf_delta_update) {
+ int i;
+
+ for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
+ if (vp9_rb_read_bit(rb)) {
+ const int value = vp9_rb_read_literal(rb, 6);
+ xd->ref_lf_deltas[i] = vp9_rb_read_bit(rb) ? -value : value;
+ }
+ }
+
+ for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
+ if (vp9_rb_read_bit(rb)) {
+ const int value = vp9_rb_read_literal(rb, 6);
+ xd->mode_lf_deltas[i] = vp9_rb_read_bit(rb) ? -value : value;
+ }
+ }
+ }
+ }
+}
+
+static int read_delta_q(struct vp9_read_bit_buffer *rb, int *delta_q) {
+ const int old = *delta_q;
+ if (vp9_rb_read_bit(rb)) {
+ const int value = vp9_rb_read_literal(rb, 4);
+ *delta_q = vp9_rb_read_bit(rb) ? -value : value;
+ }
+ return old != *delta_q;
+}
+
+static void setup_quantization(VP9D_COMP *pbi, struct vp9_read_bit_buffer *rb) {
+ MACROBLOCKD *const xd = &pbi->mb;
+ VP9_COMMON *const cm = &pbi->common;
+ int update = 0;
+
+ cm->base_qindex = vp9_rb_read_literal(rb, QINDEX_BITS);
+ update |= read_delta_q(rb, &cm->y_dc_delta_q);
+ update |= read_delta_q(rb, &cm->uv_dc_delta_q);
+ update |= read_delta_q(rb, &cm->uv_ac_delta_q);
+ if (update)
+ vp9_init_dequantizer(cm);
+
+ xd->lossless = cm->base_qindex == 0 &&
+ cm->y_dc_delta_q == 0 &&
+ cm->uv_dc_delta_q == 0 &&
+ cm->uv_ac_delta_q == 0;
+ if (xd->lossless) {
+ xd->itxm_add = vp9_idct_add_lossless_c;
+ } else {
+ xd->itxm_add = vp9_idct_add;
+ }
+}
+
+static INTERPOLATIONFILTERTYPE read_interp_filter_type(
+ struct vp9_read_bit_buffer *rb) {
+ return vp9_rb_read_bit(rb) ? SWITCHABLE
+ : vp9_rb_read_literal(rb, 2);
+}
+
+static void read_frame_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb,
+ int *width, int *height) {
+ const int w = vp9_rb_read_literal(rb, 16) + 1;
+ const int h = vp9_rb_read_literal(rb, 16) + 1;
+ *width = w;
+ *height = h;
+}
+
+static void setup_display_size(VP9D_COMP *pbi, struct vp9_read_bit_buffer *rb) {
+ VP9_COMMON *const cm = &pbi->common;
+ cm->display_width = cm->width;
+ cm->display_height = cm->height;
+ if (vp9_rb_read_bit(rb))
+ read_frame_size(cm, rb, &cm->display_width, &cm->display_height);
+}
+
+static void apply_frame_size(VP9D_COMP *pbi, int width, int height) {
+ VP9_COMMON *cm = &pbi->common;
+
+ if (cm->width != width || cm->height != height) {
+ if (!pbi->initial_width || !pbi->initial_height) {
+ if (vp9_alloc_frame_buffers(cm, width, height))
+ vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate frame buffers");
+ pbi->initial_width = width;
+ pbi->initial_height = height;
+ } else {
+ if (width > pbi->initial_width)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Frame width too large");
+
+ if (height > pbi->initial_height)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Frame height too large");
+ }
+
+ cm->width = width;
+ cm->height = height;
+
+ vp9_update_frame_size(cm);
+ }
+
+ vp9_realloc_frame_buffer(&cm->yv12_fb[cm->new_fb_idx], cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+ VP9BORDERINPIXELS);
+}
+
+static void setup_frame_size(VP9D_COMP *pbi,
+ struct vp9_read_bit_buffer *rb) {
+ VP9_COMMON *const cm = &pbi->common;
+ int width, height;
+ read_frame_size(cm, rb, &width, &height);
+ setup_display_size(pbi, rb);
+ apply_frame_size(pbi, width, height);
+}
+
+static void setup_frame_size_with_refs(VP9D_COMP *pbi,
+ struct vp9_read_bit_buffer *rb) {
+ VP9_COMMON *const cm = &pbi->common;
+
+ int width, height;
+ int found = 0, i;
+ for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
+ if (vp9_rb_read_bit(rb)) {
+ YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->active_ref_idx[i]];
+ width = cfg->y_crop_width;
+ height = cfg->y_crop_height;
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ read_frame_size(cm, rb, &width, &height);
+
+ if (!width || !height)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Referenced frame with invalid size");
+
+ setup_display_size(pbi, rb);
+ apply_frame_size(pbi, width, height);
+}
+
+static void update_frame_context(FRAME_CONTEXT *fc) {
+ vp9_copy(fc->pre_coef_probs, fc->coef_probs);
+ vp9_copy(fc->pre_y_mode_prob, fc->y_mode_prob);
+ vp9_copy(fc->pre_uv_mode_prob, fc->uv_mode_prob);
+ vp9_copy(fc->pre_partition_prob, fc->partition_prob[1]);
+ vp9_copy(fc->pre_intra_inter_prob, fc->intra_inter_prob);
+ vp9_copy(fc->pre_comp_inter_prob, fc->comp_inter_prob);
+ vp9_copy(fc->pre_single_ref_prob, fc->single_ref_prob);
+ vp9_copy(fc->pre_comp_ref_prob, fc->comp_ref_prob);
+ fc->pre_nmvc = fc->nmvc;
+ vp9_copy(fc->pre_switchable_interp_prob, fc->switchable_interp_prob);
+ vp9_copy(fc->pre_inter_mode_probs, fc->inter_mode_probs);
+ vp9_copy(fc->pre_tx_probs_8x8p, fc->tx_probs_8x8p);
+ vp9_copy(fc->pre_tx_probs_16x16p, fc->tx_probs_16x16p);
+ vp9_copy(fc->pre_tx_probs_32x32p, fc->tx_probs_32x32p);
+ vp9_copy(fc->pre_mbskip_probs, fc->mbskip_probs);
+
+ vp9_zero(fc->coef_counts);
+ vp9_zero(fc->eob_branch_counts);
+ vp9_zero(fc->y_mode_counts);
+ vp9_zero(fc->uv_mode_counts);
+ vp9_zero(fc->NMVcount);
+ vp9_zero(fc->inter_mode_counts);
+ vp9_zero(fc->partition_counts);
+ vp9_zero(fc->switchable_interp_count);
+ vp9_zero(fc->intra_inter_count);
+ vp9_zero(fc->comp_inter_count);
+ vp9_zero(fc->single_ref_count);
+ vp9_zero(fc->comp_ref_count);
+ vp9_zero(fc->tx_count_8x8p);
+ vp9_zero(fc->tx_count_16x16p);
+ vp9_zero(fc->tx_count_32x32p);
+ vp9_zero(fc->mbskip_count);
+}
+
+static void decode_tile(VP9D_COMP *pbi, vp9_reader *r) {
+ VP9_COMMON *const pc = &pbi->common;
+ int mi_row, mi_col;
+
+ for (mi_row = pc->cur_tile_mi_row_start;
+ mi_row < pc->cur_tile_mi_row_end; mi_row += 64 / MI_SIZE) {
+ // For a SB there are 2 left contexts, each pertaining to an MB row within the SB
+ vpx_memset(&pc->left_context, 0, sizeof(pc->left_context));
+ vpx_memset(pc->left_seg_context, 0, sizeof(pc->left_seg_context));
+ for (mi_col = pc->cur_tile_mi_col_start;
+ mi_col < pc->cur_tile_mi_col_end; mi_col += 64 / MI_SIZE)
+ decode_modes_sb(pbi, mi_row, mi_col, r, BLOCK_SIZE_SB64X64);
+ }
+}
+
+static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
+ int delta_log2_tiles;
+
+ vp9_get_tile_n_bits(cm, &cm->log2_tile_columns, &delta_log2_tiles);
+ while (delta_log2_tiles--) {
+ if (vp9_rb_read_bit(rb)) {
+ cm->log2_tile_columns++;
+ } else {
+ break;
+ }
+ }
+
+ cm->log2_tile_rows = vp9_rb_read_bit(rb);
+ if (cm->log2_tile_rows)
+ cm->log2_tile_rows += vp9_rb_read_bit(rb);
+
+ cm->tile_columns = 1 << cm->log2_tile_columns;
+ cm->tile_rows = 1 << cm->log2_tile_rows;
+}
+
+static void decode_tiles(VP9D_COMP *pbi,
+ const uint8_t *data, size_t first_partition_size,
+ vp9_reader *residual_bc) {
+ VP9_COMMON *const pc = &pbi->common;
+
+ const uint8_t *data_ptr = data + first_partition_size;
+ const uint8_t* const data_end = pbi->source + pbi->source_sz;
+ int tile_row, tile_col;
+
+ // Note: this memset assumes above_context[0], [1] and [2]
+ // are allocated as part of the same buffer.
+ vpx_memset(pc->above_context[0], 0, sizeof(ENTROPY_CONTEXT) * 2 *
+ MAX_MB_PLANE * mi_cols_aligned_to_sb(pc));
+
+ vpx_memset(pc->above_seg_context, 0, sizeof(PARTITION_CONTEXT) *
+ mi_cols_aligned_to_sb(pc));
+
+ if (pbi->oxcf.inv_tile_order) {
+ const int n_cols = pc->tile_columns;
+ const uint8_t *data_ptr2[4][1 << 6];
+ vp9_reader bc_bak = {0};
+
+ // pre-initialize the offsets, we're going to read in inverse order
+ data_ptr2[0][0] = data_ptr;
+ for (tile_row = 0; tile_row < pc->tile_rows; tile_row++) {
+ if (tile_row) {
+ const int size = read_be32(data_ptr2[tile_row - 1][n_cols - 1]);
+ data_ptr2[tile_row - 1][n_cols - 1] += 4;
+ data_ptr2[tile_row][0] = data_ptr2[tile_row - 1][n_cols - 1] + size;
+ }
+
+ for (tile_col = 1; tile_col < n_cols; tile_col++) {
+ const int size = read_be32(data_ptr2[tile_row][tile_col - 1]);
+ data_ptr2[tile_row][tile_col - 1] += 4;
+ data_ptr2[tile_row][tile_col] =
+ data_ptr2[tile_row][tile_col - 1] + size;
+ }
+ }
+
+ for (tile_row = 0; tile_row < pc->tile_rows; tile_row++) {
+ vp9_get_tile_row_offsets(pc, tile_row);
+ for (tile_col = n_cols - 1; tile_col >= 0; tile_col--) {
+ vp9_get_tile_col_offsets(pc, tile_col);
+ setup_token_decoder(pbi, data_ptr2[tile_row][tile_col],
+ data_end - data_ptr2[tile_row][tile_col],
+ residual_bc);
+ decode_tile(pbi, residual_bc);
+ if (tile_row == pc->tile_rows - 1 && tile_col == n_cols - 1)
+ bc_bak = *residual_bc;
+ }
+ }
+ *residual_bc = bc_bak;
+ } else {
+ int has_more;
+
+ for (tile_row = 0; tile_row < pc->tile_rows; tile_row++) {
+ vp9_get_tile_row_offsets(pc, tile_row);
+ for (tile_col = 0; tile_col < pc->tile_columns; tile_col++) {
+ size_t size;
+
+ vp9_get_tile_col_offsets(pc, tile_col);
+
+ has_more = tile_col < pc->tile_columns - 1 ||
+ tile_row < pc->tile_rows - 1;
+ if (has_more) {
+ if (!read_is_valid(data_ptr, 4, data_end))
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt tile length");
+
+ size = read_be32(data_ptr);
+ data_ptr += 4;
+ } else {
+ size = data_end - data_ptr;
+ }
+
+ setup_token_decoder(pbi, data_ptr, size, residual_bc);
+ decode_tile(pbi, residual_bc);
+ data_ptr += size;
+ }
+ }
+ }
+}
+
+static void check_sync_code(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
+ if (vp9_rb_read_literal(rb, 8) != SYNC_CODE_0 ||
+ vp9_rb_read_literal(rb, 8) != SYNC_CODE_1 ||
+ vp9_rb_read_literal(rb, 8) != SYNC_CODE_2) {
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Invalid frame sync code");
+ }
+}
+
+static void error_handler(void *data, size_t bit_offset) {
+ VP9_COMMON *const cm = (VP9_COMMON *)data;
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
+}
+
+static void setup_inter_inter(VP9_COMMON *cm) {
+ int i;
+
+ cm->allow_comp_inter_inter = 0;
+ for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
+ cm->allow_comp_inter_inter |= i > 0 &&
+ cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1];
+ }
+
+ if (cm->allow_comp_inter_inter) {
+ // Decide which reference is the fixed (always-used) one in compound
+ // inter-inter prediction, based on reference sign bias.
+ if (cm->ref_frame_sign_bias[LAST_FRAME] ==
+ cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
+ cm->comp_fixed_ref = ALTREF_FRAME;
+ cm->comp_var_ref[0] = LAST_FRAME;
+ cm->comp_var_ref[1] = GOLDEN_FRAME;
+ } else if (cm->ref_frame_sign_bias[LAST_FRAME] ==
+ cm->ref_frame_sign_bias[ALTREF_FRAME]) {
+ cm->comp_fixed_ref = GOLDEN_FRAME;
+ cm->comp_var_ref[0] = LAST_FRAME;
+ cm->comp_var_ref[1] = ALTREF_FRAME;
+ } else {
+ cm->comp_fixed_ref = LAST_FRAME;
+ cm->comp_var_ref[0] = GOLDEN_FRAME;
+ cm->comp_var_ref[1] = ALTREF_FRAME;
+ }
+ }
+}
+
+#define RESERVED \
+ if (vp9_rb_read_bit(rb)) \
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, \
+ "Reserved bit must be unset")
+
+static size_t read_uncompressed_header(VP9D_COMP *pbi,
+ struct vp9_read_bit_buffer *rb) {
+ VP9_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ int i;
+
+ cm->last_frame_type = cm->frame_type;
+
+ if (vp9_rb_read_literal(rb, 2) != 0x2)
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Invalid frame marker");
+
+ cm->version = vp9_rb_read_bit(rb);
+ RESERVED;
+
+ if (vp9_rb_read_bit(rb)) {
+ // show an existing frame directly
+ int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)];
+ ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->new_fb_idx, frame_to_show);
+ pbi->refresh_frame_flags = 0;
+ cm->filter_level = 0;
+ return 0;
+ }
+
+ cm->frame_type = (FRAME_TYPE) vp9_rb_read_bit(rb);
+ cm->show_frame = vp9_rb_read_bit(rb);
+ cm->error_resilient_mode = vp9_rb_read_bit(rb);
+
+ if (cm->frame_type == KEY_FRAME) {
+ int csp;
+
+ check_sync_code(cm, rb);
+
+ csp = vp9_rb_read_literal(rb, 3); // colorspace
+ if (csp != 7) { // != sRGB
+ vp9_rb_read_bit(rb); // [16,235] (including xvycc) vs [0,255] range
+ if (cm->version == 1) {
+ cm->subsampling_x = vp9_rb_read_bit(rb);
+ cm->subsampling_y = vp9_rb_read_bit(rb);
+ vp9_rb_read_bit(rb); // has extra plane
+ } else {
+ cm->subsampling_y = cm->subsampling_x = 1;
+ }
+ } else {
+ if (cm->version == 1) {
+ cm->subsampling_y = cm->subsampling_x = 0;
+ vp9_rb_read_bit(rb); // has extra plane
+ } else {
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "RGB not supported in profile 0");
+ }
+ }
+
+ pbi->refresh_frame_flags = (1 << NUM_REF_FRAMES) - 1;
+
+ for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i)
+ cm->active_ref_idx[i] = cm->new_fb_idx;
+
+ setup_frame_size(pbi, rb);
+ } else {
+ cm->intra_only = cm->show_frame ? 0 : vp9_rb_read_bit(rb);
+
+ cm->reset_frame_context = cm->error_resilient_mode ?
+ 0 : vp9_rb_read_literal(rb, 2);
+
+ if (cm->intra_only) {
+ check_sync_code(cm, rb);
+
+ pbi->refresh_frame_flags = vp9_rb_read_literal(rb, NUM_REF_FRAMES);
+ setup_frame_size(pbi, rb);
+ } else {
+ pbi->refresh_frame_flags = vp9_rb_read_literal(rb, NUM_REF_FRAMES);
+
+ for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
+ const int ref = vp9_rb_read_literal(rb, NUM_REF_FRAMES_LG2);
+ cm->active_ref_idx[i] = cm->ref_frame_map[ref];
+ cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb);
+ }
+
+ setup_frame_size_with_refs(pbi, rb);
+
+ xd->allow_high_precision_mv = vp9_rb_read_bit(rb);
+ cm->mcomp_filter_type = read_interp_filter_type(rb);
+
+ for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i)
+ vp9_setup_scale_factors(cm, i);
+
+ setup_inter_inter(cm);
+ }
+ }
+
+ if (!cm->error_resilient_mode) {
+ cm->refresh_frame_context = vp9_rb_read_bit(rb);
+ cm->frame_parallel_decoding_mode = vp9_rb_read_bit(rb);
+ } else {
+ cm->refresh_frame_context = 0;
+ cm->frame_parallel_decoding_mode = 1;
+ }
+
+ cm->frame_context_idx = vp9_rb_read_literal(rb, NUM_FRAME_CONTEXTS_LG2);
+
+ if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode || cm->intra_only)
+ vp9_setup_past_independence(cm, xd);
+
+ setup_loopfilter(pbi, rb);
+ setup_quantization(pbi, rb);
+ setup_segmentation(pbi, rb);
+
+ setup_tile_info(cm, rb);
+
+ return vp9_rb_read_literal(rb, 16);
+}
+
+int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
+ int i;
+ vp9_reader header_bc, residual_bc;
+ VP9_COMMON *const pc = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+
+ const uint8_t *data = pbi->source;
+ const uint8_t *data_end = pbi->source + pbi->source_sz;
+
+ struct vp9_read_bit_buffer rb = { data, data_end, 0,
+ pc, error_handler };
+ const size_t first_partition_size = read_uncompressed_header(pbi, &rb);
+ const int keyframe = pc->frame_type == KEY_FRAME;
+ YV12_BUFFER_CONFIG *new_fb = &pc->yv12_fb[pc->new_fb_idx];
+
+ if (!first_partition_size) {
+ // showing a frame directly
+ *p_data_end = data + 1;
+ return 0;
+ }
+ data += vp9_rb_bytes_read(&rb);
+ xd->corrupted = 0;
+ new_fb->corrupted = 0;
+
+ if (!pbi->decoded_key_frame && !keyframe)
+ return -1;
+
+ if (!read_is_valid(data, first_partition_size, data_end))
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt header length");
+
+ xd->mode_info_context = pc->mi;
+ xd->prev_mode_info_context = pc->prev_mi;
+ xd->frame_type = pc->frame_type;
+ xd->mode_info_stride = pc->mode_info_stride;
+
+ if (vp9_reader_init(&header_bc, data, first_partition_size))
+ vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate bool decoder 0");
+
+ mb_init_dequantizer(pc, &pbi->mb); // MB level dequantizer setup
+
+ if (!keyframe)
+ vp9_setup_interp_filters(xd, pc->mcomp_filter_type, pc);
+
+ pc->fc = pc->frame_contexts[pc->frame_context_idx];
+
+ update_frame_context(&pc->fc);
+
+ setup_txfm_mode(pc, xd->lossless, &header_bc);
+
+ read_coef_probs(pbi, &header_bc);
+
+ // Initialize xd pointers. Any reference should do for xd->pre, so use 0.
+ setup_pre_planes(xd, &pc->yv12_fb[pc->active_ref_idx[0]], NULL,
+ 0, 0, NULL, NULL);
+ setup_dst_planes(xd, new_fb, 0, 0);
+
+ // Create the segmentation map structure and set to 0
+ if (!pc->last_frame_seg_map)
+ CHECK_MEM_ERROR(pc->last_frame_seg_map,
+ vpx_calloc((pc->mi_rows * pc->mi_cols), 1));
+
+ vp9_setup_block_dptrs(xd, pc->subsampling_x, pc->subsampling_y);
+
+ // clear out the coeff buffer
+ for (i = 0; i < MAX_MB_PLANE; ++i)
+ vp9_zero(xd->plane[i].qcoeff);
+
+ set_prev_mi(pc);
+
+ vp9_decode_mode_mvs_init(pbi, &header_bc);
+
+ decode_tiles(pbi, data, first_partition_size, &residual_bc);
+
+ pc->last_width = pc->width;
+ pc->last_height = pc->height;
+
+ new_fb->corrupted = vp9_reader_has_error(&header_bc) | xd->corrupted;
+
+ if (!pbi->decoded_key_frame) {
+ if (keyframe && !new_fb->corrupted)
+ pbi->decoded_key_frame = 1;
+ else
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "A stream must start with a complete key frame");
+ }
+
+ // Adaptation
+ if (!pc->error_resilient_mode && !pc->frame_parallel_decoding_mode) {
+ vp9_adapt_coef_probs(pc);
+
+ if ((!keyframe) && (!pc->intra_only)) {
+ vp9_adapt_mode_probs(pc);
+ vp9_adapt_mode_context(pc);
+ vp9_adapt_nmv_probs(pc, xd->allow_high_precision_mv);
+ }
+ }
+
+ if (pc->refresh_frame_context)
+ pc->frame_contexts[pc->frame_context_idx] = pc->fc;
+
+ *p_data_end = vp9_reader_find_end(&residual_bc);
+ return 0;
+}
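One detail of decode_tiles() worth spelling out: every tile except the last one in the frame is preceded by a 4-byte big-endian length (read_be32()), and the final tile simply runs to the end of the buffer. A minimal sketch of that walk, assuming a flat buffer and a caller-supplied decode callback; be32()/walk_tiles() are illustrative names and all error handling is elided:

    #include <stddef.h>
    #include <stdint.h>

    static uint32_t be32(const uint8_t *p) {
      return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
    }

    static void walk_tiles(const uint8_t *data, const uint8_t *end, int n_tiles,
                           void (*decode)(const uint8_t *, size_t)) {
      int i;
      for (i = 0; i < n_tiles; i++) {
        size_t size;
        if (i < n_tiles - 1) {   /* all but the last tile carry a length header */
          size = be32(data);
          data += 4;
        } else {
          size = (size_t)(end - data);  /* last tile: rest of the buffer */
        }
        decode(data, size);
        data += size;
      }
    }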
diff --git a/libvpx/vp9/decoder/vp9_decodframe.h b/libvpx/vp9/decoder/vp9_decodframe.h
new file mode 100644
index 0000000..66e951d
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_decodframe.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_DECODER_VP9_DECODFRAME_H_
+#define VP9_DECODER_VP9_DECODFRAME_H_
+
+struct VP9Common;
+struct VP9Decompressor;
+
+void vp9_init_dequantizer(struct VP9Common *pc);
+int vp9_decode_frame(struct VP9Decompressor *cpi, const uint8_t **p_data_end);
+vp9_prob vp9_read_prob_diff_update(vp9_reader *r, int oldp);
+
+#endif // VP9_DECODER_VP9_DECODFRAME_H_
diff --git a/libvpx/vp9/decoder/vp9_detokenize.c b/libvpx/vp9/decoder/vp9_detokenize.c
new file mode 100644
index 0000000..3bbb212
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_detokenize.c
@@ -0,0 +1,347 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/mem.h"
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_seg_common.h"
+
+#include "vp9/decoder/vp9_detokenize.h"
+#include "vp9/decoder/vp9_onyxd_int.h"
+
+#if CONFIG_BALANCED_COEFTREE
+#define ZERO_CONTEXT_NODE 0
+#define EOB_CONTEXT_NODE 1
+#else
+#define EOB_CONTEXT_NODE 0
+#define ZERO_CONTEXT_NODE 1
+#endif
+
+#define ONE_CONTEXT_NODE 2
+#define LOW_VAL_CONTEXT_NODE 3
+#define TWO_CONTEXT_NODE 4
+#define THREE_CONTEXT_NODE 5
+#define HIGH_LOW_CONTEXT_NODE 6
+#define CAT_ONE_CONTEXT_NODE 7
+#define CAT_THREEFOUR_CONTEXT_NODE 8
+#define CAT_THREE_CONTEXT_NODE 9
+#define CAT_FIVE_CONTEXT_NODE 10
+
+#define CAT1_MIN_VAL 5
+#define CAT2_MIN_VAL 7
+#define CAT3_MIN_VAL 11
+#define CAT4_MIN_VAL 19
+#define CAT5_MIN_VAL 35
+#define CAT6_MIN_VAL 67
+#define CAT1_PROB0 159
+#define CAT2_PROB0 145
+#define CAT2_PROB1 165
+
+#define CAT3_PROB0 140
+#define CAT3_PROB1 148
+#define CAT3_PROB2 173
+
+#define CAT4_PROB0 135
+#define CAT4_PROB1 140
+#define CAT4_PROB2 155
+#define CAT4_PROB3 176
+
+#define CAT5_PROB0 130
+#define CAT5_PROB1 134
+#define CAT5_PROB2 141
+#define CAT5_PROB3 157
+#define CAT5_PROB4 180
+
+static const vp9_prob cat6_prob[15] = {
+ 254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0
+};
+
+DECLARE_ALIGNED(16, extern const uint8_t,
+ vp9_pt_energy_class[MAX_ENTROPY_TOKENS]);
+#define INCREMENT_COUNT(token) \
+ do { \
+ coef_counts[type][ref][band][pt] \
+ [token >= TWO_TOKEN ? \
+ (token == DCT_EOB_TOKEN ? DCT_EOB_MODEL_TOKEN : TWO_TOKEN) : \
+ token]++; \
+ token_cache[scan[c]] = vp9_pt_energy_class[token]; \
+ } while (0)
+
+#define WRITE_COEF_CONTINUE(val, token) \
+ { \
+ qcoeff_ptr[scan[c]] = vp9_read_and_apply_sign(r, val) * \
+ dq[c > 0] / (1 + (txfm_size == TX_32X32)); \
+ INCREMENT_COUNT(token); \
+ c++; \
+ continue; \
+ }
+
+#define ADJUST_COEF(prob, bits_count) \
+ do { \
+ if (vp9_read(r, prob)) \
+ val += 1 << bits_count; \
+ } while (0)
+
+static int decode_coefs(FRAME_CONTEXT *fc, const MACROBLOCKD *xd,
+ vp9_reader *r, int block_idx,
+ PLANE_TYPE type, int seg_eob, int16_t *qcoeff_ptr,
+ TX_SIZE txfm_size, const int16_t *dq,
+ ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L) {
+ ENTROPY_CONTEXT above_ec, left_ec;
+ int pt, c = 0, pad, default_eob;
+ int band;
+ vp9_prob (*coef_probs)[PREV_COEF_CONTEXTS][UNCONSTRAINED_NODES];
+ vp9_prob coef_probs_full[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
+ uint8_t load_map[COEF_BANDS][PREV_COEF_CONTEXTS] = {
+ {0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0},
+ };
+
+ vp9_prob *prob;
+ vp9_coeff_count_model *coef_counts;
+ const int ref = xd->mode_info_context->mbmi.ref_frame[0] != INTRA_FRAME;
+ TX_TYPE tx_type = DCT_DCT;
+ const int *scan, *nb;
+ uint8_t token_cache[1024];
+ const uint8_t * band_translate;
+#if CONFIG_BALANCED_COEFTREE
+ int skip_eob_node = 0;
+#endif
+
+ coef_probs = fc->coef_probs[txfm_size][type][ref];
+ coef_counts = fc->coef_counts[txfm_size];
+ switch (txfm_size) {
+ default:
+ case TX_4X4: {
+ tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+ get_tx_type_4x4(xd, block_idx) : DCT_DCT;
+ scan = get_scan_4x4(tx_type);
+ above_ec = A[0] != 0;
+ left_ec = L[0] != 0;
+ default_eob = 16;
+ band_translate = vp9_coefband_trans_4x4;
+ break;
+ }
+ case TX_8X8: {
+ const BLOCK_SIZE_TYPE sb_type = xd->mode_info_context->mbmi.sb_type;
+ const int sz = 1 + b_width_log2(sb_type);
+ const int x = block_idx & ((1 << sz) - 1);
+ const int y = block_idx - x;
+ tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+ get_tx_type_8x8(xd, y + (x >> 1)) : DCT_DCT;
+ scan = get_scan_8x8(tx_type);
+ above_ec = (A[0] + A[1]) != 0;
+ left_ec = (L[0] + L[1]) != 0;
+ default_eob = 64;
+ band_translate = vp9_coefband_trans_8x8plus;
+ break;
+ }
+ case TX_16X16: {
+ const BLOCK_SIZE_TYPE sb_type = xd->mode_info_context->mbmi.sb_type;
+ const int sz = 2 + b_width_log2(sb_type);
+ const int x = block_idx & ((1 << sz) - 1);
+ const int y = block_idx - x;
+ tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+ get_tx_type_16x16(xd, y + (x >> 2)) : DCT_DCT;
+ scan = get_scan_16x16(tx_type);
+ above_ec = (A[0] + A[1] + A[2] + A[3]) != 0;
+ left_ec = (L[0] + L[1] + L[2] + L[3]) != 0;
+ default_eob = 256;
+ band_translate = vp9_coefband_trans_8x8plus;
+ break;
+ }
+ case TX_32X32:
+ scan = vp9_default_scan_32x32;
+ above_ec = (A[0] + A[1] + A[2] + A[3] + A[4] + A[5] + A[6] + A[7]) != 0;
+ left_ec = (L[0] + L[1] + L[2] + L[3] + L[4] + L[5] + L[6] + L[7]) != 0;
+ default_eob = 1024;
+ band_translate = vp9_coefband_trans_8x8plus;
+ break;
+ }
+
+ pt = combine_entropy_contexts(above_ec, left_ec);
+ nb = vp9_get_coef_neighbors_handle(scan, &pad);
+
+ while (1) {
+ int val;
+ const uint8_t *cat6 = cat6_prob;
+ if (c >= seg_eob)
+ break;
+ if (c)
+ pt = vp9_get_coef_context(scan, nb, pad, token_cache,
+ c, default_eob);
+ band = get_coef_band(band_translate, c);
+ prob = coef_probs[band][pt];
+#if !CONFIG_BALANCED_COEFTREE
+ fc->eob_branch_counts[txfm_size][type][ref][band][pt]++;
+ if (!vp9_read(r, prob[EOB_CONTEXT_NODE]))
+ break;
+
+SKIP_START:
+#endif
+ if (c >= seg_eob)
+ break;
+ if (c)
+ pt = vp9_get_coef_context(scan, nb, pad, token_cache,
+ c, default_eob);
+ band = get_coef_band(band_translate, c);
+ prob = coef_probs[band][pt];
+
+ if (!vp9_read(r, prob[ZERO_CONTEXT_NODE])) {
+ INCREMENT_COUNT(ZERO_TOKEN);
+ ++c;
+#if CONFIG_BALANCED_COEFTREE
+ skip_eob_node = 1;
+ continue;
+#else
+ goto SKIP_START;
+#endif
+ }
+#if CONFIG_BALANCED_COEFTREE
+ if (!skip_eob_node) {
+ fc->eob_branch_counts[txfm_size][type][ref][band][pt]++;
+ if (!vp9_read(r, prob[EOB_CONTEXT_NODE]))
+ break;
+ }
+ skip_eob_node = 0;
+#endif
+
+ // ONE_CONTEXT_NODE_0_
+ if (!vp9_read(r, prob[ONE_CONTEXT_NODE])) {
+ WRITE_COEF_CONTINUE(1, ONE_TOKEN);
+ }
+ // Load full probabilities if not already loaded
+ if (!load_map[band][pt]) {
+ vp9_model_to_full_probs(coef_probs[band][pt],
+ coef_probs_full[band][pt]);
+ load_map[band][pt] = 1;
+ }
+ prob = coef_probs_full[band][pt];
+ // LOW_VAL_CONTEXT_NODE_0_
+ if (!vp9_read(r, prob[LOW_VAL_CONTEXT_NODE])) {
+ if (!vp9_read(r, prob[TWO_CONTEXT_NODE])) {
+ WRITE_COEF_CONTINUE(2, TWO_TOKEN);
+ }
+ if (!vp9_read(r, prob[THREE_CONTEXT_NODE])) {
+ WRITE_COEF_CONTINUE(3, THREE_TOKEN);
+ }
+ WRITE_COEF_CONTINUE(4, FOUR_TOKEN);
+ }
+ // HIGH_LOW_CONTEXT_NODE_0_
+ if (!vp9_read(r, prob[HIGH_LOW_CONTEXT_NODE])) {
+ if (!vp9_read(r, prob[CAT_ONE_CONTEXT_NODE])) {
+ val = CAT1_MIN_VAL;
+ ADJUST_COEF(CAT1_PROB0, 0);
+ WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY1);
+ }
+ val = CAT2_MIN_VAL;
+ ADJUST_COEF(CAT2_PROB1, 1);
+ ADJUST_COEF(CAT2_PROB0, 0);
+ WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY2);
+ }
+ // CAT_THREEFOUR_CONTEXT_NODE_0_
+ if (!vp9_read(r, prob[CAT_THREEFOUR_CONTEXT_NODE])) {
+ if (!vp9_read(r, prob[CAT_THREE_CONTEXT_NODE])) {
+ val = CAT3_MIN_VAL;
+ ADJUST_COEF(CAT3_PROB2, 2);
+ ADJUST_COEF(CAT3_PROB1, 1);
+ ADJUST_COEF(CAT3_PROB0, 0);
+ WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY3);
+ }
+ val = CAT4_MIN_VAL;
+ ADJUST_COEF(CAT4_PROB3, 3);
+ ADJUST_COEF(CAT4_PROB2, 2);
+ ADJUST_COEF(CAT4_PROB1, 1);
+ ADJUST_COEF(CAT4_PROB0, 0);
+ WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY4);
+ }
+ // CAT_FIVE_CONTEXT_NODE_0_:
+ if (!vp9_read(r, prob[CAT_FIVE_CONTEXT_NODE])) {
+ val = CAT5_MIN_VAL;
+ ADJUST_COEF(CAT5_PROB4, 4);
+ ADJUST_COEF(CAT5_PROB3, 3);
+ ADJUST_COEF(CAT5_PROB2, 2);
+ ADJUST_COEF(CAT5_PROB1, 1);
+ ADJUST_COEF(CAT5_PROB0, 0);
+ WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY5);
+ }
+ val = 0;
+ while (*cat6) {
+ val = (val << 1) | vp9_read(r, *cat6++);
+ }
+ val += CAT6_MIN_VAL;
+ WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY6);
+ }
+
+ if (c < seg_eob)
+ coef_counts[type][ref][band][pt][DCT_EOB_MODEL_TOKEN]++;
+
+ return c;
+}
+
+static int get_eob(MACROBLOCKD* const xd, int segment_id, int eob_max) {
+ return vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP) ? 0 : eob_max;
+}
+
+struct decode_block_args {
+ VP9D_COMP *pbi;
+ vp9_reader *r;
+ int *eobtotal;
+};
+
+static void decode_block(int plane, int block,
+ BLOCK_SIZE_TYPE bsize,
+ int ss_txfrm_size,
+ void *argv) {
+ const struct decode_block_args* const arg = argv;
+ const int bw = b_width_log2(bsize);
+
+ // find the maximum eob for this transform size, adjusted by segment
+ MACROBLOCKD *xd = &arg->pbi->mb;
+ struct macroblockd_plane* pd = &xd->plane[plane];
+ const int segment_id = xd->mode_info_context->mbmi.segment_id;
+ const TX_SIZE ss_tx_size = ss_txfrm_size / 2;
+ const int seg_eob = get_eob(xd, segment_id, 16 << ss_txfrm_size);
+ const int off = block >> ss_txfrm_size;
+ const int mod = bw - ss_tx_size - pd->subsampling_x;
+ const int aoff = (off & ((1 << mod) - 1)) << ss_tx_size;
+ const int loff = (off >> mod) << ss_tx_size;
+
+ ENTROPY_CONTEXT *A = pd->above_context + aoff;
+ ENTROPY_CONTEXT *L = pd->left_context + loff;
+ const int eob = decode_coefs(&arg->pbi->common.fc, xd, arg->r, block,
+ pd->plane_type, seg_eob,
+ BLOCK_OFFSET(pd->qcoeff, block, 16),
+ ss_tx_size, pd->dequant, A, L);
+
+ if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) {
+ set_contexts_on_border(xd, bsize, plane, ss_tx_size, eob, aoff, loff, A, L);
+ } else {
+ int pt;
+ for (pt = 0; pt < (1 << ss_tx_size); pt++)
+ A[pt] = L[pt] = eob > 0;
+ }
+ pd->eobs[block] = eob;
+ *arg->eobtotal += eob;
+}
+
+int vp9_decode_tokens(VP9D_COMP *pbi, vp9_reader *r, BLOCK_SIZE_TYPE bsize) {
+ int eobtotal = 0;
+ struct decode_block_args args = {pbi, r, &eobtotal};
+ foreach_transformed_block(&pbi->mb, bsize, decode_block, &args);
+ return eobtotal;
+}
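The ADJUST_COEF calls in decode_coefs() build each coefficient magnitude as the category minimum plus extra bits, read most-significant first, each with its own probability. A worked sketch for category 3 (values 11..18, i.e. CAT3_MIN_VAL up to CAT4_MIN_VAL - 1), with read_bit() as a stand-in for vp9_read():

    /* Category-3 magnitude: CAT3_MIN_VAL (11) plus three extra bits. */
    static int decode_cat3(int (*read_bit)(int prob)) {
      int val = 11;                      /* CAT3_MIN_VAL */
      if (read_bit(173)) val += 1 << 2;  /* CAT3_PROB2 */
      if (read_bit(148)) val += 1 << 1;  /* CAT3_PROB1 */
      if (read_bit(140)) val += 1 << 0;  /* CAT3_PROB0 */
      return val;                        /* 11..18 */
    }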
diff --git a/libvpx/vp9/decoder/vp9_detokenize.h b/libvpx/vp9/decoder/vp9_detokenize.h
new file mode 100644
index 0000000..d46b596
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_detokenize.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_DECODER_VP9_DETOKENIZE_H_
+#define VP9_DECODER_VP9_DETOKENIZE_H_
+
+#include "vp9/decoder/vp9_onyxd_int.h"
+
+int vp9_decode_tokens(VP9D_COMP* pbi, vp9_reader *r, BLOCK_SIZE_TYPE bsize);
+
+#endif // VP9_DECODER_VP9_DETOKENIZE_H_
diff --git a/libvpx/vp9/decoder/vp9_idct_blk.c b/libvpx/vp9/decoder/vp9_idct_blk.c
new file mode 100644
index 0000000..c52963c
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_idct_blk.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9_rtcd.h"
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/decoder/vp9_idct_blk.h"
+
+static void add_constant_residual(const int16_t diff, uint8_t *dest, int stride,
+ int width, int height) {
+ int r, c;
+
+ for (r = 0; r < height; r++) {
+ for (c = 0; c < width; c++)
+ dest[c] = clip_pixel(diff + dest[c]);
+
+ dest += stride;
+ }
+}
+
+void vp9_add_constant_residual_8x8_c(const int16_t diff, uint8_t *dest,
+ int stride) {
+ add_constant_residual(diff, dest, stride, 8, 8);
+}
+
+void vp9_add_constant_residual_16x16_c(const int16_t diff, uint8_t *dest,
+ int stride) {
+ add_constant_residual(diff, dest, stride, 16, 16);
+}
+
+void vp9_add_constant_residual_32x32_c(const int16_t diff, uint8_t *dest,
+ int stride) {
+ add_constant_residual(diff, dest, stride, 32, 32);
+}
+
+void vp9_iht_add_c(TX_TYPE tx_type, int16_t *input, uint8_t *dest, int stride,
+ int eob) {
+ if (tx_type == DCT_DCT) {
+ vp9_idct_add(input, dest, stride, eob);
+ } else {
+ vp9_short_iht4x4_add(input, dest, stride, tx_type);
+ vpx_memset(input, 0, 32);
+ }
+}
+
+void vp9_iht_add_8x8_c(TX_TYPE tx_type, int16_t *input, uint8_t *dest,
+ int stride, int eob) {
+ if (tx_type == DCT_DCT) {
+ vp9_idct_add_8x8(input, dest, stride, eob);
+ } else {
+ if (eob > 0) {
+ vp9_short_iht8x8_add(input, dest, stride, tx_type);
+ vpx_memset(input, 0, 128);
+ }
+ }
+}
+
+void vp9_idct_add_c(int16_t *input, uint8_t *dest, int stride, int eob) {
+ if (eob > 1) {
+ vp9_short_idct4x4_add(input, dest, stride);
+ vpx_memset(input, 0, 32);
+ } else {
+ vp9_dc_only_idct_add(input[0], dest, dest, stride, stride);
+ ((int *)input)[0] = 0;
+ }
+}
+
+void vp9_idct_add_lossless_c(int16_t *input, uint8_t *dest, int stride,
+ int eob) {
+ if (eob > 1) {
+ vp9_short_iwalsh4x4_add(input, dest, stride);
+ vpx_memset(input, 0, 32);
+ } else {
+ vp9_short_iwalsh4x4_1_add_c(input, dest, stride);
+ ((int *)input)[0] = 0;
+ }
+}
+
+void vp9_idct_add_8x8_c(int16_t *input, uint8_t *dest, int stride, int eob) {
+ // If dc is 1, then input[0] is the reconstructed value and does not need
+ // dequantization. Also, when dc is 1, dc is counted in eobs, i.e. eobs >= 1.
+
+ // The calculation can be simplified if there are not many non-zero dct
+ // coefficients. Use eobs to decide what to do.
+ // TODO(yunqingwang): "eobs = 1" case is also handled in vp9_short_idct8x8_c.
+ // Combine that with code here.
+ if (eob) {
+ if (eob == 1) {
+ // DC only DCT coefficient
+ int16_t in = input[0];
+ int16_t out;
+
+ // Note: the idct1 will need to be modified accordingly whenever
+ // vp9_short_idct8x8_c() is modified.
+ vp9_short_idct1_8x8_c(&in, &out);
+ input[0] = 0;
+
+ vp9_add_constant_residual_8x8(out, dest, stride);
+ } else {
+ vp9_short_idct8x8_add(input, dest, stride);
+ vpx_memset(input, 0, 128);
+ }
+ }
+}
+
+void vp9_iht_add_16x16_c(TX_TYPE tx_type, int16_t *input, uint8_t *dest,
+ int stride, int eob) {
+ if (tx_type == DCT_DCT) {
+ vp9_idct_add_16x16(input, dest, stride, eob);
+ } else {
+ if (eob > 0) {
+ vp9_short_iht16x16_add(input, dest, stride, tx_type);
+ vpx_memset(input, 0, 512);
+ }
+ }
+}
+
+void vp9_idct_add_16x16_c(int16_t *input, uint8_t *dest, int stride, int eob) {
+ /* The calculation can be simplified if there are not many non-zero dct
+ * coefficients. Use eobs to separate different cases. */
+ if (eob) {
+ if (eob == 1) {
+ /* DC only DCT coefficient. */
+ int16_t in = input[0];
+ int16_t out;
+ /* Note: the idct1 will need to be modified accordingly whenever
+ * vp9_short_idct16x16() is modified. */
+ vp9_short_idct1_16x16_c(&in, &out);
+ input[0] = 0;
+
+ vp9_add_constant_residual_16x16(out, dest, stride);
+ } else {
+ vp9_short_idct16x16_add(input, dest, stride);
+ vpx_memset(input, 0, 512);
+ }
+ }
+}
+
+void vp9_idct_add_32x32_c(int16_t *input, uint8_t *dest, int stride, int eob) {
+ DECLARE_ALIGNED_ARRAY(16, int16_t, output, 1024);
+
+ if (eob) {
+ if (eob == 1) {
+ vp9_short_idct1_32x32(input, output);
+ vp9_add_constant_residual_32x32(output[0], dest, stride);
+ input[0] = 0;
+ } else {
+ vp9_short_idct32x32_add(input, dest, stride);
+ vpx_memset(input, 0, 2048);
+ }
+ }
+}
+
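add_constant_residual() is the heart of the eob == 1 fast path above: when only the DC coefficient is present, the inverse transform reduces to a single constant added uniformly to the predicted block. A self-contained sketch of the same idea, with clip255() standing in for libvpx's clip_pixel():

    #include <stdint.h>

    static uint8_t clip255(int v) {
      return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }

    /* Add one constant residual to a w-by-h block, clamping to [0, 255]. */
    static void add_dc_residual(int16_t dc, uint8_t *dst, int stride,
                                int w, int h) {
      int r, c;
      for (r = 0; r < h; r++, dst += stride)
        for (c = 0; c < w; c++)
          dst[c] = clip255(dst[c] + dc);
    }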
diff --git a/libvpx/vp9/decoder/vp9_idct_blk.h b/libvpx/vp9/decoder/vp9_idct_blk.h
new file mode 100644
index 0000000..1810bd0
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_idct_blk.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_DECODER_VP9_IDCT_BLK_H_
+#define VP9_DECODER_VP9_IDCT_BLK_H_
+
+#include "vp9/common/vp9_blockd.h"
+
+
+void vp9_idct_add_lossless_c(int16_t *input, unsigned char *dest, int stride,
+ int eob);
+
+void vp9_iht_add_c(TX_TYPE tx_type, int16_t *input, unsigned char *dest,
+ int stride, int eob);
+
+void vp9_iht_add_8x8_c(TX_TYPE tx_type, int16_t *input, unsigned char *dest,
+ int stride, int eob);
+
+void vp9_iht_add_16x16_c(TX_TYPE tx_type, int16_t *input, unsigned char *dest,
+ int stride, int eob);
+
+#endif // VP9_DECODER_VP9_IDCT_BLK_H_
diff --git a/libvpx/vp9/decoder/vp9_onyxd.h b/libvpx/vp9/decoder/vp9_onyxd.h
new file mode 100644
index 0000000..cd5b750
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_onyxd.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_ONYXD_H_
+#define VP9_COMMON_VP9_ONYXD_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "vpx_scale/yv12config.h"
+#include "vp9/common/vp9_ppflags.h"
+#include "vpx/vpx_codec.h"
+
+typedef void *VP9D_PTR;
+
+typedef struct {
+ int width;
+ int height;
+ int version;
+ int postprocess;
+ int max_threads;
+ int inv_tile_order;
+ int input_partition;
+} VP9D_CONFIG;
+
+typedef enum {
+ VP9_LAST_FLAG = 1,
+ VP9_GOLD_FLAG = 2,
+ VP9_ALT_FLAG = 4
+} VP9_REFFRAME;
+
+void vp9_initialize_dec();
+
+int vp9_receive_compressed_data(VP9D_PTR comp,
+ uint64_t size, const uint8_t **dest,
+ int64_t time_stamp);
+
+int vp9_get_raw_frame(VP9D_PTR comp, YV12_BUFFER_CONFIG *sd,
+ int64_t *time_stamp, int64_t *time_end_stamp,
+ vp9_ppflags_t *flags);
+
+vpx_codec_err_t vp9_copy_reference_dec(VP9D_PTR comp,
+ VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
+
+vpx_codec_err_t vp9_set_reference_dec(VP9D_PTR comp,
+ VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
+
+int vp9_get_reference_dec(VP9D_PTR ptr, int index, YV12_BUFFER_CONFIG **fb);
+
+
+VP9D_PTR vp9_create_decompressor(VP9D_CONFIG *oxcf);
+
+void vp9_remove_decompressor(VP9D_PTR comp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // VP9_COMMON_VP9_ONYXD_H_
diff --git a/libvpx/vp9/decoder/vp9_onyxd_if.c b/libvpx/vp9/decoder/vp9_onyxd_if.c
new file mode 100644
index 0000000..3cef88b
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_onyxd_if.c
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <stdio.h>
+#include <assert.h>
+
+#include "vp9/common/vp9_onyxc_int.h"
+#if CONFIG_POSTPROC
+#include "vp9/common/vp9_postproc.h"
+#endif
+#include "vp9/decoder/vp9_onyxd.h"
+#include "vp9/decoder/vp9_onyxd_int.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp9/common/vp9_alloccommon.h"
+#include "vp9/common/vp9_loopfilter.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vpx_ports/vpx_timer.h"
+#include "vp9/decoder/vp9_decodframe.h"
+#include "vp9/decoder/vp9_detokenize.h"
+#include "./vpx_scale_rtcd.h"
+
+#define WRITE_RECON_BUFFER 0
+#if WRITE_RECON_BUFFER == 1
+static void recon_write_yuv_frame(const char *name,
+ const YV12_BUFFER_CONFIG *s,
+ int w, int _h) {
+ FILE *yuv_file = fopen(name, "ab");
+ const uint8_t *src = s->y_buffer;
+ int h = _h;
+
+ do {
+ fwrite(src, w, 1, yuv_file);
+ src += s->y_stride;
+ } while (--h);
+
+ src = s->u_buffer;
+ h = (_h + 1) >> 1;
+ w = (w + 1) >> 1;
+
+ do {
+ fwrite(src, w, 1, yuv_file);
+ src += s->uv_stride;
+ } while (--h);
+
+ src = s->v_buffer;
+ h = (_h + 1) >> 1;
+
+ do {
+ fwrite(src, w, 1, yuv_file);
+ src += s->uv_stride;
+ } while (--h);
+
+ fclose(yuv_file);
+}
+#endif
+#if WRITE_RECON_BUFFER == 2
+void write_dx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame) {
+ // write the frame
+ FILE *yframe;
+ int i;
+ char filename[255];
+
+ sprintf(filename, "dx\\y%04d.raw", this_frame);
+ yframe = fopen(filename, "wb");
+
+ for (i = 0; i < frame->y_height; i++)
+ fwrite(frame->y_buffer + i * frame->y_stride,
+ frame->y_width, 1, yframe);
+
+ fclose(yframe);
+ sprintf(filename, "dx\\u%04d.raw", this_frame);
+ yframe = fopen(filename, "wb");
+
+ for (i = 0; i < frame->uv_height; i++)
+ fwrite(frame->u_buffer + i * frame->uv_stride,
+ frame->uv_width, 1, yframe);
+
+ fclose(yframe);
+ sprintf(filename, "dx\\v%04d.raw", this_frame);
+ yframe = fopen(filename, "wb");
+
+ for (i = 0; i < frame->uv_height; i++)
+ fwrite(frame->v_buffer + i * frame->uv_stride,
+ frame->uv_width, 1, yframe);
+
+ fclose(yframe);
+}
+#endif
+
+void vp9_initialize_dec() {
+ static int init_done = 0;
+
+ if (!init_done) {
+ vp9_initialize_common();
+ vp9_init_quant_tables();
+ init_done = 1;
+ }
+}
+
+VP9D_PTR vp9_create_decompressor(VP9D_CONFIG *oxcf) {
+ VP9D_COMP *const pbi = vpx_memalign(32, sizeof(VP9D_COMP));
+
+ if (!pbi)
+ return NULL;
+
+ vpx_memset(pbi, 0, sizeof(VP9D_COMP));
+
+ if (setjmp(pbi->common.error.jmp)) {
+ pbi->common.error.setjmp = 0;
+ vp9_remove_decompressor(pbi);
+ return NULL;
+ }
+
+ pbi->common.error.setjmp = 1;
+ vp9_initialize_dec();
+
+ vp9_create_common(&pbi->common);
+
+ pbi->oxcf = *oxcf;
+ pbi->common.current_video_frame = 0;
+ pbi->ready_for_new_data = 1;
+
+ // vp9_init_dequantizer() is first called here. Add a check in
+ // frame_init_dequantizer() to avoid unnecessarily calling
+ // vp9_init_dequantizer() for every frame.
+ vp9_init_dequantizer(&pbi->common);
+
+ vp9_loop_filter_init(&pbi->common);
+
+ pbi->common.error.setjmp = 0;
+ pbi->decoded_key_frame = 0;
+
+ return pbi;
+}
+
+void vp9_remove_decompressor(VP9D_PTR ptr) {
+ VP9D_COMP *const pbi = (VP9D_COMP *)ptr;
+
+ if (!pbi)
+ return;
+
+ if (pbi->common.last_frame_seg_map)
+ vpx_free(pbi->common.last_frame_seg_map);
+
+ vp9_remove_common(&pbi->common);
+ vpx_free(pbi->mbc);
+ vpx_free(pbi);
+}
+
+static int equal_dimensions(YV12_BUFFER_CONFIG *a, YV12_BUFFER_CONFIG *b) {
+ return a->y_height == b->y_height && a->y_width == b->y_width &&
+ a->uv_height == b->uv_height && a->uv_width == b->uv_width;
+}
+
+vpx_codec_err_t vp9_copy_reference_dec(VP9D_PTR ptr,
+ VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
+ VP9D_COMP *pbi = (VP9D_COMP *) ptr;
+ VP9_COMMON *cm = &pbi->common;
+ int ref_fb_idx;
+
+ /* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
+ * encoder is using the frame buffers for. This is just a stub to keep the
+ * vpxenc --test-decode functionality working, and will be replaced in a
+ * later commit that adds VP9-specific controls for this functionality.
+ */
+ if (ref_frame_flag == VP9_LAST_FLAG) {
+ ref_fb_idx = pbi->common.ref_frame_map[0];
+ } else {
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR,
+ "Invalid reference frame");
+ return pbi->common.error.error_code;
+ }
+
+ if (!equal_dimensions(&cm->yv12_fb[ref_fb_idx], sd)) {
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR,
+ "Incorrect buffer dimensions");
+ } else {
+ vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
+ }
+
+ return pbi->common.error.error_code;
+}
+
+
+vpx_codec_err_t vp9_set_reference_dec(VP9D_PTR ptr, VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
+ VP9D_COMP *pbi = (VP9D_COMP *) ptr;
+ VP9_COMMON *cm = &pbi->common;
+ int *ref_fb_ptr = NULL;
+
+ /* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
+ * encoder is using the frame buffers for. This is just a stub to keep the
+ * vpxenc --test-decode functionality working, and will be replaced in a
+ * later commit that adds VP9-specific controls for this functionality.
+ */
+ if (ref_frame_flag == VP9_LAST_FLAG)
+ ref_fb_ptr = &pbi->common.active_ref_idx[0];
+ else if (ref_frame_flag == VP9_GOLD_FLAG)
+ ref_fb_ptr = &pbi->common.active_ref_idx[1];
+ else if (ref_frame_flag == VP9_ALT_FLAG)
+ ref_fb_ptr = &pbi->common.active_ref_idx[2];
+ else {
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR,
+ "Invalid reference frame");
+ return pbi->common.error.error_code;
+ }
+
+ if (!equal_dimensions(&cm->yv12_fb[*ref_fb_ptr], sd)) {
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR,
+ "Incorrect buffer dimensions");
+ } else {
+ // Find an empty frame buffer.
+ const int free_fb = get_free_fb(cm);
+ // Decrease fb_idx_ref_cnt since it will be increased again in
+ // ref_cnt_fb() below.
+ cm->fb_idx_ref_cnt[free_fb]--;
+
+ // Manage the reference counters and copy image.
+ ref_cnt_fb(cm->fb_idx_ref_cnt, ref_fb_ptr, free_fb);
+ vp8_yv12_copy_frame(sd, &cm->yv12_fb[*ref_fb_ptr]);
+ }
+
+ return pbi->common.error.error_code;
+}
+
+
+int vp9_get_reference_dec(VP9D_PTR ptr, int index, YV12_BUFFER_CONFIG **fb) {
+ VP9D_COMP *pbi = (VP9D_COMP *) ptr;
+ VP9_COMMON *cm = &pbi->common;
+
+ if (index < 0 || index >= NUM_REF_FRAMES)
+ return -1;
+
+ *fb = &cm->yv12_fb[cm->ref_frame_map[index]];
+ return 0;
+}
+
+/* If any buffer updating is signaled, it should be done here. */
+static void swap_frame_buffers(VP9D_COMP *pbi) {
+ int ref_index = 0, mask;
+
+ for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
+ if (mask & 1) {
+ ref_cnt_fb(pbi->common.fb_idx_ref_cnt,
+ &pbi->common.ref_frame_map[ref_index],
+ pbi->common.new_fb_idx);
+ }
+ ++ref_index;
+ }
+
+ pbi->common.frame_to_show = &pbi->common.yv12_fb[pbi->common.new_fb_idx];
+ pbi->common.fb_idx_ref_cnt[pbi->common.new_fb_idx]--;
+
+ /* Invalidate these references until the next frame starts. */
+ for (ref_index = 0; ref_index < 3; ref_index++)
+ pbi->common.active_ref_idx[ref_index] = INT_MAX;
+}
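+
+/* Example of the mask walk above: refresh_frame_flags == 0x5 (binary 101)
+ * repoints reference slots 0 and 2 at new_fb_idx; ref_cnt_fb() releases
+ * each slot's old buffer and takes a reference on the new one.
+ */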
+
+int vp9_receive_compressed_data(VP9D_PTR ptr,
+ uint64_t size, const uint8_t **psource,
+ int64_t time_stamp) {
+ VP9D_COMP *pbi = (VP9D_COMP *) ptr;
+ VP9_COMMON *cm = &pbi->common;
+ const uint8_t *source = *psource;
+ int retcode = 0;
+
+ /*if(pbi->ready_for_new_data == 0)
+ return -1;*/
+
+ if (ptr == 0)
+ return -1;
+
+ pbi->common.error.error_code = VPX_CODEC_OK;
+
+ pbi->source = source;
+ pbi->source_sz = size;
+
+ if (pbi->source_sz == 0) {
+ /* This is used to signal that we are missing frames.
+ * We do not know if the missing frame(s) were supposed to update
+ * any of the reference buffers, but we act conservatively and
+ * mark only the last buffer as corrupted.
+ *
+ * TODO(jkoleszar): Error concealment is undefined and non-normative
+ * at this point, but if it becomes so, [0] may not always be the correct
+ * thing to do here.
+ */
+ if (cm->active_ref_idx[0] != INT_MAX)
+ cm->yv12_fb[cm->active_ref_idx[0]].corrupted = 1;
+ }
+
+ cm->new_fb_idx = get_free_fb(cm);
+
+ if (setjmp(pbi->common.error.jmp)) {
+ pbi->common.error.setjmp = 0;
+
+ /* We do not know if the missing frame(s) were supposed to update
+ * any of the reference buffers, but we act conservatively and
+ * mark only the last buffer as corrupted.
+ *
+ * TODO(jkoleszar): Error concealment is undefined and non-normative
+ * at this point, but if it becomes so, [0] may not always be the correct
+ * thing to do here.
+ */
+ if (cm->active_ref_idx[0] != INT_MAX)
+ cm->yv12_fb[cm->active_ref_idx[0]].corrupted = 1;
+
+ if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0)
+ cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
+
+ return -1;
+ }
+
+ pbi->common.error.setjmp = 1;
+
+ retcode = vp9_decode_frame(pbi, psource);
+
+ if (retcode < 0) {
+ pbi->common.error.error_code = VPX_CODEC_ERROR;
+ pbi->common.error.setjmp = 0;
+ if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0)
+ cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
+ return retcode;
+ }
+
+ {
+ swap_frame_buffers(pbi);
+
+#if WRITE_RECON_BUFFER == 2
+ if (cm->show_frame)
+ write_dx_frame_to_file(cm->frame_to_show,
+ cm->current_video_frame);
+ else
+ write_dx_frame_to_file(cm->frame_to_show,
+ cm->current_video_frame + 1000);
+#endif
+
+ if (cm->filter_level) {
+ /* Apply the loop filter if appropriate. */
+ vp9_loop_filter_frame(cm, &pbi->mb, cm->filter_level, 0);
+ }
+
+#if WRITE_RECON_BUFFER == 2
+ if (cm->show_frame)
+ write_dx_frame_to_file(cm->frame_to_show,
+ cm->current_video_frame + 2000);
+ else
+ write_dx_frame_to_file(cm->frame_to_show,
+ cm->current_video_frame + 3000);
+#endif
+
+ vp9_extend_frame_borders(cm->frame_to_show,
+ cm->subsampling_x, cm->subsampling_y);
+ }
+
+#if WRITE_RECON_BUFFER == 1
+ if (cm->show_frame)
+ recon_write_yuv_frame("recon.yuv", cm->frame_to_show,
+ cm->width, cm->height);
+#endif
+
+ vp9_clear_system_state();
+
+ cm->last_show_frame = cm->show_frame;
+ if (cm->show_frame) {
+ // current mip will be the prev_mip for the next frame
+ MODE_INFO *temp = cm->prev_mip;
+ cm->prev_mip = cm->mip;
+ cm->mip = temp;
+
+ // update the upper-left visible macroblock pointers
+ cm->mi = cm->mip + cm->mode_info_stride + 1;
+ cm->prev_mi = cm->prev_mip + cm->mode_info_stride + 1;
+
+ cm->current_video_frame++;
+ }
+
+ pbi->ready_for_new_data = 0;
+ pbi->last_time_stamp = time_stamp;
+ pbi->source_sz = 0;
+
+ pbi->common.error.setjmp = 0;
+ return retcode;
+}
+
+int vp9_get_raw_frame(VP9D_PTR ptr, YV12_BUFFER_CONFIG *sd,
+ int64_t *time_stamp, int64_t *time_end_stamp,
+ vp9_ppflags_t *flags) {
+ int ret = -1;
+ VP9D_COMP *pbi = (VP9D_COMP *) ptr;
+
+ if (pbi->ready_for_new_data == 1)
+ return ret;
+
+ /* i.e., no raw frame to show */
+ if (pbi->common.show_frame == 0)
+ return ret;
+
+ pbi->ready_for_new_data = 1;
+ *time_stamp = pbi->last_time_stamp;
+ *time_end_stamp = 0;
+
+ sd->clrtype = pbi->common.clr_type;
+#if CONFIG_POSTPROC
+ ret = vp9_post_proc_frame(&pbi->common, sd, flags);
+#else
+
+ if (pbi->common.frame_to_show) {
+ *sd = *pbi->common.frame_to_show;
+ sd->y_width = pbi->common.width;
+ sd->y_height = pbi->common.height;
+ sd->uv_height = pbi->common.height / 2;
+ ret = 0;
+ } else {
+ ret = -1;
+ }
+
+#endif /*!CONFIG_POSTPROC*/
+ vp9_clear_system_state();
+ return ret;
+}
diff --git a/libvpx/vp9/decoder/vp9_onyxd_int.h b/libvpx/vp9/decoder/vp9_onyxd_int.h
new file mode 100644
index 0000000..8698570
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_onyxd_int.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_DECODER_VP9_ONYXD_INT_H_
+#define VP9_DECODER_VP9_ONYXD_INT_H_
+#include "./vpx_config.h"
+#include "vp9/decoder/vp9_onyxd.h"
+#include "vp9/decoder/vp9_treereader.h"
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/decoder/vp9_idct_blk.h"
+
+// #define DEC_DEBUG
+
+typedef struct VP9Decompressor {
+ DECLARE_ALIGNED(16, MACROBLOCKD, mb);
+
+ DECLARE_ALIGNED(16, VP9_COMMON, common);
+
+ VP9D_CONFIG oxcf;
+
+ const uint8_t *source;
+ uint32_t source_sz;
+
+ vp9_reader *mbc;
+ int64_t last_time_stamp;
+ int ready_for_new_data;
+
+ int refresh_frame_flags;
+ vp9_prob prob_skip_false;
+
+ int decoded_key_frame;
+
+ int initial_width;
+ int initial_height;
+} VP9D_COMP;
+
+
+#if CONFIG_DEBUG
+#define CHECK_MEM_ERROR(lval,expr) do {\
+ lval = (expr); \
+ if(!lval) \
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR,\
+ "Failed to allocate "#lval" at %s:%d", \
+ __FILE__,__LINE__);\
+ } while(0)
+#else
+#define CHECK_MEM_ERROR(lval,expr) do {\
+ lval = (expr); \
+ if(!lval) \
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR,\
+ "Failed to allocate "#lval);\
+ } while(0)
+#endif
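+
+/* Usage sketch (illustrative; the allocation itself is a placeholder): the
+ * macro assumes a VP9D_COMP *pbi is in scope at the call site, e.g.
+ *
+ *   CHECK_MEM_ERROR(pbi->mbc, vpx_malloc(sizeof(*pbi->mbc)));
+ *
+ * The do { ... } while (0) wrapper makes each expansion a single statement,
+ * so the macro composes safely with unbraced if/else.
+ */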
+
+#endif  // VP9_DECODER_VP9_ONYXD_INT_H_
diff --git a/libvpx/vp9/decoder/vp9_read_bit_buffer.h b/libvpx/vp9/decoder/vp9_read_bit_buffer.h
new file mode 100644
index 0000000..f243cb4
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_read_bit_buffer.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_READ_BIT_BUFFER_
+#define VP9_READ_BIT_BUFFER_
+
+#include <limits.h>
+
+#include "vpx/vpx_integer.h"
+
+typedef void (*vp9_rb_error_handler)(void *data, size_t bit_offset);
+
+struct vp9_read_bit_buffer {
+ const uint8_t *bit_buffer;
+ const uint8_t *bit_buffer_end;
+ size_t bit_offset;
+
+ void *error_handler_data;
+ vp9_rb_error_handler error_handler;
+};
+
+static size_t vp9_rb_bytes_read(struct vp9_read_bit_buffer *rb) {
+ return rb->bit_offset / CHAR_BIT + (rb->bit_offset % CHAR_BIT > 0);
+}
+
+static int vp9_rb_read_bit(struct vp9_read_bit_buffer *rb) {
+ const size_t off = rb->bit_offset;
+ const size_t p = off / CHAR_BIT;
+ const int q = CHAR_BIT - 1 - (int)off % CHAR_BIT;
+ if (rb->bit_buffer + p >= rb->bit_buffer_end) {
+ rb->error_handler(rb->error_handler_data, rb->bit_offset);
+ return 0;
+ } else {
+ const int bit = (rb->bit_buffer[p] & (1 << q)) >> q;
+ rb->bit_offset = off + 1;
+ return bit;
+ }
+}
+
+static int vp9_rb_read_literal(struct vp9_read_bit_buffer *rb, int bits) {
+ int value = 0, bit;
+ for (bit = bits - 1; bit >= 0; bit--)
+ value |= vp9_rb_read_bit(rb) << bit;
+ return value;
+}
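+
+/* A minimal usage sketch (illustrative only; buf, buf_size and the overrun
+ * handler are caller-supplied assumptions, not part of this header):
+ *
+ *   static void on_overrun(void *data, size_t bit_offset) {
+ *     // report a truncated bitstream to the caller
+ *   }
+ *   ...
+ *   struct vp9_read_bit_buffer rb = { buf, buf + buf_size, 0,
+ *                                     NULL, on_overrun };
+ *   int frame_marker = vp9_rb_read_literal(&rb, 2);
+ *
+ * Bits are consumed most-significant-bit first within each byte, so reading
+ * a 3-bit literal from a buffer starting with 0xA0 (10100000b) yields 5.
+ */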
+
+#endif // VP9_READ_BIT_BUFFER_
diff --git a/libvpx/vp9/decoder/vp9_treereader.h b/libvpx/vp9/decoder/vp9_treereader.h
new file mode 100644
index 0000000..4535688
--- /dev/null
+++ b/libvpx/vp9/decoder/vp9_treereader.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_DECODER_VP9_TREEREADER_H_
+#define VP9_DECODER_VP9_TREEREADER_H_
+
+#include "vp9/common/vp9_treecoder.h"
+#include "vp9/decoder/vp9_dboolhuff.h"
+
+#define vp9_read_prob(r) ((vp9_prob)vp9_read_literal(r, 8))
+#define vp9_read_and_apply_sign(r, value) (vp9_read_bit(r) ? -(value) : (value))
+
+// The intent of the tree data structure is to make decoding trivial.
+static int treed_read(vp9_reader *const r, /* !!! must return a 0 or 1 !!! */
+ vp9_tree t,
+ const vp9_prob *const p) {
+ register vp9_tree_index i = 0;
+
+ while ((i = t[ i + vp9_read(r, p[i >> 1])]) > 0);
+
+ return -i;
+}
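+
+/* How the traversal works: a vp9_tree is a flat array of paired
+ * vp9_tree_index entries. A positive entry is the offset of the next node
+ * pair; a non-positive entry is the negated token value, which ends the
+ * walk (hence the final -i). p[i >> 1] is the probability of taking the
+ * 1-branch at the node owning entries i and i + 1. As a minimal sketch,
+ * for a two-symbol tree (TOKEN_A/TOKEN_B are placeholder names):
+ *
+ *   const vp9_tree_index tree[2] = { -TOKEN_A, -TOKEN_B };
+ *
+ * treed_read(r, tree, p) reads one bit and returns TOKEN_A on a 0 and
+ * TOKEN_B on a 1.
+ */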
+
+#endif // VP9_DECODER_VP9_TREEREADER_H_
diff --git a/libvpx/vp9/decoder/x86/vp9_dequantize_sse2.c b/libvpx/vp9/decoder/x86/vp9_dequantize_sse2.c
new file mode 100644
index 0000000..54ec67f
--- /dev/null
+++ b/libvpx/vp9/decoder/x86/vp9_dequantize_sse2.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <emmintrin.h> // SSE2
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_idct.h"
+
+void vp9_add_constant_residual_8x8_sse2(const int16_t diff, uint8_t *dest,
+ int stride) {
+ uint8_t abs_diff;
+ __m128i d;
+
+ // Prediction data.
+ __m128i p0 = _mm_loadl_epi64((const __m128i *)(dest + 0 * stride));
+ __m128i p1 = _mm_loadl_epi64((const __m128i *)(dest + 1 * stride));
+ __m128i p2 = _mm_loadl_epi64((const __m128i *)(dest + 2 * stride));
+ __m128i p3 = _mm_loadl_epi64((const __m128i *)(dest + 3 * stride));
+ __m128i p4 = _mm_loadl_epi64((const __m128i *)(dest + 4 * stride));
+ __m128i p5 = _mm_loadl_epi64((const __m128i *)(dest + 5 * stride));
+ __m128i p6 = _mm_loadl_epi64((const __m128i *)(dest + 6 * stride));
+ __m128i p7 = _mm_loadl_epi64((const __m128i *)(dest + 7 * stride));
+
+ p0 = _mm_unpacklo_epi64(p0, p1);
+ p2 = _mm_unpacklo_epi64(p2, p3);
+ p4 = _mm_unpacklo_epi64(p4, p5);
+ p6 = _mm_unpacklo_epi64(p6, p7);
+
+ // Clip diff value to [0, 255] range. Then, do addition or subtraction
+ // according to its sign.
+ if (diff >= 0) {
+ abs_diff = (diff > 255) ? 255 : diff;
+ d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+
+ p0 = _mm_adds_epu8(p0, d);
+ p2 = _mm_adds_epu8(p2, d);
+ p4 = _mm_adds_epu8(p4, d);
+ p6 = _mm_adds_epu8(p6, d);
+ } else {
+ abs_diff = (diff < -255) ? 255 : -diff;
+ d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+
+ p0 = _mm_subs_epu8(p0, d);
+ p2 = _mm_subs_epu8(p2, d);
+ p4 = _mm_subs_epu8(p4, d);
+ p6 = _mm_subs_epu8(p6, d);
+ }
+
+ _mm_storel_epi64((__m128i *)(dest + 0 * stride), p0);
+ p0 = _mm_srli_si128(p0, 8);
+ _mm_storel_epi64((__m128i *)(dest + 1 * stride), p0);
+
+ _mm_storel_epi64((__m128i *)(dest + 2 * stride), p2);
+ p2 = _mm_srli_si128(p2, 8);
+ _mm_storel_epi64((__m128i *)(dest + 3 * stride), p2);
+
+ _mm_storel_epi64((__m128i *)(dest + 4 * stride), p4);
+ p4 = _mm_srli_si128(p4, 8);
+ _mm_storel_epi64((__m128i *)(dest + 5 * stride), p4);
+
+ _mm_storel_epi64((__m128i *)(dest + 6 * stride), p6);
+ p6 = _mm_srli_si128(p6, 8);
+ _mm_storel_epi64((__m128i *)(dest + 7 * stride), p6);
+}
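+
+/* Note on the splat above: abs_diff * 0x01010101u replicates the byte into
+ * all four bytes of a 32-bit lane, and _mm_shuffle_epi32(..., 0) broadcasts
+ * that lane across the register, e.g. abs_diff = 0x2A becomes sixteen 0x2A
+ * bytes. The saturating _mm_adds_epu8/_mm_subs_epu8 then clamp the result
+ * to [0, 255] without a separate clip step.
+ */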
+
+void vp9_add_constant_residual_16x16_sse2(const int16_t diff, uint8_t *dest,
+ int stride) {
+ uint8_t abs_diff;
+ __m128i d;
+
+ // Prediction data.
+ __m128i p0 = _mm_load_si128((const __m128i *)(dest + 0 * stride));
+ __m128i p1 = _mm_load_si128((const __m128i *)(dest + 1 * stride));
+ __m128i p2 = _mm_load_si128((const __m128i *)(dest + 2 * stride));
+ __m128i p3 = _mm_load_si128((const __m128i *)(dest + 3 * stride));
+ __m128i p4 = _mm_load_si128((const __m128i *)(dest + 4 * stride));
+ __m128i p5 = _mm_load_si128((const __m128i *)(dest + 5 * stride));
+ __m128i p6 = _mm_load_si128((const __m128i *)(dest + 6 * stride));
+ __m128i p7 = _mm_load_si128((const __m128i *)(dest + 7 * stride));
+ __m128i p8 = _mm_load_si128((const __m128i *)(dest + 8 * stride));
+ __m128i p9 = _mm_load_si128((const __m128i *)(dest + 9 * stride));
+ __m128i p10 = _mm_load_si128((const __m128i *)(dest + 10 * stride));
+ __m128i p11 = _mm_load_si128((const __m128i *)(dest + 11 * stride));
+ __m128i p12 = _mm_load_si128((const __m128i *)(dest + 12 * stride));
+ __m128i p13 = _mm_load_si128((const __m128i *)(dest + 13 * stride));
+ __m128i p14 = _mm_load_si128((const __m128i *)(dest + 14 * stride));
+ __m128i p15 = _mm_load_si128((const __m128i *)(dest + 15 * stride));
+
+ // Clip diff value to [0, 255] range. Then, do addition or subtraction
+ // according to its sign.
+ if (diff >= 0) {
+ abs_diff = (diff > 255) ? 255 : diff;
+ d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+
+ p0 = _mm_adds_epu8(p0, d);
+ p1 = _mm_adds_epu8(p1, d);
+ p2 = _mm_adds_epu8(p2, d);
+ p3 = _mm_adds_epu8(p3, d);
+ p4 = _mm_adds_epu8(p4, d);
+ p5 = _mm_adds_epu8(p5, d);
+ p6 = _mm_adds_epu8(p6, d);
+ p7 = _mm_adds_epu8(p7, d);
+ p8 = _mm_adds_epu8(p8, d);
+ p9 = _mm_adds_epu8(p9, d);
+ p10 = _mm_adds_epu8(p10, d);
+ p11 = _mm_adds_epu8(p11, d);
+ p12 = _mm_adds_epu8(p12, d);
+ p13 = _mm_adds_epu8(p13, d);
+ p14 = _mm_adds_epu8(p14, d);
+ p15 = _mm_adds_epu8(p15, d);
+ } else {
+ abs_diff = (diff < -255) ? 255 : -diff;
+ d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+
+ p0 = _mm_subs_epu8(p0, d);
+ p1 = _mm_subs_epu8(p1, d);
+ p2 = _mm_subs_epu8(p2, d);
+ p3 = _mm_subs_epu8(p3, d);
+ p4 = _mm_subs_epu8(p4, d);
+ p5 = _mm_subs_epu8(p5, d);
+ p6 = _mm_subs_epu8(p6, d);
+ p7 = _mm_subs_epu8(p7, d);
+ p8 = _mm_subs_epu8(p8, d);
+ p9 = _mm_subs_epu8(p9, d);
+ p10 = _mm_subs_epu8(p10, d);
+ p11 = _mm_subs_epu8(p11, d);
+ p12 = _mm_subs_epu8(p12, d);
+ p13 = _mm_subs_epu8(p13, d);
+ p14 = _mm_subs_epu8(p14, d);
+ p15 = _mm_subs_epu8(p15, d);
+ }
+
+ // Store results
+ _mm_store_si128((__m128i *)(dest + 0 * stride), p0);
+ _mm_store_si128((__m128i *)(dest + 1 * stride), p1);
+ _mm_store_si128((__m128i *)(dest + 2 * stride), p2);
+ _mm_store_si128((__m128i *)(dest + 3 * stride), p3);
+ _mm_store_si128((__m128i *)(dest + 4 * stride), p4);
+ _mm_store_si128((__m128i *)(dest + 5 * stride), p5);
+ _mm_store_si128((__m128i *)(dest + 6 * stride), p6);
+ _mm_store_si128((__m128i *)(dest + 7 * stride), p7);
+ _mm_store_si128((__m128i *)(dest + 8 * stride), p8);
+ _mm_store_si128((__m128i *)(dest + 9 * stride), p9);
+ _mm_store_si128((__m128i *)(dest + 10 * stride), p10);
+ _mm_store_si128((__m128i *)(dest + 11 * stride), p11);
+ _mm_store_si128((__m128i *)(dest + 12 * stride), p12);
+ _mm_store_si128((__m128i *)(dest + 13 * stride), p13);
+ _mm_store_si128((__m128i *)(dest + 14 * stride), p14);
+ _mm_store_si128((__m128i *)(dest + 15 * stride), p15);
+}
+
+void vp9_add_constant_residual_32x32_sse2(const int16_t diff, uint8_t *dest,
+ int stride) {
+ uint8_t abs_diff;
+ __m128i d;
+ int i = 8;
+
+ if (diff >= 0) {
+ abs_diff = (diff > 255) ? 255 : diff;
+ d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+ } else {
+ abs_diff = (diff < -255) ? 255 : -diff;
+ d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
+ }
+
+ do {
+ // Prediction data.
+ __m128i p0 = _mm_load_si128((const __m128i *)(dest + 0 * stride));
+ __m128i p1 = _mm_load_si128((const __m128i *)(dest + 0 * stride + 16));
+ __m128i p2 = _mm_load_si128((const __m128i *)(dest + 1 * stride));
+ __m128i p3 = _mm_load_si128((const __m128i *)(dest + 1 * stride + 16));
+ __m128i p4 = _mm_load_si128((const __m128i *)(dest + 2 * stride));
+ __m128i p5 = _mm_load_si128((const __m128i *)(dest + 2 * stride + 16));
+ __m128i p6 = _mm_load_si128((const __m128i *)(dest + 3 * stride));
+ __m128i p7 = _mm_load_si128((const __m128i *)(dest + 3 * stride + 16));
+
+ // Clip diff value to [0, 255] range. Then, do addition or subtraction
+ // according to its sign.
+ if (diff >= 0) {
+ p0 = _mm_adds_epu8(p0, d);
+ p1 = _mm_adds_epu8(p1, d);
+ p2 = _mm_adds_epu8(p2, d);
+ p3 = _mm_adds_epu8(p3, d);
+ p4 = _mm_adds_epu8(p4, d);
+ p5 = _mm_adds_epu8(p5, d);
+ p6 = _mm_adds_epu8(p6, d);
+ p7 = _mm_adds_epu8(p7, d);
+ } else {
+ p0 = _mm_subs_epu8(p0, d);
+ p1 = _mm_subs_epu8(p1, d);
+ p2 = _mm_subs_epu8(p2, d);
+ p3 = _mm_subs_epu8(p3, d);
+ p4 = _mm_subs_epu8(p4, d);
+ p5 = _mm_subs_epu8(p5, d);
+ p6 = _mm_subs_epu8(p6, d);
+ p7 = _mm_subs_epu8(p7, d);
+ }
+
+ // Store results
+ _mm_store_si128((__m128i *)(dest + 0 * stride), p0);
+ _mm_store_si128((__m128i *)(dest + 0 * stride + 16), p1);
+ _mm_store_si128((__m128i *)(dest + 1 * stride), p2);
+ _mm_store_si128((__m128i *)(dest + 1 * stride + 16), p3);
+ _mm_store_si128((__m128i *)(dest + 2 * stride), p4);
+ _mm_store_si128((__m128i *)(dest + 2 * stride + 16), p5);
+ _mm_store_si128((__m128i *)(dest + 3 * stride), p6);
+ _mm_store_si128((__m128i *)(dest + 3 * stride + 16), p7);
+
+ dest += 4 * stride;
+ } while (--i);
+}
diff --git a/libvpx/vp9/encoder/vp9_asm_enc_offsets.c b/libvpx/vp9/encoder/vp9_asm_enc_offsets.c
new file mode 100644
index 0000000..921e8f0
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_asm_enc_offsets.c
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vpx_ports/asm_offsets.h"
+
+BEGIN
+
+
+END
diff --git a/libvpx/vp9/encoder/vp9_bitstream.c b/libvpx/vp9/encoder/vp9_bitstream.c
new file mode 100644
index 0000000..09ab2db
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_bitstream.c
@@ -0,0 +1,1821 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <limits.h>
+
+#include "vpx/vpx_encoder.h"
+#include "vpx_mem/vpx_mem.h"
+
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_entropymv.h"
+#include "vp9/common/vp9_findnearmv.h"
+#include "vp9/common/vp9_tile_common.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_entropymv.h"
+#include "vp9/common/vp9_mvref_common.h"
+#include "vp9/common/vp9_treecoder.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vp9/common/vp9_pragmas.h"
+
+#include "vp9/encoder/vp9_mcomp.h"
+#include "vp9/encoder/vp9_encodemv.h"
+#include "vp9/encoder/vp9_bitstream.h"
+#include "vp9/encoder/vp9_segmentation.h"
+#include "vp9/encoder/vp9_write_bit_buffer.h"
+
+
+#if defined(SECTIONBITS_OUTPUT)
+unsigned __int64 Sectionbits[500];
+#endif
+
+#ifdef ENTROPY_STATS
+int intra_mode_stats[VP9_INTRA_MODES]
+ [VP9_INTRA_MODES]
+ [VP9_INTRA_MODES];
+vp9_coeff_stats tree_update_hist[TX_SIZE_MAX_SB][BLOCK_TYPES];
+
+extern unsigned int active_section;
+#endif
+
+#define vp9_cost_upd ((int)(vp9_cost_one(upd) - vp9_cost_zero(upd)) >> 8)
+#define vp9_cost_upd256 ((int)(vp9_cost_one(upd) - vp9_cost_zero(upd)))
+
+#ifdef MODE_STATS
+int64_t tx_count_32x32p_stats[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB];
+int64_t tx_count_16x16p_stats[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 1];
+int64_t tx_count_8x8p_stats[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 2];
+int64_t switchable_interp_stats[VP9_SWITCHABLE_FILTERS+1]
+ [VP9_SWITCHABLE_FILTERS];
+
+void init_tx_count_stats() {
+ vp9_zero(tx_count_32x32p_stats);
+ vp9_zero(tx_count_16x16p_stats);
+ vp9_zero(tx_count_8x8p_stats);
+}
+
+void init_switchable_interp_stats() {
+ vp9_zero(switchable_interp_stats);
+}
+
+static void update_tx_count_stats(VP9_COMMON *cm) {
+ int i, j;
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ for (j = 0; j < TX_SIZE_MAX_SB; j++) {
+ tx_count_32x32p_stats[i][j] += cm->fc.tx_count_32x32p[i][j];
+ }
+ }
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ for (j = 0; j < TX_SIZE_MAX_SB - 1; j++) {
+ tx_count_16x16p_stats[i][j] += cm->fc.tx_count_16x16p[i][j];
+ }
+ }
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ for (j = 0; j < TX_SIZE_MAX_SB - 2; j++) {
+ tx_count_8x8p_stats[i][j] += cm->fc.tx_count_8x8p[i][j];
+ }
+ }
+}
+
+static void update_switchable_interp_stats(VP9_COMMON *cm) {
+ int i, j;
+ for (i = 0; i < VP9_SWITCHABLE_FILTERS+1; ++i)
+ for (j = 0; j < VP9_SWITCHABLE_FILTERS; ++j) {
+ switchable_interp_stats[i][j] += cm->fc.switchable_interp_count[i][j];
+ }
+}
+
+void write_tx_count_stats() {
+ int i, j;
+ FILE *fp = fopen("tx_count.bin", "wb");
+ fwrite(tx_count_32x32p_stats, sizeof(tx_count_32x32p_stats), 1, fp);
+ fwrite(tx_count_16x16p_stats, sizeof(tx_count_16x16p_stats), 1, fp);
+ fwrite(tx_count_8x8p_stats, sizeof(tx_count_8x8p_stats), 1, fp);
+ fclose(fp);
+
+ printf(
+ "vp9_default_tx_count_32x32p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB] = {\n");
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ printf(" { ");
+ for (j = 0; j < TX_SIZE_MAX_SB; j++) {
+ printf("%"PRId64", ", tx_count_32x32p_stats[i][j]);
+ }
+ printf("},\n");
+ }
+ printf("};\n");
+ printf(
+ "vp9_default_tx_count_16x16p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB-1] = {\n");
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ printf(" { ");
+ for (j = 0; j < TX_SIZE_MAX_SB - 1; j++) {
+ printf("%"PRId64", ", tx_count_16x16p_stats[i][j]);
+ }
+ printf("},\n");
+ }
+ printf("};\n");
+ printf(
+ "vp9_default_tx_count_8x8p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB-2] = {\n");
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ printf(" { ");
+ for (j = 0; j < TX_SIZE_MAX_SB - 2; j++) {
+ printf("%"PRId64", ", tx_count_8x8p_stats[i][j]);
+ }
+ printf("},\n");
+ }
+ printf("};\n");
+}
+
+void write_switchable_interp_stats() {
+ int i, j;
+ FILE *fp = fopen("switchable_interp.bin", "wb");
+ fwrite(switchable_interp_stats, sizeof(switchable_interp_stats), 1, fp);
+ fclose(fp);
+
+ printf(
+ "vp9_default_switchable_filter_count[VP9_SWITCHABLE_FILTERS+1]"
+ "[VP9_SWITCHABLE_FILTERS] = {\n");
+ for (i = 0; i < VP9_SWITCHABLE_FILTERS+1; i++) {
+ printf(" { ");
+ for (j = 0; j < VP9_SWITCHABLE_FILTERS; j++) {
+ printf("%"PRId64", ", switchable_interp_stats[i][j]);
+ }
+ printf("},\n");
+ }
+ printf("};\n");
+}
+#endif
+
+static int update_bits[255];
+
+static INLINE void write_be32(uint8_t *p, int value) {
+ p[0] = value >> 24;
+ p[1] = value >> 16;
+ p[2] = value >> 8;
+ p[3] = value;
+}
+
+
+
+int recenter_nonneg(int v, int m) {
+ if (v > (m << 1))
+ return v;
+ else if (v >= m)
+ return ((v - m) << 1);
+ else
+ return ((m - v) << 1) - 1;
+}
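+
+/* recenter_nonneg() reorders values so that those closest to m receive the
+ * smallest indices. Worked example with m = 5: v = 5 -> 0, v = 4 -> 1,
+ * v = 6 -> 2, v = 3 -> 3, v = 7 -> 4, and so on; any v > 2 * m (here
+ * v > 10) already exceeds every remapped neighbour and passes through
+ * unchanged.
+ */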
+
+static int get_unsigned_bits(unsigned num_values) {
+ int cat = 0;
+ if ((num_values--) <= 1) return 0;
+ while (num_values > 0) {
+ cat++;
+ num_values >>= 1;
+ }
+ return cat;
+}
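+
+/* get_unsigned_bits(num_values) returns the number of bits needed to code
+ * num_values distinct values, i.e. ceil(log2(num_values)). For example,
+ * num_values = 8 needs 3 bits (after the decrement, 7 halves to 3, 1, 0),
+ * while num_values <= 1 needs none, since there is nothing to distinguish.
+ */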
+
+void vp9_encode_unsigned_max(struct vp9_write_bit_buffer *wb,
+ int data, int max) {
+ vp9_wb_write_literal(wb, data, get_unsigned_bits(max));
+}
+
+void encode_uniform(vp9_writer *w, int v, int n) {
+ int l = get_unsigned_bits(n);
+ int m;
+ if (l == 0)
+ return;
+ m = (1 << l) - n;
+ if (v < m) {
+ vp9_write_literal(w, v, l - 1);
+ } else {
+ vp9_write_literal(w, m + ((v - m) >> 1), l - 1);
+ vp9_write_literal(w, (v - m) & 1, 1);
+ }
+}
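+
+/* encode_uniform() is a quasi-uniform code: with l = get_unsigned_bits(n)
+ * and m = (1 << l) - n, the first m values take l - 1 bits and the rest
+ * take l bits, which saves a bit on part of the range when n is not a
+ * power of two. Worked example for n = 5 (l = 3, m = 3): v = 0, 1, 2 are
+ * written as the two-bit codes 00, 01, 10; v = 3 is written as 11 then 0,
+ * and v = 4 as 11 then 1.
+ */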
+
+int count_uniform(int v, int n) {
+ int l = get_unsigned_bits(n);
+ int m;
+ if (l == 0) return 0;
+ m = (1 << l) - n;
+ if (v < m)
+ return l - 1;
+ else
+ return l;
+}
+
+void encode_term_subexp(vp9_writer *w, int word, int k, int num_syms) {
+ int i = 0;
+ int mk = 0;
+ while (1) {
+ int b = (i ? k + i - 1 : k);
+ int a = (1 << b);
+ if (num_syms <= mk + 3 * a) {
+ encode_uniform(w, word - mk, num_syms - mk);
+ break;
+ } else {
+ int t = (word >= mk + a);
+ vp9_write_literal(w, t, 1);
+ if (t) {
+ i = i + 1;
+ mk += a;
+ } else {
+ vp9_write_literal(w, word - mk, b);
+ break;
+ }
+ }
+ }
+}
+
+int count_term_subexp(int word, int k, int num_syms) {
+ int count = 0;
+ int i = 0;
+ int mk = 0;
+ while (1) {
+ int b = (i ? k + i - 1 : k);
+ int a = (1 << b);
+ if (num_syms <= mk + 3 * a) {
+ count += count_uniform(word - mk, num_syms - mk);
+ break;
+ } else {
+ int t = (word >= mk + a);
+ count++;
+ if (t) {
+ i = i + 1;
+ mk += a;
+ } else {
+ count += b;
+ break;
+ }
+ }
+ }
+ return count;
+}
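+
+/* encode_term_subexp()/count_term_subexp() implement a terminated
+ * subexponential code: each continuation bit moves to the next bucket of
+ * geometrically growing width (k, k, k + 1, ... bits), and the code falls
+ * back to encode_uniform() once the remaining alphabet fits within three
+ * buckets. Worked example, assuming k = 4 and num_syms = 255 (matching the
+ * SUBEXP_PARAM/255 arguments used elsewhere in this file): word = 0 costs
+ * 1 + 4 = 5 bits (continuation bit 0, then 0 in 4 bits); word = 20 costs
+ * 1 + 1 + 4 = 6 bits (20 >= 16 so one continuation bit, then 20 - 16 = 4
+ * in 4 bits).
+ */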
+
+static void compute_update_table() {
+ int i;
+ for (i = 0; i < 254; i++)
+ update_bits[i] = count_term_subexp(i, SUBEXP_PARAM, 255);
+}
+
+static int split_index(int i, int n, int modulus) {
+ int max1 = (n - 1 - modulus / 2) / modulus + 1;
+ if (i % modulus == modulus / 2) i = i / modulus;
+ else i = max1 + i - (i + modulus - modulus / 2) / modulus;
+ return i;
+}
+
+static int remap_prob(int v, int m) {
+ const int n = 255;
+ const int modulus = MODULUS_PARAM;
+ int i;
+ v--;
+ m--;
+ if ((m << 1) <= n)
+ i = recenter_nonneg(v, m) - 1;
+ else
+ i = recenter_nonneg(n - 1 - v, n - 1 - m) - 1;
+
+ i = split_index(i, n - 1, modulus);
+ return i;
+}
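+
+/* remap_prob() turns a (new, old) probability pair into a small index:
+ * recenter_nonneg() makes the index grow with |new - old|, and
+ * split_index() then promotes every modulus-th value to the front of the
+ * table so a sparse subset of large deltas stays cheap. Worked example with
+ * modulus 13: indices 6, 19, 32, ... are remapped to 0, 1, 2, ..., and all
+ * other indices are shifted up behind them.
+ */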
+
+static void write_prob_diff_update(vp9_writer *w,
+ vp9_prob newp, vp9_prob oldp) {
+ int delp = remap_prob(newp, oldp);
+ encode_term_subexp(w, delp, SUBEXP_PARAM, 255);
+}
+
+static int prob_diff_update_cost(vp9_prob newp, vp9_prob oldp) {
+ int delp = remap_prob(newp, oldp);
+ return update_bits[delp] * 256;
+}
+
+static int prob_update_savings(const unsigned int *ct,
+ const vp9_prob oldp, const vp9_prob newp,
+ const vp9_prob upd) {
+ const int old_b = cost_branch256(ct, oldp);
+ const int new_b = cost_branch256(ct, newp);
+ const int update_b = 2048 + vp9_cost_upd256;
+ return old_b - new_b - update_b;
+}
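+
+/* Costs here are in 1/256-bit units (the scaling used by cost_branch256()
+ * and the vp9_cost_* macros), so the constant 2048 is the 8 bits needed to
+ * send the replacement probability as a raw literal, on top of the update
+ * flag cost vp9_cost_upd256. A positive return value means the explicit
+ * update pays for itself.
+ */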
+
+static int prob_diff_update_savings_search(const unsigned int *ct,
+ const vp9_prob oldp, vp9_prob *bestp,
+ const vp9_prob upd) {
+ const int old_b = cost_branch256(ct, oldp);
+ int new_b, update_b, savings, bestsavings, step;
+ vp9_prob newp, bestnewp;
+
+ bestsavings = 0;
+ bestnewp = oldp;
+
+ step = (*bestp > oldp ? -1 : 1);
+ for (newp = *bestp; newp != oldp; newp += step) {
+ new_b = cost_branch256(ct, newp);
+ update_b = prob_diff_update_cost(newp, oldp) + vp9_cost_upd256;
+ savings = old_b - new_b - update_b;
+ if (savings > bestsavings) {
+ bestsavings = savings;
+ bestnewp = newp;
+ }
+ }
+ *bestp = bestnewp;
+ return bestsavings;
+}
+
+static int prob_diff_update_savings_search_model(const unsigned int *ct,
+ const vp9_prob *oldp,
+ vp9_prob *bestp,
+ const vp9_prob upd,
+ int b, int r) {
+ int i, old_b, new_b, update_b, savings, bestsavings, step;
+ int newp;
+ vp9_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
+ vp9_model_to_full_probs(oldp, oldplist);
+ vpx_memcpy(newplist, oldp, sizeof(vp9_prob) * UNCONSTRAINED_NODES);
+ for (i = UNCONSTRAINED_NODES, old_b = 0; i < ENTROPY_NODES; ++i)
+ old_b += cost_branch256(ct + 2 * i, oldplist[i]);
+ old_b += cost_branch256(ct + 2 * PIVOT_NODE, oldplist[PIVOT_NODE]);
+
+ bestsavings = 0;
+ bestnewp = oldp[PIVOT_NODE];
+
+ step = (*bestp > oldp[PIVOT_NODE] ? -1 : 1);
+ newp = *bestp;
+ for (; newp != oldp[PIVOT_NODE]; newp += step) {
+ if (newp < 1 || newp > 255) continue;
+ newplist[PIVOT_NODE] = newp;
+ vp9_model_to_full_probs(newplist, newplist);
+ for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i)
+ new_b += cost_branch256(ct + 2 * i, newplist[i]);
+ new_b += cost_branch256(ct + 2 * PIVOT_NODE, newplist[PIVOT_NODE]);
+ update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) +
+ vp9_cost_upd256;
+ savings = old_b - new_b - update_b;
+ if (savings > bestsavings) {
+ bestsavings = savings;
+ bestnewp = newp;
+ }
+ }
+ *bestp = bestnewp;
+ return bestsavings;
+}
+
+static void vp9_cond_prob_update(vp9_writer *bc, vp9_prob *oldp, vp9_prob upd,
+ unsigned int *ct) {
+ vp9_prob newp;
+ int savings;
+ newp = get_binary_prob(ct[0], ct[1]);
+ assert(newp >= 1);
+ savings = prob_update_savings(ct, *oldp, newp, upd);
+ if (savings > 0) {
+ vp9_write(bc, 1, upd);
+ vp9_write_prob(bc, newp);
+ *oldp = newp;
+ } else {
+ vp9_write(bc, 0, upd);
+ }
+}
+
+static void vp9_cond_prob_diff_update(vp9_writer *bc, vp9_prob *oldp,
+ vp9_prob upd,
+ unsigned int *ct) {
+ vp9_prob newp;
+ int savings;
+ newp = get_binary_prob(ct[0], ct[1]);
+ assert(newp >= 1);
+ savings = prob_diff_update_savings_search(ct, *oldp, &newp, upd);
+ if (savings > 0) {
+ vp9_write(bc, 1, upd);
+ write_prob_diff_update(bc, newp, *oldp);
+ *oldp = newp;
+ } else {
+ vp9_write(bc, 0, upd);
+ }
+}
+
+static void update_mode(
+ vp9_writer *w,
+ int n,
+ const struct vp9_token tok[/* n */],
+ vp9_tree tree,
+ vp9_prob Pnew[/* n-1 */],
+ vp9_prob Pcur[/* n-1 */],
+ unsigned int bct[/* n-1 */] [2],
+ const unsigned int num_events[/* n */]
+) {
+ int i = 0;
+
+ vp9_tree_probs_from_distribution(tree, Pnew, bct, num_events, 0);
+ n--;
+
+ for (i = 0; i < n; ++i) {
+ vp9_cond_prob_diff_update(w, &Pcur[i], VP9_MODE_UPDATE_PROB, bct[i]);
+ }
+}
+
+static void update_mbintra_mode_probs(VP9_COMP* const cpi,
+ vp9_writer* const bc) {
+ VP9_COMMON *const cm = &cpi->common;
+ int j;
+ vp9_prob pnew[VP9_INTRA_MODES - 1];
+ unsigned int bct[VP9_INTRA_MODES - 1][2];
+
+ for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
+ update_mode(bc, VP9_INTRA_MODES, vp9_intra_mode_encodings,
+ vp9_intra_mode_tree, pnew,
+ cm->fc.y_mode_prob[j], bct,
+ (unsigned int *)cpi->y_mode_count[j]);
+}
+
+void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *bc) {
+ VP9_COMMON *const pc = &cpi->common;
+ int k;
+
+ for (k = 0; k < MBSKIP_CONTEXTS; ++k) {
+ vp9_cond_prob_diff_update(bc, &pc->fc.mbskip_probs[k],
+ VP9_MODE_UPDATE_PROB, pc->fc.mbskip_count[k]);
+ }
+}
+
+static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) {
+ write_token(bc, vp9_intra_mode_tree, p, vp9_intra_mode_encodings + m);
+}
+
+static void update_switchable_interp_probs(VP9_COMP *const cpi,
+ vp9_writer* const bc) {
+ VP9_COMMON *const pc = &cpi->common;
+ unsigned int branch_ct[VP9_SWITCHABLE_FILTERS + 1]
+ [VP9_SWITCHABLE_FILTERS - 1][2];
+ vp9_prob new_prob[VP9_SWITCHABLE_FILTERS + 1][VP9_SWITCHABLE_FILTERS - 1];
+ int i, j;
+ for (j = 0; j <= VP9_SWITCHABLE_FILTERS; ++j) {
+ vp9_tree_probs_from_distribution(
+ vp9_switchable_interp_tree,
+ new_prob[j], branch_ct[j],
+ pc->fc.switchable_interp_count[j], 0);
+ }
+ for (j = 0; j <= VP9_SWITCHABLE_FILTERS; ++j) {
+ for (i = 0; i < VP9_SWITCHABLE_FILTERS - 1; ++i) {
+ vp9_cond_prob_diff_update(bc, &pc->fc.switchable_interp_prob[j][i],
+ VP9_MODE_UPDATE_PROB, branch_ct[j][i]);
+ }
+ }
+#ifdef MODE_STATS
+ if (!cpi->dummy_packing)
+ update_switchable_interp_stats(pc);
+#endif
+}
+
+static void update_inter_mode_probs(VP9_COMMON *pc, vp9_writer* const bc) {
+ int i, j;
+
+ for (i = 0; i < INTER_MODE_CONTEXTS; i++) {
+ for (j = 0; j < VP9_INTER_MODES - 1; j++) {
+ vp9_cond_prob_diff_update(bc, &pc->fc.inter_mode_probs[i][j],
+ VP9_MODE_UPDATE_PROB,
+ pc->fc.inter_mode_counts[i][j]);
+ }
+ }
+}
+
+static void pack_mb_tokens(vp9_writer* const bc,
+ TOKENEXTRA **tp,
+ const TOKENEXTRA *const stop) {
+ TOKENEXTRA *p = *tp;
+
+ while (p < stop) {
+ const int t = p->token;
+ const struct vp9_token *const a = vp9_coef_encodings + t;
+ const vp9_extra_bit *const b = vp9_extra_bits + t;
+ int i = 0;
+ const vp9_prob *pp;
+ int v = a->value;
+ int n = a->len;
+ vp9_prob probs[ENTROPY_NODES];
+
+ if (t == EOSB_TOKEN) {
+ ++p;
+ break;
+ }
+ if (t >= TWO_TOKEN) {
+ vp9_model_to_full_probs(p->context_tree, probs);
+ pp = probs;
+ } else {
+ pp = p->context_tree;
+ }
+ assert(pp != 0);
+
+ /* skip one or two nodes */
+#if !CONFIG_BALANCED_COEFTREE
+ if (p->skip_eob_node) {
+ n -= p->skip_eob_node;
+ i = 2 * p->skip_eob_node;
+ }
+#endif
+
+ do {
+ const int bb = (v >> --n) & 1;
+#if CONFIG_BALANCED_COEFTREE
+ if (i == 2 && p->skip_eob_node) {
+ i += 2;
+ assert(bb == 1);
+ continue;
+ }
+#endif
+ vp9_write(bc, bb, pp[i >> 1]);
+ i = vp9_coef_tree[i + bb];
+ } while (n);
+
+ if (b->base_val) {
+ const int e = p->extra, l = b->len;
+
+ if (l) {
+ const unsigned char *pb = b->prob;
+ int v = e >> 1;
+ int n = l; /* number of bits in v, assumed nonzero */
+ int i = 0;
+
+ do {
+ const int bb = (v >> --n) & 1;
+ vp9_write(bc, bb, pb[i >> 1]);
+ i = b->tree[i + bb];
+ } while (n);
+ }
+
+ vp9_write_bit(bc, e & 1);
+ }
+ ++p;
+ }
+
+ *tp = p;
+}
+
+static void write_sb_mv_ref(vp9_writer *bc, MB_PREDICTION_MODE m,
+ const vp9_prob *p) {
+#if CONFIG_DEBUG
+ assert(NEARESTMV <= m && m <= NEWMV);
+#endif
+ write_token(bc, vp9_sb_mv_ref_tree, p,
+ vp9_sb_mv_ref_encoding_array - NEARESTMV + m);
+}
+
+// This function writes the current macroblock's segment id to the bitstream.
+// It should only be called if a segment map update is indicated.
+static void write_mb_segid(vp9_writer *bc,
+ const MB_MODE_INFO *mi, const MACROBLOCKD *xd) {
+ if (xd->segmentation_enabled && xd->update_mb_segmentation_map)
+ treed_write(bc, vp9_segment_tree, xd->mb_segment_tree_probs,
+ mi->segment_id, 3);
+}
+
+// This function encodes the reference frame
+static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) {
+ VP9_COMMON *const pc = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *mi = &xd->mode_info_context->mbmi;
+ const int segment_id = mi->segment_id;
+ int seg_ref_active = vp9_segfeature_active(xd, segment_id,
+ SEG_LVL_REF_FRAME);
+ // If segment level coding of this signal is disabled...
+ // or the segment allows multiple reference frame options
+ if (!seg_ref_active) {
+ // does the feature use compound prediction or not
+ // (if not specified at the frame/segment level)
+ if (pc->comp_pred_mode == HYBRID_PREDICTION) {
+ vp9_write(bc, mi->ref_frame[1] > INTRA_FRAME,
+ vp9_get_pred_prob(pc, xd, PRED_COMP_INTER_INTER));
+ } else {
+ assert((mi->ref_frame[1] <= INTRA_FRAME) ==
+ (pc->comp_pred_mode == SINGLE_PREDICTION_ONLY));
+ }
+
+ if (mi->ref_frame[1] > INTRA_FRAME) {
+ vp9_write(bc, mi->ref_frame[0] == GOLDEN_FRAME,
+ vp9_get_pred_prob(pc, xd, PRED_COMP_REF_P));
+ } else {
+ vp9_write(bc, mi->ref_frame[0] != LAST_FRAME,
+ vp9_get_pred_prob(pc, xd, PRED_SINGLE_REF_P1));
+ if (mi->ref_frame[0] != LAST_FRAME)
+ vp9_write(bc, mi->ref_frame[0] != GOLDEN_FRAME,
+ vp9_get_pred_prob(pc, xd, PRED_SINGLE_REF_P2));
+ }
+ } else {
+ assert(mi->ref_frame[1] <= INTRA_FRAME);
+ assert(vp9_get_segdata(xd, segment_id, SEG_LVL_REF_FRAME) ==
+ mi->ref_frame[0]);
+ }
+
+ // If using the prediction model we have nothing further to do because
+ // the reference frame is fully coded by the segment.
+}
+
+static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
+ vp9_writer *bc, int mi_row, int mi_col) {
+ VP9_COMMON *const pc = &cpi->common;
+ const nmv_context *nmvc = &pc->fc.nmvc;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *const mi = &m->mbmi;
+ const MV_REFERENCE_FRAME rf = mi->ref_frame[0];
+ const MB_PREDICTION_MODE mode = mi->mode;
+ const int segment_id = mi->segment_id;
+ int skip_coeff;
+
+ xd->prev_mode_info_context = pc->prev_mi + (m - pc->mi);
+ x->partition_info = x->pi + (m - pc->mi);
+
+#ifdef ENTROPY_STATS
+ active_section = 9;
+#endif
+
+ if (cpi->mb.e_mbd.update_mb_segmentation_map) {
+ // Is temporal coding of the segment map enabled
+ if (pc->temporal_update) {
+ unsigned char prediction_flag = vp9_get_pred_flag(xd, PRED_SEG_ID);
+ vp9_prob pred_prob = vp9_get_pred_prob(pc, xd, PRED_SEG_ID);
+
+ // Code the segment id prediction flag for this mb
+ vp9_write(bc, prediction_flag, pred_prob);
+
+ // If the mb segment id wasn't predicted, code it explicitly
+ if (!prediction_flag)
+ write_mb_segid(bc, mi, &cpi->mb.e_mbd);
+ } else {
+ // Normal unpredicted coding
+ write_mb_segid(bc, mi, &cpi->mb.e_mbd);
+ }
+ }
+
+ if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
+ skip_coeff = 1;
+ } else {
+ skip_coeff = m->mbmi.mb_skip_coeff;
+ vp9_write(bc, skip_coeff,
+ vp9_get_pred_prob(pc, xd, PRED_MBSKIP));
+ }
+
+ if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME))
+ vp9_write(bc, rf != INTRA_FRAME,
+ vp9_get_pred_prob(pc, xd, PRED_INTRA_INTER));
+
+ if (mi->sb_type >= BLOCK_SIZE_SB8X8 && pc->txfm_mode == TX_MODE_SELECT &&
+ !(rf != INTRA_FRAME &&
+ (skip_coeff || vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)))) {
+ TX_SIZE sz = mi->txfm_size;
+ const vp9_prob *tx_probs = vp9_get_pred_probs(pc, xd, PRED_TX_SIZE);
+ vp9_write(bc, sz != TX_4X4, tx_probs[0]);
+ if (mi->sb_type >= BLOCK_SIZE_MB16X16 && sz != TX_4X4) {
+ vp9_write(bc, sz != TX_8X8, tx_probs[1]);
+ if (mi->sb_type >= BLOCK_SIZE_SB32X32 && sz != TX_8X8)
+ vp9_write(bc, sz != TX_16X16, tx_probs[2]);
+ }
+ }
+
+ if (rf == INTRA_FRAME) {
+#ifdef ENTROPY_STATS
+ active_section = 6;
+#endif
+
+ if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
+ const BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
+ const int bwl = b_width_log2(bsize), bhl = b_height_log2(bsize);
+ const int bsl = MIN(bwl, bhl);
+ write_intra_mode(bc, mode, pc->fc.y_mode_prob[MIN(3, bsl)]);
+ } else {
+ int idx, idy;
+ int bw = 1 << b_width_log2(mi->sb_type);
+ int bh = 1 << b_height_log2(mi->sb_type);
+ for (idy = 0; idy < 2; idy += bh)
+ for (idx = 0; idx < 2; idx += bw) {
+ MB_PREDICTION_MODE bm = m->bmi[idy * 2 + idx].as_mode.first;
+ write_intra_mode(bc, bm, pc->fc.y_mode_prob[0]);
+ }
+ }
+ write_intra_mode(bc, mi->uv_mode,
+ pc->fc.uv_mode_prob[mode]);
+ } else {
+ vp9_prob *mv_ref_p;
+
+ encode_ref_frame(cpi, bc);
+
+ mv_ref_p = cpi->common.fc.inter_mode_probs[mi->mb_mode_context[rf]];
+
+#ifdef ENTROPY_STATS
+ active_section = 3;
+#endif
+
+ // If segment skip is not enabled code the mode.
+ if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
+ if (mi->sb_type >= BLOCK_SIZE_SB8X8) {
+ write_sb_mv_ref(bc, mode, mv_ref_p);
+ vp9_accum_mv_refs(&cpi->common, mode, mi->mb_mode_context[rf]);
+ }
+ }
+
+ if (cpi->common.mcomp_filter_type == SWITCHABLE) {
+ write_token(bc, vp9_switchable_interp_tree,
+ vp9_get_pred_probs(&cpi->common, xd,
+ PRED_SWITCHABLE_INTERP),
+ vp9_switchable_interp_encodings +
+ vp9_switchable_interp_map[mi->interp_filter]);
+ } else {
+ assert(mi->interp_filter == cpi->common.mcomp_filter_type);
+ }
+
+ if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
+ int j;
+ MB_PREDICTION_MODE blockmode;
+ int_mv blockmv;
+ int bwl = b_width_log2(mi->sb_type), bw = 1 << bwl;
+ int bhl = b_height_log2(mi->sb_type), bh = 1 << bhl;
+ int idx, idy;
+ for (idy = 0; idy < 2; idy += bh) {
+ for (idx = 0; idx < 2; idx += bw) {
+ j = idy * 2 + idx;
+ blockmode = cpi->mb.partition_info->bmi[j].mode;
+ blockmv = cpi->mb.partition_info->bmi[j].mv;
+ write_sb_mv_ref(bc, blockmode, mv_ref_p);
+ vp9_accum_mv_refs(&cpi->common, blockmode, mi->mb_mode_context[rf]);
+ if (blockmode == NEWMV) {
+#ifdef ENTROPY_STATS
+ active_section = 11;
+#endif
+ vp9_encode_mv(bc, &blockmv.as_mv, &mi->best_mv.as_mv,
+ nmvc, xd->allow_high_precision_mv);
+
+ if (mi->ref_frame[1] > INTRA_FRAME)
+ vp9_encode_mv(bc,
+ &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
+ &mi->best_second_mv.as_mv,
+ nmvc, xd->allow_high_precision_mv);
+ }
+ }
+ }
+ } else if (mode == NEWMV) {
+#ifdef ENTROPY_STATS
+ active_section = 5;
+#endif
+ vp9_encode_mv(bc,
+ &mi->mv[0].as_mv, &mi->best_mv.as_mv,
+ nmvc, xd->allow_high_precision_mv);
+
+ if (mi->ref_frame[1] > INTRA_FRAME)
+ vp9_encode_mv(bc,
+ &mi->mv[1].as_mv, &mi->best_second_mv.as_mv,
+ nmvc, xd->allow_high_precision_mv);
+ }
+ }
+}
+
+static void write_mb_modes_kf(const VP9_COMP *cpi,
+ MODE_INFO *m,
+ vp9_writer *bc, int mi_row, int mi_col) {
+ const VP9_COMMON *const c = &cpi->common;
+ const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ const int ym = m->mbmi.mode;
+ const int mis = c->mode_info_stride;
+ const int segment_id = m->mbmi.segment_id;
+ int skip_coeff;
+
+ if (xd->update_mb_segmentation_map)
+ write_mb_segid(bc, &m->mbmi, xd);
+
+ if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
+ skip_coeff = 1;
+ } else {
+ skip_coeff = m->mbmi.mb_skip_coeff;
+ vp9_write(bc, skip_coeff, vp9_get_pred_prob(c, xd, PRED_MBSKIP));
+ }
+
+ if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8 && c->txfm_mode == TX_MODE_SELECT) {
+ TX_SIZE sz = m->mbmi.txfm_size;
+ const vp9_prob *tx_probs = vp9_get_pred_probs(c, xd, PRED_TX_SIZE);
+ vp9_write(bc, sz != TX_4X4, tx_probs[0]);
+ if (m->mbmi.sb_type >= BLOCK_SIZE_MB16X16 && sz != TX_4X4) {
+ vp9_write(bc, sz != TX_8X8, tx_probs[1]);
+ if (m->mbmi.sb_type >= BLOCK_SIZE_SB32X32 && sz != TX_8X8)
+ vp9_write(bc, sz != TX_16X16, tx_probs[2]);
+ }
+ }
+
+ if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
+ const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis);
+ const MB_PREDICTION_MODE L = xd->left_available ?
+ left_block_mode(m, 0) : DC_PRED;
+ write_intra_mode(bc, ym, c->kf_y_mode_prob[A][L]);
+ } else {
+ int idx, idy;
+ int bw = 1 << b_width_log2(m->mbmi.sb_type);
+ int bh = 1 << b_height_log2(m->mbmi.sb_type);
+ for (idy = 0; idy < 2; idy += bh) {
+ for (idx = 0; idx < 2; idx += bw) {
+ int i = idy * 2 + idx;
+ const MB_PREDICTION_MODE A = above_block_mode(m, i, mis);
+ const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
+ left_block_mode(m, i) : DC_PRED;
+ const int bm = m->bmi[i].as_mode.first;
+#ifdef ENTROPY_STATS
+ ++intra_mode_stats[A][L][bm];
+#endif
+ write_intra_mode(bc, bm, c->kf_y_mode_prob[A][L]);
+ }
+ }
+ }
+
+ write_intra_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
+}
+
+static void write_modes_b(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
+ TOKENEXTRA **tok, TOKENEXTRA *tok_end,
+ int mi_row, int mi_col) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+
+ if (m->mbmi.sb_type < BLOCK_SIZE_SB8X8)
+ if (xd->ab_index > 0)
+ return;
+ xd->mode_info_context = m;
+ set_mi_row_col(&cpi->common, xd, mi_row,
+ 1 << mi_height_log2(m->mbmi.sb_type),
+ mi_col, 1 << mi_width_log2(m->mbmi.sb_type));
+ if ((cm->frame_type == KEY_FRAME) || cm->intra_only) {
+ write_mb_modes_kf(cpi, m, bc, mi_row, mi_col);
+#ifdef ENTROPY_STATS
+ active_section = 8;
+#endif
+ } else {
+ pack_inter_mode_mvs(cpi, m, bc, mi_row, mi_col);
+#ifdef ENTROPY_STATS
+ active_section = 1;
+#endif
+ }
+
+ assert(*tok < tok_end);
+ pack_mb_tokens(bc, tok, tok_end);
+}
+
+static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
+ TOKENEXTRA **tok, TOKENEXTRA *tok_end,
+ int mi_row, int mi_col,
+ BLOCK_SIZE_TYPE bsize) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ const int mis = cm->mode_info_stride;
+ int bwl, bhl;
+ int bsl = b_width_log2(bsize);
+ int bs = (1 << bsl) / 4; // mode_info step for subsize
+ int n;
+ PARTITION_TYPE partition;
+ BLOCK_SIZE_TYPE subsize;
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ bwl = b_width_log2(m->mbmi.sb_type);
+ bhl = b_height_log2(m->mbmi.sb_type);
+
+ // derive the partition type from the coded block sizes
+ if ((bwl == bsl) && (bhl == bsl))
+ partition = PARTITION_NONE;
+ else if ((bwl == bsl) && (bhl < bsl))
+ partition = PARTITION_HORZ;
+ else if ((bwl < bsl) && (bhl == bsl))
+ partition = PARTITION_VERT;
+ else if ((bwl < bsl) && (bhl < bsl))
+ partition = PARTITION_SPLIT;
+ else
+ assert(0);
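+
+ // Worked example: bwl, bhl and bsl are log2 block widths in 4x4 units, so
+ // for a 64x64 bsize bsl is 4; a 64x32 sb_type gives bwl == 4 and bhl == 3,
+ // hence PARTITION_HORZ, while a 32x32 sb_type gives bwl == bhl == 3, hence
+ // PARTITION_SPLIT.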
+
+ if (bsize < BLOCK_SIZE_SB8X8)
+ if (xd->ab_index > 0)
+ return;
+
+ if (bsize >= BLOCK_SIZE_SB8X8) {
+ int pl;
+ int idx = check_bsize_coverage(cm, xd, mi_row, mi_col, bsize);
+ xd->left_seg_context = cm->left_seg_context + (mi_row & MI_MASK);
+ xd->above_seg_context = cm->above_seg_context + mi_col;
+ pl = partition_plane_context(xd, bsize);
+ // encode the partition information
+ if (idx == 0)
+ write_token(bc, vp9_partition_tree,
+ cm->fc.partition_prob[cm->frame_type][pl],
+ vp9_partition_encodings + partition);
+ else if (idx > 0)
+ vp9_write(bc, partition == PARTITION_SPLIT,
+ cm->fc.partition_prob[cm->frame_type][pl][idx]);
+ }
+
+ subsize = get_subsize(bsize, partition);
+ *(get_sb_index(xd, subsize)) = 0;
+
+ switch (partition) {
+ case PARTITION_NONE:
+ write_modes_b(cpi, m, bc, tok, tok_end, mi_row, mi_col);
+ break;
+ case PARTITION_HORZ:
+ write_modes_b(cpi, m, bc, tok, tok_end, mi_row, mi_col);
+ *(get_sb_index(xd, subsize)) = 1;
+ if ((mi_row + bs) < cm->mi_rows)
+ write_modes_b(cpi, m + bs * mis, bc, tok, tok_end, mi_row + bs, mi_col);
+ break;
+ case PARTITION_VERT:
+ write_modes_b(cpi, m, bc, tok, tok_end, mi_row, mi_col);
+ *(get_sb_index(xd, subsize)) = 1;
+ if ((mi_col + bs) < cm->mi_cols)
+ write_modes_b(cpi, m + bs, bc, tok, tok_end, mi_row, mi_col + bs);
+ break;
+ case PARTITION_SPLIT:
+ for (n = 0; n < 4; n++) {
+ int j = n >> 1, i = n & 0x01;
+ *(get_sb_index(xd, subsize)) = n;
+ write_modes_sb(cpi, m + j * bs * mis + i * bs, bc, tok, tok_end,
+ mi_row + j * bs, mi_col + i * bs, subsize);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ // update partition context
+ if (bsize >= BLOCK_SIZE_SB8X8 &&
+ (bsize == BLOCK_SIZE_SB8X8 || partition != PARTITION_SPLIT)) {
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ update_partition_context(xd, subsize, bsize);
+ }
+}
+
+static void write_modes(VP9_COMP *cpi, vp9_writer* const bc,
+ TOKENEXTRA **tok, TOKENEXTRA *tok_end) {
+ VP9_COMMON *const c = &cpi->common;
+ const int mis = c->mode_info_stride;
+ MODE_INFO *m, *m_ptr = c->mi;
+ int mi_row, mi_col;
+
+ m_ptr += c->cur_tile_mi_col_start + c->cur_tile_mi_row_start * mis;
+
+ for (mi_row = c->cur_tile_mi_row_start;
+ mi_row < c->cur_tile_mi_row_end;
+ mi_row += 8, m_ptr += 8 * mis) {
+ m = m_ptr;
+ vpx_memset(c->left_seg_context, 0, sizeof(c->left_seg_context));
+ for (mi_col = c->cur_tile_mi_col_start;
+ mi_col < c->cur_tile_mi_col_end;
+ mi_col += 64 / MI_SIZE, m += 64 / MI_SIZE)
+ write_modes_sb(cpi, m, bc, tok, tok_end, mi_row, mi_col,
+ BLOCK_SIZE_SB64X64);
+ }
+}
+
+/* This function is used for debugging probability trees. */
+static void print_prob_tree(vp9_coeff_probs *coef_probs, int block_types) {
+ /* print coef probability tree */
+ int i, j, k, l, m;
+ FILE *f = fopen("enc_tree_probs.txt", "a");
+ fprintf(f, "{\n");
+ for (i = 0; i < block_types; i++) {
+ fprintf(f, " {\n");
+ for (j = 0; j < REF_TYPES; ++j) {
+ fprintf(f, " {\n");
+ for (k = 0; k < COEF_BANDS; k++) {
+ fprintf(f, " {\n");
+ for (l = 0; l < PREV_COEF_CONTEXTS; l++) {
+ fprintf(f, " {");
+ for (m = 0; m < ENTROPY_NODES; m++) {
+ fprintf(f, "%3u, ",
+ (unsigned int)(coef_probs[i][j][k][l][m]));
+ }
+ }
+ fprintf(f, " }\n");
+ }
+ fprintf(f, " }\n");
+ }
+ fprintf(f, " }\n");
+ }
+ fprintf(f, "}\n");
+ fclose(f);
+}
+
+static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE txfm_size) {
+ vp9_coeff_probs_model *coef_probs = cpi->frame_coef_probs[txfm_size];
+ vp9_coeff_count *coef_counts = cpi->coef_counts[txfm_size];
+ unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] =
+ cpi->common.fc.eob_branch_counts[txfm_size];
+ vp9_coeff_stats *coef_branch_ct = cpi->frame_branch_ct[txfm_size];
+ vp9_prob full_probs[ENTROPY_NODES];
+ int i, j, k, l;
+
+ for (i = 0; i < BLOCK_TYPES; ++i) {
+ for (j = 0; j < REF_TYPES; ++j) {
+ for (k = 0; k < COEF_BANDS; ++k) {
+ for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
+ if (l >= 3 && k == 0)
+ continue;
+ vp9_tree_probs_from_distribution(vp9_coef_tree,
+ full_probs,
+ coef_branch_ct[i][j][k][l],
+ coef_counts[i][j][k][l], 0);
+ vpx_memcpy(coef_probs[i][j][k][l], full_probs,
+ sizeof(vp9_prob) * UNCONSTRAINED_NODES);
+#if CONFIG_BALANCED_COEFTREE
+ coef_branch_ct[i][j][k][l][1][1] = eob_branch_ct[i][j][k][l] -
+ coef_branch_ct[i][j][k][l][1][0];
+ coef_probs[i][j][k][l][1] =
+ get_binary_prob(coef_branch_ct[i][j][k][l][1][0],
+ coef_branch_ct[i][j][k][l][1][1]);
+#else
+ coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
+ coef_branch_ct[i][j][k][l][0][0];
+ coef_probs[i][j][k][l][0] =
+ get_binary_prob(coef_branch_ct[i][j][k][l][0][0],
+ coef_branch_ct[i][j][k][l][0][1]);
+#endif
+#ifdef ENTROPY_STATS
+ if (!cpi->dummy_packing) {
+ int t;
+ for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
+ context_counters[txfm_size][i][j][k][l][t] +=
+ coef_counts[i][j][k][l][t];
+ context_counters[txfm_size][i][j][k][l][MAX_ENTROPY_TOKENS] +=
+ eob_branch_ct[i][j][k][l];
+ }
+#endif
+ }
+ }
+ }
+ }
+}
+
+static void build_coeff_contexts(VP9_COMP *cpi) {
+ TX_SIZE t;
+ for (t = TX_4X4; t <= TX_32X32; t++)
+ build_tree_distribution(cpi, t);
+}
+
+static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi,
+ TX_SIZE tx_size) {
+ vp9_coeff_probs_model *new_frame_coef_probs = cpi->frame_coef_probs[tx_size];
+ vp9_coeff_probs_model *old_frame_coef_probs =
+ cpi->common.fc.coef_probs[tx_size];
+ vp9_coeff_stats *frame_branch_ct = cpi->frame_branch_ct[tx_size];
+ int i, j, k, l, t;
+ int update[2] = {0, 0};
+ int savings;
+
+ const int entropy_nodes_update = UNCONSTRAINED_NODES;
+
+ const int tstart = 0;
+  /* dry run to see if there is any update at all needed */
+ savings = 0;
+ for (i = 0; i < BLOCK_TYPES; ++i) {
+ for (j = 0; j < REF_TYPES; ++j) {
+ for (k = 0; k < COEF_BANDS; ++k) {
+ // int prev_coef_savings[ENTROPY_NODES] = {0};
+ for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
+ for (t = tstart; t < entropy_nodes_update; ++t) {
+ vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
+ const vp9_prob oldp = old_frame_coef_probs[i][j][k][l][t];
+ const vp9_prob upd = VP9_COEF_UPDATE_PROB;
+ int s;
+ int u = 0;
+
+ if (l >= 3 && k == 0)
+ continue;
+ if (t == PIVOT_NODE)
+ s = prob_diff_update_savings_search_model(
+ frame_branch_ct[i][j][k][l][0],
+ old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
+ else
+ s = prob_diff_update_savings_search(
+ frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
+ if (s > 0 && newp != oldp)
+ u = 1;
+ if (u)
+ savings += s - (int)(vp9_cost_zero(upd));
+ else
+ savings -= (int)(vp9_cost_zero(upd));
+ update[u]++;
+ }
+ }
+ }
+ }
+ }
+
+ // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
+  /* Is any coef prob updated at all? */
+ if (update[1] == 0 || savings < 0) {
+ vp9_write_bit(bc, 0);
+ return;
+ }
+ vp9_write_bit(bc, 1);
+ for (i = 0; i < BLOCK_TYPES; ++i) {
+ for (j = 0; j < REF_TYPES; ++j) {
+ for (k = 0; k < COEF_BANDS; ++k) {
+ // int prev_coef_savings[ENTROPY_NODES] = {0};
+ for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
+ // calc probs and branch cts for this frame only
+ for (t = tstart; t < entropy_nodes_update; ++t) {
+ vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
+ vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t;
+ const vp9_prob upd = VP9_COEF_UPDATE_PROB;
+ int s;
+ int u = 0;
+ if (l >= 3 && k == 0)
+ continue;
+ if (t == PIVOT_NODE)
+ s = prob_diff_update_savings_search_model(
+ frame_branch_ct[i][j][k][l][0],
+ old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
+ else
+ s = prob_diff_update_savings_search(
+ frame_branch_ct[i][j][k][l][t],
+ *oldp, &newp, upd);
+ if (s > 0 && newp != *oldp)
+ u = 1;
+ vp9_write(bc, u, upd);
+#ifdef ENTROPY_STATS
+ if (!cpi->dummy_packing)
+ ++tree_update_hist[tx_size][i][j][k][l][t][u];
+#endif
+ if (u) {
+ /* send/use new probability */
+ write_prob_diff_update(bc, newp, *oldp);
+ *oldp = newp;
+ }
+ }
+ }
+ }
+ }
+ }
+}
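+
+// update_coef_probs_common() is a two-pass scheme: the "dry run" loop above
+// totals the savings, including the cost of the per-node update flags, and
+// a single 0 bit is written when the update is not worthwhile. The second
+// loop repeats the identical search so its decisions match the dry run,
+// then actually writes the update flags and the probability diffs.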
+
+static void update_coef_probs(VP9_COMP* const cpi, vp9_writer* const bc) {
+ const TXFM_MODE txfm_mode = cpi->common.txfm_mode;
+
+ vp9_clear_system_state();
+
+  // Build the coefficient contexts from counts collected in the encode loop
+ build_coeff_contexts(cpi);
+
+ update_coef_probs_common(bc, cpi, TX_4X4);
+
+  // only signal updates for transform sizes the current mode allows
+ if (txfm_mode > ONLY_4X4)
+ update_coef_probs_common(bc, cpi, TX_8X8);
+
+ if (txfm_mode > ALLOW_8X8)
+ update_coef_probs_common(bc, cpi, TX_16X16);
+
+ if (txfm_mode > ALLOW_16X16)
+ update_coef_probs_common(bc, cpi, TX_32X32);
+}
+
+static void encode_loopfilter(VP9_COMMON *pc, MACROBLOCKD *xd,
+ struct vp9_write_bit_buffer *wb) {
+ int i;
+
+  // Encode the loop filter level and sharpness
+ vp9_wb_write_literal(wb, pc->filter_level, 6);
+ vp9_wb_write_literal(wb, pc->sharpness_level, 3);
+
+ // Write out loop filter deltas applied at the MB level based on mode or
+ // ref frame (if they are enabled).
+ vp9_wb_write_bit(wb, xd->mode_ref_lf_delta_enabled);
+
+ if (xd->mode_ref_lf_delta_enabled) {
+    // Do the deltas need to be updated?
+ vp9_wb_write_bit(wb, xd->mode_ref_lf_delta_update);
+ if (xd->mode_ref_lf_delta_update) {
+ // Send update
+ for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
+ const int delta = xd->ref_lf_deltas[i];
+
+ // Frame level data
+ if (delta != xd->last_ref_lf_deltas[i]) {
+ xd->last_ref_lf_deltas[i] = delta;
+ vp9_wb_write_bit(wb, 1);
+
+ assert(delta != 0);
+ vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
+ vp9_wb_write_bit(wb, delta < 0);
+ } else {
+ vp9_wb_write_bit(wb, 0);
+ }
+ }
+
+ // Send update
+ for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
+ const int delta = xd->mode_lf_deltas[i];
+ if (delta != xd->last_mode_lf_deltas[i]) {
+ xd->last_mode_lf_deltas[i] = delta;
+ vp9_wb_write_bit(wb, 1);
+
+ assert(delta != 0);
+ vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
+ vp9_wb_write_bit(wb, delta < 0);
+ } else {
+ vp9_wb_write_bit(wb, 0);
+ }
+ }
+ }
+ }
+}
+
+static void write_delta_q(struct vp9_write_bit_buffer *wb, int delta_q) {
+ if (delta_q != 0) {
+ vp9_wb_write_bit(wb, 1);
+ vp9_wb_write_literal(wb, abs(delta_q), 4);
+ vp9_wb_write_bit(wb, delta_q < 0);
+ } else {
+ vp9_wb_write_bit(wb, 0);
+ }
+}
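+
+// Worked example: write_delta_q(wb, -5) emits an update bit of 1, the 4-bit
+// magnitude 0101 and a sign bit of 1 (six bits in total), while
+// write_delta_q(wb, 0) emits a single 0 bit.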
+
+static void encode_quantization(VP9_COMMON *cm,
+ struct vp9_write_bit_buffer *wb) {
+ vp9_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
+ write_delta_q(wb, cm->y_dc_delta_q);
+ write_delta_q(wb, cm->uv_dc_delta_q);
+ write_delta_q(wb, cm->uv_ac_delta_q);
+}
+
+
+static void encode_segmentation(VP9_COMP *cpi,
+ struct vp9_write_bit_buffer *wb) {
+ int i, j;
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+
+ vp9_wb_write_bit(wb, xd->segmentation_enabled);
+ if (!xd->segmentation_enabled)
+ return;
+
+ // Segmentation map
+ vp9_wb_write_bit(wb, xd->update_mb_segmentation_map);
+ if (xd->update_mb_segmentation_map) {
+ // Select the coding strategy (temporal or spatial)
+ vp9_choose_segmap_coding_method(cpi);
+ // Write out probabilities used to decode unpredicted macro-block segments
+ for (i = 0; i < MB_SEG_TREE_PROBS; i++) {
+ const int prob = xd->mb_segment_tree_probs[i];
+ const int update = prob != MAX_PROB;
+ vp9_wb_write_bit(wb, update);
+ if (update)
+ vp9_wb_write_literal(wb, prob, 8);
+ }
+
+ // Write out the chosen coding method.
+ vp9_wb_write_bit(wb, cm->temporal_update);
+ if (cm->temporal_update) {
+ for (i = 0; i < PREDICTION_PROBS; i++) {
+ const int prob = cm->segment_pred_probs[i];
+ const int update = prob != MAX_PROB;
+ vp9_wb_write_bit(wb, update);
+ if (update)
+ vp9_wb_write_literal(wb, prob, 8);
+ }
+ }
+ }
+
+ // Segmentation data
+ vp9_wb_write_bit(wb, xd->update_mb_segmentation_data);
+ if (xd->update_mb_segmentation_data) {
+ vp9_wb_write_bit(wb, xd->mb_segment_abs_delta);
+
+ for (i = 0; i < MAX_MB_SEGMENTS; i++) {
+ for (j = 0; j < SEG_LVL_MAX; j++) {
+ const int active = vp9_segfeature_active(xd, i, j);
+ vp9_wb_write_bit(wb, active);
+ if (active) {
+ const int data = vp9_get_segdata(xd, i, j);
+ const int data_max = vp9_seg_feature_data_max(j);
+
+ if (vp9_is_segfeature_signed(j)) {
+ vp9_encode_unsigned_max(wb, abs(data), data_max);
+ vp9_wb_write_bit(wb, data < 0);
+ } else {
+ vp9_encode_unsigned_max(wb, data, data_max);
+ }
+ }
+ }
+ }
+ }
+}
+
+
+static void encode_txfm_probs(VP9_COMP *cpi, vp9_writer *w) {
+ VP9_COMMON *const cm = &cpi->common;
+
+ // Mode
+ vp9_write_literal(w, MIN(cm->txfm_mode, ALLOW_32X32), 2);
+ if (cm->txfm_mode >= ALLOW_32X32)
+ vp9_write_bit(w, cm->txfm_mode == TX_MODE_SELECT);
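+  // Assuming the usual ordering ONLY_4X4 < ALLOW_8X8 < ALLOW_16X16 <
+  // ALLOW_32X32 < TX_MODE_SELECT, the mode costs at most 3 bits:
+  // TX_MODE_SELECT shares the 2-bit codeword of ALLOW_32X32 and is
+  // distinguished from it by the trailing select bit.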
+
+ // Probabilities
+ if (cm->txfm_mode == TX_MODE_SELECT) {
+ int i, j;
+ unsigned int ct_8x8p[TX_SIZE_MAX_SB - 3][2];
+ unsigned int ct_16x16p[TX_SIZE_MAX_SB - 2][2];
+ unsigned int ct_32x32p[TX_SIZE_MAX_SB - 1][2];
+
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ tx_counts_to_branch_counts_8x8(cm->fc.tx_count_8x8p[i],
+ ct_8x8p);
+ for (j = 0; j < TX_SIZE_MAX_SB - 3; j++) {
+ vp9_cond_prob_diff_update(w, &cm->fc.tx_probs_8x8p[i][j],
+ VP9_MODE_UPDATE_PROB, ct_8x8p[j]);
+ }
+ }
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ tx_counts_to_branch_counts_16x16(cm->fc.tx_count_16x16p[i],
+ ct_16x16p);
+ for (j = 0; j < TX_SIZE_MAX_SB - 2; j++) {
+ vp9_cond_prob_diff_update(w, &cm->fc.tx_probs_16x16p[i][j],
+ VP9_MODE_UPDATE_PROB, ct_16x16p[j]);
+ }
+ }
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
+ tx_counts_to_branch_counts_32x32(cm->fc.tx_count_32x32p[i],
+ ct_32x32p);
+ for (j = 0; j < TX_SIZE_MAX_SB - 1; j++) {
+ vp9_cond_prob_diff_update(w, &cm->fc.tx_probs_32x32p[i][j],
+ VP9_MODE_UPDATE_PROB, ct_32x32p[j]);
+ }
+ }
+#ifdef MODE_STATS
+ if (!cpi->dummy_packing)
+ update_tx_count_stats(cm);
+#endif
+ }
+}
+
+static void write_interp_filter_type(INTERPOLATIONFILTERTYPE type,
+ struct vp9_write_bit_buffer *wb) {
+ vp9_wb_write_bit(wb, type == SWITCHABLE);
+ if (type != SWITCHABLE)
+ vp9_wb_write_literal(wb, type, 2);
+}
+
+static void fix_mcomp_filter_type(VP9_COMP *cpi) {
+ VP9_COMMON *const cm = &cpi->common;
+
+ if (cm->mcomp_filter_type == SWITCHABLE) {
+ // Check to see if only one of the filters is actually used
+ int count[VP9_SWITCHABLE_FILTERS];
+ int i, j, c = 0;
+ for (i = 0; i < VP9_SWITCHABLE_FILTERS; ++i) {
+ count[i] = 0;
+ for (j = 0; j <= VP9_SWITCHABLE_FILTERS; ++j)
+ count[i] += cm->fc.switchable_interp_count[j][i];
+ c += (count[i] > 0);
+ }
+ if (c == 1) {
+ // Only one filter is used. So set the filter at frame level
+ for (i = 0; i < VP9_SWITCHABLE_FILTERS; ++i) {
+ if (count[i]) {
+ cm->mcomp_filter_type = vp9_switchable_interp[i];
+ break;
+ }
+ }
+ }
+ }
+}
+
+static void write_tile_info(VP9_COMMON *cm, struct vp9_write_bit_buffer *wb) {
+ int min_log2_tiles, delta_log2_tiles, n_tile_bits, n;
+ vp9_get_tile_n_bits(cm, &min_log2_tiles, &delta_log2_tiles);
+ n_tile_bits = cm->log2_tile_columns - min_log2_tiles;
+ for (n = 0; n < delta_log2_tiles; n++) {
+ if (n_tile_bits--) {
+ vp9_wb_write_bit(wb, 1);
+ } else {
+ vp9_wb_write_bit(wb, 0);
+ break;
+ }
+ }
+
+ vp9_wb_write_bit(wb, cm->log2_tile_rows != 0);
+ if (cm->log2_tile_rows != 0)
+ vp9_wb_write_bit(wb, cm->log2_tile_rows != 1);
+}
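+
+// Tile-column example: with min_log2_tiles == 0 and delta_log2_tiles == 2
+// (hypothetical values), log2_tile_columns == 1 is sent as the bits "10":
+// one 1 for the single step above the minimum, then a terminating 0.
+// log2_tile_rows is coded with at most two bits, so only the values 0..2
+// can be signalled.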
+
+static int get_refresh_mask(VP9_COMP *cpi) {
+  // Should the GF or ARF be updated using the transmitted frame or buffer?
+#if CONFIG_MULTIPLE_ARF
+ if (!cpi->multi_arf_enabled && cpi->refresh_golden_frame &&
+ !cpi->refresh_alt_ref_frame) {
+#else
+ if (cpi->refresh_golden_frame && !cpi->refresh_alt_ref_frame) {
+#endif
+ // Preserve the previously existing golden frame and update the frame in
+ // the alt ref slot instead. This is highly specific to the use of
+ // alt-ref as a forward reference, and this needs to be generalized as
+ // other uses are implemented (like RTC/temporal scaling)
+ //
+ // gld_fb_idx and alt_fb_idx need to be swapped for future frames, but
+ // that happens in vp9_onyx_if.c:update_reference_frames() so that it can
+ // be done outside of the recode loop.
+ return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
+ (cpi->refresh_golden_frame << cpi->alt_fb_idx);
+ } else {
+ int arf_idx = cpi->alt_fb_idx;
+#if CONFIG_MULTIPLE_ARF
+ // Determine which ARF buffer to use to encode this ARF frame.
+ if (cpi->multi_arf_enabled) {
+ int sn = cpi->sequence_number;
+ arf_idx = (cpi->frame_coding_order[sn] < 0) ?
+ cpi->arf_buffer_idx[sn + 1] :
+ cpi->arf_buffer_idx[sn];
+ }
+#endif
+ return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
+ (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
+ (cpi->refresh_alt_ref_frame << arf_idx);
+ }
+}
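+
+// Worked example for the non-forward-ref path: with (hypothetical) buffer
+// indices lst_fb_idx == 0, gld_fb_idx == 1, alt_fb_idx == 2 and
+// refresh_last == refresh_golden == 1, refresh_alt_ref == 0, the returned
+// mask is (1 << 0) | (1 << 1) | (0 << 2) == 0x3: the last and golden
+// buffers are refreshed with the current frame.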
+
+static void write_display_size(VP9_COMP *cpi, struct vp9_write_bit_buffer *wb) {
+ VP9_COMMON *const cm = &cpi->common;
+
+ const int scaling_active = cm->width != cm->display_width ||
+ cm->height != cm->display_height;
+ vp9_wb_write_bit(wb, scaling_active);
+ if (scaling_active) {
+ vp9_wb_write_literal(wb, cm->display_width - 1, 16);
+ vp9_wb_write_literal(wb, cm->display_height - 1, 16);
+ }
+}
+
+static void write_frame_size(VP9_COMP *cpi,
+ struct vp9_write_bit_buffer *wb) {
+ VP9_COMMON *const cm = &cpi->common;
+ vp9_wb_write_literal(wb, cm->width - 1, 16);
+ vp9_wb_write_literal(wb, cm->height - 1, 16);
+
+ write_display_size(cpi, wb);
+}
+
+static void write_frame_size_with_refs(VP9_COMP *cpi,
+ struct vp9_write_bit_buffer *wb) {
+ VP9_COMMON *const cm = &cpi->common;
+ int refs[ALLOWED_REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx,
+ cpi->alt_fb_idx};
+ int i, found = 0;
+
+ for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
+ YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->ref_frame_map[refs[i]]];
+ found = cm->width == cfg->y_crop_width &&
+ cm->height == cfg->y_crop_height;
+ vp9_wb_write_bit(wb, found);
+ if (found)
+ break;
+ }
+
+ if (!found) {
+ vp9_wb_write_literal(wb, cm->width - 1, 16);
+ vp9_wb_write_literal(wb, cm->height - 1, 16);
+ }
+
+ write_display_size(cpi, wb);
+}
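+
+// For each of the three allowed references, one bit signals whether the
+// coded frame has the same dimensions as that reference's buffer; the first
+// match ends the scan, and only if none matches is the size sent explicitly
+// as two 16-bit values.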
+
+static void write_sync_code(struct vp9_write_bit_buffer *wb) {
+ vp9_wb_write_literal(wb, SYNC_CODE_0, 8);
+ vp9_wb_write_literal(wb, SYNC_CODE_1, 8);
+ vp9_wb_write_literal(wb, SYNC_CODE_2, 8);
+}
+
+static void write_uncompressed_header(VP9_COMP *cpi,
+ struct vp9_write_bit_buffer *wb) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+
+ // frame marker bits
+ vp9_wb_write_literal(wb, 0x2, 2);
+
+ // bitstream version.
+ // 00 - profile 0. 4:2:0 only
+ // 10 - profile 1. adds 4:4:4, 4:2:2, alpha
+ vp9_wb_write_bit(wb, cm->version);
+ vp9_wb_write_bit(wb, 0);
+
+ vp9_wb_write_bit(wb, 0);
+ vp9_wb_write_bit(wb, cm->frame_type);
+ vp9_wb_write_bit(wb, cm->show_frame);
+ vp9_wb_write_bit(wb, cm->error_resilient_mode);
+
+ if (cm->frame_type == KEY_FRAME) {
+ write_sync_code(wb);
+ // colorspaces
+ // 000 - Unknown
+ // 001 - BT.601
+ // 010 - BT.709
+ // 011 - SMPTE-170
+ // 100 - SMPTE-240
+ // 101 - Reserved
+ // 110 - Reserved
+ // 111 - sRGB (RGB)
+ vp9_wb_write_literal(wb, 0, 3);
+ if (1 /* colorspace != sRGB */) {
+ vp9_wb_write_bit(wb, 0); // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
+ if (cm->version == 1) {
+ vp9_wb_write_bit(wb, cm->subsampling_x);
+ vp9_wb_write_bit(wb, cm->subsampling_y);
+ vp9_wb_write_bit(wb, 0); // has extra plane
+ }
+ } else {
+ assert(cm->version == 1);
+ vp9_wb_write_bit(wb, 0); // has extra plane
+ }
+
+ write_frame_size(cpi, wb);
+ } else {
+ const int refs[ALLOWED_REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx,
+ cpi->alt_fb_idx};
+ if (!cm->show_frame)
+ vp9_wb_write_bit(wb, cm->intra_only);
+
+ if (!cm->error_resilient_mode)
+ vp9_wb_write_literal(wb, cm->reset_frame_context, 2);
+
+ if (cm->intra_only) {
+ write_sync_code(wb);
+
+ vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES);
+ write_frame_size(cpi, wb);
+ } else {
+ int i;
+ vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES);
+ for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
+ vp9_wb_write_literal(wb, refs[i], NUM_REF_FRAMES_LG2);
+ vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[LAST_FRAME + i]);
+ }
+
+ write_frame_size_with_refs(cpi, wb);
+
+ vp9_wb_write_bit(wb, xd->allow_high_precision_mv);
+
+ fix_mcomp_filter_type(cpi);
+ write_interp_filter_type(cm->mcomp_filter_type, wb);
+ }
+ }
+
+ if (!cm->error_resilient_mode) {
+ vp9_wb_write_bit(wb, cm->refresh_frame_context);
+ vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
+ }
+
+ vp9_wb_write_literal(wb, cm->frame_context_idx, NUM_FRAME_CONTEXTS_LG2);
+
+ encode_loopfilter(cm, xd, wb);
+ encode_quantization(cm, wb);
+ encode_segmentation(cpi, wb);
+
+ write_tile_info(cm, wb);
+}
+
+void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
+ int i, bytes_packed;
+ VP9_COMMON *const pc = &cpi->common;
+ vp9_writer header_bc, residual_bc;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+
+ uint8_t *cx_data = dest;
+ struct vp9_write_bit_buffer wb = {dest, 0};
+ struct vp9_write_bit_buffer first_partition_size_wb;
+
+ write_uncompressed_header(cpi, &wb);
+ first_partition_size_wb = wb;
+  vp9_wb_write_literal(&wb, 0, 16);  // first partition size not yet known
+
+ bytes_packed = vp9_rb_bytes_written(&wb);
+ cx_data += bytes_packed;
+
+ compute_update_table();
+
+ vp9_start_encode(&header_bc, cx_data);
+
+#ifdef ENTROPY_STATS
+ if (pc->frame_type == INTER_FRAME)
+ active_section = 0;
+ else
+ active_section = 7;
+#endif
+
+ vp9_clear_system_state(); // __asm emms;
+
+ vp9_copy(pc->fc.pre_coef_probs, pc->fc.coef_probs);
+ vp9_copy(pc->fc.pre_y_mode_prob, pc->fc.y_mode_prob);
+ vp9_copy(pc->fc.pre_uv_mode_prob, pc->fc.uv_mode_prob);
+ vp9_copy(pc->fc.pre_partition_prob, pc->fc.partition_prob[INTER_FRAME]);
+ pc->fc.pre_nmvc = pc->fc.nmvc;
+ vp9_copy(pc->fc.pre_switchable_interp_prob, pc->fc.switchable_interp_prob);
+ vp9_copy(pc->fc.pre_inter_mode_probs, pc->fc.inter_mode_probs);
+ vp9_copy(pc->fc.pre_intra_inter_prob, pc->fc.intra_inter_prob);
+ vp9_copy(pc->fc.pre_comp_inter_prob, pc->fc.comp_inter_prob);
+ vp9_copy(pc->fc.pre_comp_ref_prob, pc->fc.comp_ref_prob);
+ vp9_copy(pc->fc.pre_single_ref_prob, pc->fc.single_ref_prob);
+ vp9_copy(pc->fc.pre_tx_probs_8x8p, pc->fc.tx_probs_8x8p);
+ vp9_copy(pc->fc.pre_tx_probs_16x16p, pc->fc.tx_probs_16x16p);
+ vp9_copy(pc->fc.pre_tx_probs_32x32p, pc->fc.tx_probs_32x32p);
+ vp9_copy(pc->fc.pre_mbskip_probs, pc->fc.mbskip_probs);
+
+ if (xd->lossless) {
+ pc->txfm_mode = ONLY_4X4;
+ } else {
+ encode_txfm_probs(cpi, &header_bc);
+ }
+
+ update_coef_probs(cpi, &header_bc);
+
+#ifdef ENTROPY_STATS
+ active_section = 2;
+#endif
+
+ vp9_update_skip_probs(cpi, &header_bc);
+
+ if (pc->frame_type != KEY_FRAME) {
+#ifdef ENTROPY_STATS
+ active_section = 1;
+#endif
+
+ update_inter_mode_probs(pc, &header_bc);
+ vp9_zero(cpi->common.fc.inter_mode_counts);
+
+ if (pc->mcomp_filter_type == SWITCHABLE)
+ update_switchable_interp_probs(cpi, &header_bc);
+
+ for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
+ vp9_cond_prob_diff_update(&header_bc, &pc->fc.intra_inter_prob[i],
+ VP9_MODE_UPDATE_PROB,
+ cpi->intra_inter_count[i]);
+
+ if (pc->allow_comp_inter_inter) {
+ const int comp_pred_mode = cpi->common.comp_pred_mode;
+ const int use_compound_pred = (comp_pred_mode != SINGLE_PREDICTION_ONLY);
+ const int use_hybrid_pred = (comp_pred_mode == HYBRID_PREDICTION);
+
+ vp9_write_bit(&header_bc, use_compound_pred);
+ if (use_compound_pred) {
+ vp9_write_bit(&header_bc, use_hybrid_pred);
+ if (use_hybrid_pred) {
+ for (i = 0; i < COMP_INTER_CONTEXTS; i++)
+ vp9_cond_prob_diff_update(&header_bc, &pc->fc.comp_inter_prob[i],
+ VP9_MODE_UPDATE_PROB,
+ cpi->comp_inter_count[i]);
+ }
+ }
+ }
+
+ if (pc->comp_pred_mode != COMP_PREDICTION_ONLY) {
+ for (i = 0; i < REF_CONTEXTS; i++) {
+ vp9_cond_prob_diff_update(&header_bc, &pc->fc.single_ref_prob[i][0],
+ VP9_MODE_UPDATE_PROB,
+ cpi->single_ref_count[i][0]);
+ vp9_cond_prob_diff_update(&header_bc, &pc->fc.single_ref_prob[i][1],
+ VP9_MODE_UPDATE_PROB,
+ cpi->single_ref_count[i][1]);
+ }
+ }
+
+ if (pc->comp_pred_mode != SINGLE_PREDICTION_ONLY) {
+ for (i = 0; i < REF_CONTEXTS; i++)
+ vp9_cond_prob_diff_update(&header_bc, &pc->fc.comp_ref_prob[i],
+ VP9_MODE_UPDATE_PROB,
+ cpi->comp_ref_count[i]);
+ }
+
+ update_mbintra_mode_probs(cpi, &header_bc);
+
+ for (i = 0; i < NUM_PARTITION_CONTEXTS; ++i) {
+ vp9_prob Pnew[PARTITION_TYPES - 1];
+ unsigned int bct[PARTITION_TYPES - 1][2];
+ update_mode(&header_bc, PARTITION_TYPES, vp9_partition_encodings,
+ vp9_partition_tree, Pnew,
+ pc->fc.partition_prob[pc->frame_type][i], bct,
+ (unsigned int *)cpi->partition_count[i]);
+ }
+
+ vp9_write_nmv_probs(cpi, xd->allow_high_precision_mv, &header_bc);
+ }
+
+
+ vp9_stop_encode(&header_bc);
+
+
+ // first partition size
+ assert(header_bc.pos <= 0xffff);
+ vp9_wb_write_literal(&first_partition_size_wb, header_bc.pos, 16);
+ *size = bytes_packed + header_bc.pos;
+
+ {
+ int tile_row, tile_col, total_size = 0;
+ unsigned char *data_ptr = cx_data + header_bc.pos;
+ TOKENEXTRA *tok[4][1 << 6], *tok_end;
+
+ vpx_memset(cpi->common.above_seg_context, 0, sizeof(PARTITION_CONTEXT) *
+ mi_cols_aligned_to_sb(&cpi->common));
+ tok[0][0] = cpi->tok;
+ for (tile_row = 0; tile_row < pc->tile_rows; tile_row++) {
+ if (tile_row) {
+ tok[tile_row][0] = tok[tile_row - 1][pc->tile_columns - 1] +
+ cpi->tok_count[tile_row - 1][pc->tile_columns - 1];
+ }
+ for (tile_col = 1; tile_col < pc->tile_columns; tile_col++) {
+ tok[tile_row][tile_col] = tok[tile_row][tile_col - 1] +
+ cpi->tok_count[tile_row][tile_col - 1];
+ }
+ }
+
+ for (tile_row = 0; tile_row < pc->tile_rows; tile_row++) {
+ vp9_get_tile_row_offsets(pc, tile_row);
+ for (tile_col = 0; tile_col < pc->tile_columns; tile_col++) {
+ vp9_get_tile_col_offsets(pc, tile_col);
+ tok_end = tok[tile_row][tile_col] + cpi->tok_count[tile_row][tile_col];
+
+ if (tile_col < pc->tile_columns - 1 || tile_row < pc->tile_rows - 1)
+ vp9_start_encode(&residual_bc, data_ptr + total_size + 4);
+ else
+ vp9_start_encode(&residual_bc, data_ptr + total_size);
+ write_modes(cpi, &residual_bc, &tok[tile_row][tile_col], tok_end);
+ assert(tok[tile_row][tile_col] == tok_end);
+ vp9_stop_encode(&residual_bc);
+ if (tile_col < pc->tile_columns - 1 || tile_row < pc->tile_rows - 1) {
+ // size of this tile
+ write_be32(data_ptr + total_size, residual_bc.pos);
+ total_size += 4;
+ }
+
+ total_size += residual_bc.pos;
+ }
+ }
+
+ *size += total_size;
+ }
+}
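+
+// Resulting packed-frame layout, as produced above:
+//   [uncompressed header][16-bit first-partition size]  <- bit buffer wb
+//   [compressed header, header_bc.pos bytes]
+//   [32-bit tile size][tile data] ... [last tile, no size prefix]
+// On return, *size is the total number of bytes written to dest.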
+
+#ifdef ENTROPY_STATS
+static void print_tree_update_for_type(FILE *f,
+ vp9_coeff_stats *tree_update_hist,
+ int block_types, const char *header) {
+ int i, j, k, l, m;
+
+ fprintf(f, "const vp9_coeff_prob %s = {\n", header);
+ for (i = 0; i < block_types; i++) {
+ fprintf(f, " { \n");
+ for (j = 0; j < REF_TYPES; j++) {
+ fprintf(f, " { \n");
+ for (k = 0; k < COEF_BANDS; k++) {
+ fprintf(f, " {\n");
+ for (l = 0; l < PREV_COEF_CONTEXTS; l++) {
+ fprintf(f, " {");
+ for (m = 0; m < ENTROPY_NODES; m++) {
+ fprintf(f, "%3d, ",
+ get_binary_prob(tree_update_hist[i][j][k][l][m][0],
+ tree_update_hist[i][j][k][l][m][1]));
+ }
+ fprintf(f, "},\n");
+ }
+ fprintf(f, "},\n");
+ }
+ fprintf(f, " },\n");
+ }
+ fprintf(f, " },\n");
+ }
+ fprintf(f, "};\n");
+}
+
+void print_tree_update_probs() {
+ FILE *f = fopen("coefupdprob.h", "w");
+ fprintf(f, "\n/* Update probabilities for token entropy tree. */\n\n");
+
+ print_tree_update_for_type(f, tree_update_hist[TX_4X4], BLOCK_TYPES,
+ "vp9_coef_update_probs_4x4[BLOCK_TYPES]");
+ print_tree_update_for_type(f, tree_update_hist[TX_8X8], BLOCK_TYPES,
+ "vp9_coef_update_probs_8x8[BLOCK_TYPES]");
+ print_tree_update_for_type(f, tree_update_hist[TX_16X16], BLOCK_TYPES,
+ "vp9_coef_update_probs_16x16[BLOCK_TYPES]");
+ print_tree_update_for_type(f, tree_update_hist[TX_32X32], BLOCK_TYPES,
+ "vp9_coef_update_probs_32x32[BLOCK_TYPES]");
+
+ fclose(f);
+ f = fopen("treeupdate.bin", "wb");
+ fwrite(tree_update_hist, sizeof(tree_update_hist), 1, f);
+ fclose(f);
+}
+#endif
diff --git a/libvpx/vp9/encoder/vp9_bitstream.h b/libvpx/vp9/encoder/vp9_bitstream.h
new file mode 100644
index 0000000..b3dbee1
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_bitstream.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_BITSTREAM_H_
+#define VP9_ENCODER_VP9_BITSTREAM_H_
+
+void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *bc);
+
+#endif // VP9_ENCODER_VP9_BITSTREAM_H_
diff --git a/libvpx/vp9/encoder/vp9_block.h b/libvpx/vp9/encoder/vp9_block.h
new file mode 100644
index 0000000..59cc3d9
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_block.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_BLOCK_H_
+#define VP9_ENCODER_VP9_BLOCK_H_
+
+#include "vp9/common/vp9_onyx.h"
+#include "vp9/common/vp9_entropymv.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vpx_ports/mem.h"
+#include "vp9/common/vp9_onyxc_int.h"
+
+// motion search site
+typedef struct {
+ MV mv;
+ int offset;
+} search_site;
+
+typedef struct {
+ int count;
+ struct {
+ MB_PREDICTION_MODE mode;
+ int_mv mv;
+ int_mv second_mv;
+ } bmi[4];
+} PARTITION_INFO;
+
+// Structure to hold snapshot of coding context during the mode picking process
+// TODO Do we need all of these?
+typedef struct {
+ MODE_INFO mic;
+ PARTITION_INFO partition_info;
+ int skip;
+ int_mv best_ref_mv;
+ int_mv second_best_ref_mv;
+ int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
+ int rate;
+ int distortion;
+ int64_t intra_error;
+ int best_mode_index;
+ int rddiv;
+ int rdmult;
+ int hybrid_pred_diff;
+ int comp_pred_diff;
+ int single_pred_diff;
+ int64_t txfm_rd_diff[NB_TXFM_MODES];
+
+ // Bit flag for each mode whether it has high error in comparison to others.
+ unsigned int modes_with_high_error;
+
+ // Bit flag for each ref frame whether it has high error compared to others.
+ unsigned int frames_with_high_error;
+} PICK_MODE_CONTEXT;
+
+struct macroblock_plane {
+ DECLARE_ALIGNED(16, int16_t, src_diff[64*64]);
+ DECLARE_ALIGNED(16, int16_t, coeff[64*64]);
+ struct buf_2d src;
+
+  // Quantizer settings
+ int16_t *quant;
+ uint8_t *quant_shift;
+ int16_t *zbin;
+ int16_t *zrun_zbin_boost;
+ int16_t *round;
+
+ // Zbin Over Quant value
+ int16_t zbin_extra;
+};
+
+typedef struct macroblock MACROBLOCK;
+struct macroblock {
+ struct macroblock_plane plane[MAX_MB_PLANE];
+
+ MACROBLOCKD e_mbd;
+ int skip_block;
+ PARTITION_INFO *partition_info; /* work pointer */
+ PARTITION_INFO *pi; /* Corresponds to upper left visible macroblock */
+ PARTITION_INFO *pip; /* Base of allocated array */
+
+ search_site *ss;
+ int ss_count;
+ int searches_per_step;
+
+ int errorperbit;
+ int sadperbit16;
+ int sadperbit4;
+ int rddiv;
+ int rdmult;
+ unsigned int *mb_activity_ptr;
+ int *mb_norm_activity_ptr;
+ signed int act_zbin_adj;
+
+ int mv_best_ref_index[MAX_REF_FRAMES];
+
+ int nmvjointcost[MV_JOINTS];
+ int nmvcosts[2][MV_VALS];
+ int *nmvcost[2];
+ int nmvcosts_hp[2][MV_VALS];
+ int *nmvcost_hp[2];
+ int **mvcost;
+
+ int nmvjointsadcost[MV_JOINTS];
+ int nmvsadcosts[2][MV_VALS];
+ int *nmvsadcost[2];
+ int nmvsadcosts_hp[2][MV_VALS];
+ int *nmvsadcost_hp[2];
+ int **mvsadcost;
+
+ int mbmode_cost[MB_MODE_COUNT];
+ int intra_uv_mode_cost[2][MB_MODE_COUNT];
+ int y_mode_costs[VP9_INTRA_MODES][VP9_INTRA_MODES][VP9_INTRA_MODES];
+ int switchable_interp_costs[VP9_SWITCHABLE_FILTERS + 1]
+ [VP9_SWITCHABLE_FILTERS];
+
+ // These define limits to motion vector components to prevent them
+ // from extending outside the UMV borders
+ int mv_col_min;
+ int mv_col_max;
+ int mv_row_min;
+ int mv_row_max;
+
+ int skip;
+
+ int encode_breakout;
+
+ unsigned char *active_ptr;
+
+  // Note that token_costs is the cost when the eob node is skipped.
+ vp9_coeff_count token_costs[TX_SIZE_MAX_SB][BLOCK_TYPES];
+ vp9_coeff_count token_costs_noskip[TX_SIZE_MAX_SB][BLOCK_TYPES];
+
+ int optimize;
+
+ // indicate if it is in the rd search loop or encoding process
+ int rd_search;
+
+ // TODO(jingning): Need to refactor the structure arrays that buffers the
+ // coding mode decisions of each partition type.
+ PICK_MODE_CONTEXT ab4x4_context[4][4][4];
+ PICK_MODE_CONTEXT sb8x4_context[4][4][4];
+ PICK_MODE_CONTEXT sb4x8_context[4][4][4];
+ PICK_MODE_CONTEXT sb8x8_context[4][4][4];
+ PICK_MODE_CONTEXT sb8x16_context[4][4][2];
+ PICK_MODE_CONTEXT sb16x8_context[4][4][2];
+ PICK_MODE_CONTEXT mb_context[4][4];
+ PICK_MODE_CONTEXT sb32x16_context[4][2];
+ PICK_MODE_CONTEXT sb16x32_context[4][2];
+ // when 4 MBs share coding parameters:
+ PICK_MODE_CONTEXT sb32_context[4];
+ PICK_MODE_CONTEXT sb32x64_context[2];
+ PICK_MODE_CONTEXT sb64x32_context[2];
+ PICK_MODE_CONTEXT sb64_context;
+ int partition_cost[NUM_PARTITION_CONTEXTS][PARTITION_TYPES];
+
+ BLOCK_SIZE_TYPE b_partitioning[4][4][4];
+ BLOCK_SIZE_TYPE mb_partitioning[4][4];
+ BLOCK_SIZE_TYPE sb_partitioning[4];
+ BLOCK_SIZE_TYPE sb64_partitioning;
+
+ void (*fwd_txm4x4)(int16_t *input, int16_t *output, int pitch);
+ void (*fwd_txm8x4)(int16_t *input, int16_t *output, int pitch);
+ void (*fwd_txm8x8)(int16_t *input, int16_t *output, int pitch);
+ void (*fwd_txm16x16)(int16_t *input, int16_t *output, int pitch);
+ void (*quantize_b_4x4)(MACROBLOCK *x, int b_idx, TX_TYPE tx_type,
+ int y_blocks);
+};
+
+#endif // VP9_ENCODER_VP9_BLOCK_H_
diff --git a/libvpx/vp9/encoder/vp9_boolhuff.c b/libvpx/vp9/encoder/vp9_boolhuff.c
new file mode 100644
index 0000000..0f1aa59
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_boolhuff.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include "vp9/encoder/vp9_boolhuff.h"
+#include "vp9/common/vp9_entropy.h"
+
+#if defined(SECTIONBITS_OUTPUT)
+unsigned __int64 Sectionbits[500];
+
+#endif
+
+#ifdef ENTROPY_STATS
+unsigned int active_section = 0;
+#endif
+
+const unsigned int vp9_prob_cost[256] = {
+ 2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161, 1129, 1099, 1072, 1046,
+ 1023, 1000, 979, 959, 940, 922, 905, 889, 873, 858, 843, 829, 816, 803, 790, 778,
+ 767, 755, 744, 733, 723, 713, 703, 693, 684, 675, 666, 657, 649, 641, 633, 625,
+ 617, 609, 602, 594, 587, 580, 573, 567, 560, 553, 547, 541, 534, 528, 522, 516,
+ 511, 505, 499, 494, 488, 483, 477, 472, 467, 462, 457, 452, 447, 442, 437, 433,
+ 428, 424, 419, 415, 410, 406, 401, 397, 393, 389, 385, 381, 377, 373, 369, 365,
+ 361, 357, 353, 349, 346, 342, 338, 335, 331, 328, 324, 321, 317, 314, 311, 307,
+ 304, 301, 297, 294, 291, 288, 285, 281, 278, 275, 272, 269, 266, 263, 260, 257,
+ 255, 252, 249, 246, 243, 240, 238, 235, 232, 229, 227, 224, 221, 219, 216, 214,
+ 211, 208, 206, 203, 201, 198, 196, 194, 191, 189, 186, 184, 181, 179, 177, 174,
+ 172, 170, 168, 165, 163, 161, 159, 156, 154, 152, 150, 148, 145, 143, 141, 139,
+ 137, 135, 133, 131, 129, 127, 125, 123, 121, 119, 117, 115, 113, 111, 109, 107,
+ 105, 103, 101, 99, 97, 95, 93, 92, 90, 88, 86, 84, 82, 81, 79, 77,
+ 75, 73, 72, 70, 68, 66, 65, 63, 61, 60, 58, 56, 55, 53, 51, 50,
+ 48, 46, 45, 43, 41, 40, 38, 37, 35, 33, 32, 30, 29, 27, 25, 24,
+ 22, 21, 19, 18, 16, 15, 13, 12, 10, 9, 7, 6, 4, 3, 1, 1
+};
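+
+// The table above stores bit costs in 1/256-bit units: vp9_prob_cost[p] is
+// approximately 256 * log2(256 / p), the cost of coding a symbol whose
+// probability is p/256 (e.g. entry 128 is 255 =~ 256, i.e. about one bit).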
+
+void vp9_start_encode(vp9_writer *br, uint8_t *source) {
+ br->lowvalue = 0;
+ br->range = 255;
+ br->value = 0;
+ br->count = -24;
+ br->buffer = source;
+ br->pos = 0;
+ vp9_write_bit(br, 0);
+}
+
+void vp9_stop_encode(vp9_writer *br) {
+ int i;
+
+ for (i = 0; i < 32; i++)
+ vp9_write_bit(br, 0);
+
+  // Ensure there's no ambiguous collision with any index marker bytes
+ if ((br->buffer[br->pos - 1] & 0xe0) == 0xc0)
+ br->buffer[br->pos++] = 0;
+}
+
diff --git a/libvpx/vp9/encoder/vp9_boolhuff.h b/libvpx/vp9/encoder/vp9_boolhuff.h
new file mode 100644
index 0000000..c3f340d
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_boolhuff.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/****************************************************************************
+*
+* Module Title : vp9_boolhuff.h
+*
+* Description : Bool Coder header file.
+*
+****************************************************************************/
+#ifndef VP9_ENCODER_VP9_BOOLHUFF_H_
+#define VP9_ENCODER_VP9_BOOLHUFF_H_
+
+#include "vpx_ports/mem.h"
+
+typedef struct {
+ unsigned int lowvalue;
+ unsigned int range;
+ unsigned int value;
+ int count;
+ unsigned int pos;
+ uint8_t *buffer;
+
+  // Variables used to track bit costs without outputting to the bitstream
+ unsigned int measure_cost;
+ unsigned long bit_counter;
+} vp9_writer;
+
+extern const unsigned int vp9_prob_cost[256];
+
+void vp9_start_encode(vp9_writer *bc, uint8_t *buffer);
+void vp9_stop_encode(vp9_writer *bc);
+
+DECLARE_ALIGNED(16, extern const unsigned char, vp9_norm[256]);
+
+static void vp9_write(vp9_writer *br, int bit, int probability) {
+ unsigned int split;
+ int count = br->count;
+ unsigned int range = br->range;
+ unsigned int lowvalue = br->lowvalue;
+ register unsigned int shift;
+
+#ifdef ENTROPY_STATS
+#if defined(SECTIONBITS_OUTPUT)
+
+ if (bit)
+ Sectionbits[active_section] += vp9_prob_cost[255 - probability];
+ else
+ Sectionbits[active_section] += vp9_prob_cost[probability];
+
+#endif
+#endif
+
+ split = 1 + (((range - 1) * probability) >> 8);
+
+ range = split;
+
+ if (bit) {
+ lowvalue += split;
+ range = br->range - split;
+ }
+
+ shift = vp9_norm[range];
+
+ range <<= shift;
+ count += shift;
+
+ if (count >= 0) {
+ int offset = shift - count;
+
+ if ((lowvalue << (offset - 1)) & 0x80000000) {
+ int x = br->pos - 1;
+
+ while (x >= 0 && br->buffer[x] == 0xff) {
+ br->buffer[x] = 0;
+ x--;
+ }
+
+ br->buffer[x] += 1;
+ }
+
+ br->buffer[br->pos++] = (lowvalue >> (24 - offset));
+ lowvalue <<= offset;
+ shift = count;
+ lowvalue &= 0xffffff;
+ count -= 8;
+ }
+
+ lowvalue <<= shift;
+ br->count = count;
+ br->lowvalue = lowvalue;
+ br->range = range;
+}
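+
+// vp9_write() is one step of a binary arithmetic coder: 'split' divides the
+// current range in proportion to the probability, the half matching the
+// coded bit becomes the new range, and renormalization shifts finished top
+// bits into the buffer, propagating any carry back through 0xff bytes.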
+
+static void vp9_write_bit(vp9_writer *w, int bit) {
+ vp9_write(w, bit, 128); // vp9_prob_half
+}
+
+static void vp9_write_literal(vp9_writer *w, int data, int bits) {
+ int bit;
+
+ for (bit = bits - 1; bit >= 0; bit--)
+ vp9_write_bit(w, 1 & (data >> bit));
+}
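+
+#if 0
+// Minimal usage sketch (illustrative only; the function name and buffer
+// size are hypothetical, not part of the library):
+static void example_writer_usage(void) {
+  uint8_t buf[1024];
+  vp9_writer w;
+  vp9_start_encode(&w, buf);
+  vp9_write(&w, 1, 200);        // a '1' where P(zero) = 200/256
+  vp9_write_bit(&w, 0);         // a uniformly distributed bit
+  vp9_write_literal(&w, 5, 4);  // writes the bits 0101, MSB first
+  vp9_stop_encode(&w);
+  // w.pos now holds the number of bytes produced in buf
+}
+#endif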
+
+
+#endif // VP9_ENCODER_VP9_BOOLHUFF_H_
diff --git a/libvpx/vp9/encoder/vp9_dct.c b/libvpx/vp9/encoder/vp9_dct.c
new file mode 100644
index 0000000..a90bcf5
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_dct.c
@@ -0,0 +1,1381 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <assert.h>
+#include <math.h>
+#include "./vpx_config.h"
+#include "vp9/common/vp9_systemdependent.h"
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_idct.h"
+
+static void fdct4_1d(int16_t *input, int16_t *output) {
+ int16_t step[4];
+ int temp1, temp2;
+
+ step[0] = input[0] + input[3];
+ step[1] = input[1] + input[2];
+ step[2] = input[1] - input[2];
+ step[3] = input[0] - input[3];
+
+ temp1 = (step[0] + step[1]) * cospi_16_64;
+ temp2 = (step[0] - step[1]) * cospi_16_64;
+ output[0] = dct_const_round_shift(temp1);
+ output[2] = dct_const_round_shift(temp2);
+ temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64;
+ temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64;
+ output[1] = dct_const_round_shift(temp1);
+ output[3] = dct_const_round_shift(temp2);
+}
+
+void vp9_short_fdct4x4_c(int16_t *input, int16_t *output, int pitch) {
+  // The 2D transform is done with two passes which are actually pretty
+  // similar. In the first one, we transform the columns and transpose
+  // the results. In the second one, we transform the rows. Since the
+  // first pass leaves its results transposed, the second pass effectively
+  // transforms the original rows and transposes its results again, so the
+  // output ends up back in normal/row positions.
+ const int stride = pitch >> 1;
+ int pass;
+ // We need an intermediate buffer between passes.
+ int16_t intermediate[4 * 4];
+ int16_t *in = input;
+ int16_t *out = intermediate;
+ // Do the two transform/transpose passes
+ for (pass = 0; pass < 2; ++pass) {
+ /*canbe16*/ int input[4];
+ /*canbe16*/ int step[4];
+ /*needs32*/ int temp1, temp2;
+ int i;
+ for (i = 0; i < 4; ++i) {
+ // Load inputs.
+ if (0 == pass) {
+ input[0] = in[0 * stride] << 4;
+ input[1] = in[1 * stride] << 4;
+ input[2] = in[2 * stride] << 4;
+ input[3] = in[3 * stride] << 4;
+ if (i == 0 && input[0]) {
+ input[0] += 1;
+ }
+ } else {
+ input[0] = in[0 * 4];
+ input[1] = in[1 * 4];
+ input[2] = in[2 * 4];
+ input[3] = in[3 * 4];
+ }
+ // Transform.
+ step[0] = input[0] + input[3];
+ step[1] = input[1] + input[2];
+ step[2] = input[1] - input[2];
+ step[3] = input[0] - input[3];
+ temp1 = (step[0] + step[1]) * cospi_16_64;
+ temp2 = (step[0] - step[1]) * cospi_16_64;
+ out[0] = dct_const_round_shift(temp1);
+ out[2] = dct_const_round_shift(temp2);
+ temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64;
+ temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64;
+ out[1] = dct_const_round_shift(temp1);
+ out[3] = dct_const_round_shift(temp2);
+ // Do next column (which is a transposed row in second/horizontal pass)
+ in++;
+ out += 4;
+ }
+ // Setup in/out for next pass.
+ in = intermediate;
+ out = output;
+ }
+
+ {
+ int i, j;
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 4; ++j)
+ output[j + i * 4] = (output[j + i * 4] + 1) >> 2;
+ }
+ }
+}
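+
+// Usage note: 'pitch' is in bytes while the samples are int16_t, so the
+// element stride is pitch >> 1. For a contiguous 4x4 block (stride of four
+// elements), call vp9_short_fdct4x4_c(input, output, 8).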
+
+static void fadst4_1d(int16_t *input, int16_t *output) {
+ int x0, x1, x2, x3;
+ int s0, s1, s2, s3, s4, s5, s6, s7;
+
+ x0 = input[0];
+ x1 = input[1];
+ x2 = input[2];
+ x3 = input[3];
+
+ if (!(x0 | x1 | x2 | x3)) {
+ output[0] = output[1] = output[2] = output[3] = 0;
+ return;
+ }
+
+ s0 = sinpi_1_9 * x0;
+ s1 = sinpi_4_9 * x0;
+ s2 = sinpi_2_9 * x1;
+ s3 = sinpi_1_9 * x1;
+ s4 = sinpi_3_9 * x2;
+ s5 = sinpi_4_9 * x3;
+ s6 = sinpi_2_9 * x3;
+ s7 = x0 + x1 - x3;
+
+ x0 = s0 + s2 + s5;
+ x1 = sinpi_3_9 * s7;
+ x2 = s1 - s3 + s6;
+ x3 = s4;
+
+ s0 = x0 + x3;
+ s1 = x1;
+ s2 = x2 - x3;
+ s3 = x2 - x0 + x3;
+
+ // 1-D transform scaling factor is sqrt(2).
+ output[0] = dct_const_round_shift(s0);
+ output[1] = dct_const_round_shift(s1);
+ output[2] = dct_const_round_shift(s2);
+ output[3] = dct_const_round_shift(s3);
+}
+
+static const transform_2d FHT_4[] = {
+ { fdct4_1d, fdct4_1d }, // DCT_DCT = 0
+ { fadst4_1d, fdct4_1d }, // ADST_DCT = 1
+ { fdct4_1d, fadst4_1d }, // DCT_ADST = 2
+ { fadst4_1d, fadst4_1d } // ADST_ADST = 3
+};
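+
+// Assuming transform_2d lists the column transform first (as its use in
+// vp9_short_fht4x4_c below suggests), ADST_DCT applies the ADST vertically
+// and the DCT horizontally, and vice versa for DCT_ADST.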
+
+void vp9_short_fht4x4_c(int16_t *input, int16_t *output,
+ int pitch, TX_TYPE tx_type) {
+ int16_t out[4 * 4];
+ int16_t *outptr = &out[0];
+ int i, j;
+ int16_t temp_in[4], temp_out[4];
+ const transform_2d ht = FHT_4[tx_type];
+
+ // Columns
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 4; ++j)
+ temp_in[j] = input[j * pitch + i] << 4;
+ if (i == 0 && temp_in[0])
+ temp_in[0] += 1;
+ ht.cols(temp_in, temp_out);
+ for (j = 0; j < 4; ++j)
+ outptr[j * 4 + i] = temp_out[j];
+ }
+
+ // Rows
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 4; ++j)
+ temp_in[j] = out[j + i * 4];
+ ht.rows(temp_in, temp_out);
+ for (j = 0; j < 4; ++j)
+ output[j + i * 4] = (temp_out[j] + 1) >> 2;
+ }
+}
+
+void vp9_short_fdct8x4_c(int16_t *input, int16_t *output, int pitch) {
+ vp9_short_fdct4x4_c(input, output, pitch);
+ vp9_short_fdct4x4_c(input + 4, output + 16, pitch);
+}
+
+static void fdct8_1d(int16_t *input, int16_t *output) {
+ /*canbe16*/ int s0, s1, s2, s3, s4, s5, s6, s7;
+ /*needs32*/ int t0, t1, t2, t3;
+ /*canbe16*/ int x0, x1, x2, x3;
+
+ // stage 1
+ s0 = input[0] + input[7];
+ s1 = input[1] + input[6];
+ s2 = input[2] + input[5];
+ s3 = input[3] + input[4];
+ s4 = input[3] - input[4];
+ s5 = input[2] - input[5];
+ s6 = input[1] - input[6];
+ s7 = input[0] - input[7];
+
+ // fdct4_1d(step, step);
+ x0 = s0 + s3;
+ x1 = s1 + s2;
+ x2 = s1 - s2;
+ x3 = s0 - s3;
+ t0 = (x0 + x1) * cospi_16_64;
+ t1 = (x0 - x1) * cospi_16_64;
+ t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
+ t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
+ output[0] = dct_const_round_shift(t0);
+ output[2] = dct_const_round_shift(t2);
+ output[4] = dct_const_round_shift(t1);
+ output[6] = dct_const_round_shift(t3);
+
+ // Stage 2
+ t0 = (s6 - s5) * cospi_16_64;
+ t1 = (s6 + s5) * cospi_16_64;
+ t2 = dct_const_round_shift(t0);
+ t3 = dct_const_round_shift(t1);
+
+ // Stage 3
+ x0 = s4 + t2;
+ x1 = s4 - t2;
+ x2 = s7 - t3;
+ x3 = s7 + t3;
+
+ // Stage 4
+ t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+ t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
+ t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
+ t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
+ output[1] = dct_const_round_shift(t0);
+ output[3] = dct_const_round_shift(t2);
+ output[5] = dct_const_round_shift(t1);
+ output[7] = dct_const_round_shift(t3);
+}
+
+void vp9_short_fdct8x8_c(int16_t *input, int16_t *final_output, int pitch) {
+ const int stride = pitch >> 1;
+ int i, j;
+ int16_t intermediate[64];
+
+ // Transform columns
+ {
+ int16_t *output = intermediate;
+ /*canbe16*/ int s0, s1, s2, s3, s4, s5, s6, s7;
+ /*needs32*/ int t0, t1, t2, t3;
+ /*canbe16*/ int x0, x1, x2, x3;
+
+ int i;
+ for (i = 0; i < 8; i++) {
+ // stage 1
+ s0 = (input[0 * stride] + input[7 * stride]) << 2;
+ s1 = (input[1 * stride] + input[6 * stride]) << 2;
+ s2 = (input[2 * stride] + input[5 * stride]) << 2;
+ s3 = (input[3 * stride] + input[4 * stride]) << 2;
+ s4 = (input[3 * stride] - input[4 * stride]) << 2;
+ s5 = (input[2 * stride] - input[5 * stride]) << 2;
+ s6 = (input[1 * stride] - input[6 * stride]) << 2;
+ s7 = (input[0 * stride] - input[7 * stride]) << 2;
+
+ // fdct4_1d(step, step);
+ x0 = s0 + s3;
+ x1 = s1 + s2;
+ x2 = s1 - s2;
+ x3 = s0 - s3;
+ t0 = (x0 + x1) * cospi_16_64;
+ t1 = (x0 - x1) * cospi_16_64;
+ t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
+ t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
+ output[0 * 8] = dct_const_round_shift(t0);
+ output[2 * 8] = dct_const_round_shift(t2);
+ output[4 * 8] = dct_const_round_shift(t1);
+ output[6 * 8] = dct_const_round_shift(t3);
+
+ // Stage 2
+ t0 = (s6 - s5) * cospi_16_64;
+ t1 = (s6 + s5) * cospi_16_64;
+ t2 = dct_const_round_shift(t0);
+ t3 = dct_const_round_shift(t1);
+
+ // Stage 3
+ x0 = s4 + t2;
+ x1 = s4 - t2;
+ x2 = s7 - t3;
+ x3 = s7 + t3;
+
+ // Stage 4
+ t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+ t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
+ t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
+ t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
+ output[1 * 8] = dct_const_round_shift(t0);
+ output[3 * 8] = dct_const_round_shift(t2);
+ output[5 * 8] = dct_const_round_shift(t1);
+ output[7 * 8] = dct_const_round_shift(t3);
+ input++;
+ output++;
+ }
+ }
+
+ // Rows
+ for (i = 0; i < 8; ++i) {
+ fdct8_1d(&intermediate[i * 8], &final_output[i * 8]);
+ for (j = 0; j < 8; ++j)
+ final_output[j + i * 8] /= 2;
+ }
+}
+
+void vp9_short_fdct16x16_c(int16_t *input, int16_t *output, int pitch) {
+  // The 2D transform is done with two passes which are actually pretty
+  // similar. In the first one, we transform the columns and transpose
+  // the results. In the second one, we transform the rows. Since the
+  // first pass leaves its results transposed, the second pass effectively
+  // transforms the original rows and transposes its results again, so the
+  // output ends up back in normal/row positions.
+ const int stride = pitch >> 1;
+ int pass;
+ // We need an intermediate buffer between passes.
+ int16_t intermediate[256];
+ int16_t *in = input;
+ int16_t *out = intermediate;
+ // Do the two transform/transpose passes
+ for (pass = 0; pass < 2; ++pass) {
+ /*canbe16*/ int step1[8];
+ /*canbe16*/ int step2[8];
+ /*canbe16*/ int step3[8];
+ /*canbe16*/ int input[8];
+ /*needs32*/ int temp1, temp2;
+ int i;
+ for (i = 0; i < 16; i++) {
+ if (0 == pass) {
+ // Calculate input for the first 8 results.
+ input[0] = (in[0 * stride] + in[15 * stride]) << 2;
+ input[1] = (in[1 * stride] + in[14 * stride]) << 2;
+ input[2] = (in[2 * stride] + in[13 * stride]) << 2;
+ input[3] = (in[3 * stride] + in[12 * stride]) << 2;
+ input[4] = (in[4 * stride] + in[11 * stride]) << 2;
+ input[5] = (in[5 * stride] + in[10 * stride]) << 2;
+ input[6] = (in[6 * stride] + in[ 9 * stride]) << 2;
+ input[7] = (in[7 * stride] + in[ 8 * stride]) << 2;
+ // Calculate input for the next 8 results.
+ step1[0] = (in[7 * stride] - in[ 8 * stride]) << 2;
+ step1[1] = (in[6 * stride] - in[ 9 * stride]) << 2;
+ step1[2] = (in[5 * stride] - in[10 * stride]) << 2;
+ step1[3] = (in[4 * stride] - in[11 * stride]) << 2;
+ step1[4] = (in[3 * stride] - in[12 * stride]) << 2;
+ step1[5] = (in[2 * stride] - in[13 * stride]) << 2;
+ step1[6] = (in[1 * stride] - in[14 * stride]) << 2;
+ step1[7] = (in[0 * stride] - in[15 * stride]) << 2;
+ } else {
+ // Calculate input for the first 8 results.
+ input[0] = ((in[0 * 16] + 1) >> 2) + ((in[15 * 16] + 1) >> 2);
+ input[1] = ((in[1 * 16] + 1) >> 2) + ((in[14 * 16] + 1) >> 2);
+ input[2] = ((in[2 * 16] + 1) >> 2) + ((in[13 * 16] + 1) >> 2);
+ input[3] = ((in[3 * 16] + 1) >> 2) + ((in[12 * 16] + 1) >> 2);
+ input[4] = ((in[4 * 16] + 1) >> 2) + ((in[11 * 16] + 1) >> 2);
+ input[5] = ((in[5 * 16] + 1) >> 2) + ((in[10 * 16] + 1) >> 2);
+ input[6] = ((in[6 * 16] + 1) >> 2) + ((in[ 9 * 16] + 1) >> 2);
+ input[7] = ((in[7 * 16] + 1) >> 2) + ((in[ 8 * 16] + 1) >> 2);
+ // Calculate input for the next 8 results.
+ step1[0] = ((in[7 * 16] + 1) >> 2) - ((in[ 8 * 16] + 1) >> 2);
+ step1[1] = ((in[6 * 16] + 1) >> 2) - ((in[ 9 * 16] + 1) >> 2);
+ step1[2] = ((in[5 * 16] + 1) >> 2) - ((in[10 * 16] + 1) >> 2);
+ step1[3] = ((in[4 * 16] + 1) >> 2) - ((in[11 * 16] + 1) >> 2);
+ step1[4] = ((in[3 * 16] + 1) >> 2) - ((in[12 * 16] + 1) >> 2);
+ step1[5] = ((in[2 * 16] + 1) >> 2) - ((in[13 * 16] + 1) >> 2);
+ step1[6] = ((in[1 * 16] + 1) >> 2) - ((in[14 * 16] + 1) >> 2);
+ step1[7] = ((in[0 * 16] + 1) >> 2) - ((in[15 * 16] + 1) >> 2);
+ }
+ // Work on the first eight values; fdct8_1d(input, even_results);
+ {
+ /*canbe16*/ int s0, s1, s2, s3, s4, s5, s6, s7;
+ /*needs32*/ int t0, t1, t2, t3;
+ /*canbe16*/ int x0, x1, x2, x3;
+
+ // stage 1
+ s0 = input[0] + input[7];
+ s1 = input[1] + input[6];
+ s2 = input[2] + input[5];
+ s3 = input[3] + input[4];
+ s4 = input[3] - input[4];
+ s5 = input[2] - input[5];
+ s6 = input[1] - input[6];
+ s7 = input[0] - input[7];
+
+ // fdct4_1d(step, step);
+ x0 = s0 + s3;
+ x1 = s1 + s2;
+ x2 = s1 - s2;
+ x3 = s0 - s3;
+ t0 = (x0 + x1) * cospi_16_64;
+ t1 = (x0 - x1) * cospi_16_64;
+ t2 = x3 * cospi_8_64 + x2 * cospi_24_64;
+ t3 = x3 * cospi_24_64 - x2 * cospi_8_64;
+ out[0] = dct_const_round_shift(t0);
+ out[4] = dct_const_round_shift(t2);
+ out[8] = dct_const_round_shift(t1);
+ out[12] = dct_const_round_shift(t3);
+
+ // Stage 2
+ t0 = (s6 - s5) * cospi_16_64;
+ t1 = (s6 + s5) * cospi_16_64;
+ t2 = dct_const_round_shift(t0);
+ t3 = dct_const_round_shift(t1);
+
+ // Stage 3
+ x0 = s4 + t2;
+ x1 = s4 - t2;
+ x2 = s7 - t3;
+ x3 = s7 + t3;
+
+ // Stage 4
+ t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+ t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
+ t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
+ t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
+ out[2] = dct_const_round_shift(t0);
+ out[6] = dct_const_round_shift(t2);
+ out[10] = dct_const_round_shift(t1);
+ out[14] = dct_const_round_shift(t3);
+ }
+ // Work on the next eight values; step1 -> odd_results
+ {
+ // step 2
+ temp1 = (step1[5] - step1[2]) * cospi_16_64;
+ temp2 = (step1[4] - step1[3]) * cospi_16_64;
+ step2[2] = dct_const_round_shift(temp1);
+ step2[3] = dct_const_round_shift(temp2);
+ temp1 = (step1[4] + step1[3]) * cospi_16_64;
+ temp2 = (step1[5] + step1[2]) * cospi_16_64;
+ step2[4] = dct_const_round_shift(temp1);
+ step2[5] = dct_const_round_shift(temp2);
+ // step 3
+ step3[0] = step1[0] + step2[3];
+ step3[1] = step1[1] + step2[2];
+ step3[2] = step1[1] - step2[2];
+ step3[3] = step1[0] - step2[3];
+ step3[4] = step1[7] - step2[4];
+ step3[5] = step1[6] - step2[5];
+ step3[6] = step1[6] + step2[5];
+ step3[7] = step1[7] + step2[4];
+ // step 4
+ temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64;
+ temp2 = step3[2] * -cospi_24_64 - step3[5] * cospi_8_64;
+ step2[1] = dct_const_round_shift(temp1);
+ step2[2] = dct_const_round_shift(temp2);
+ temp1 = step3[2] * -cospi_8_64 + step3[5] * cospi_24_64;
+ temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64;
+ step2[5] = dct_const_round_shift(temp1);
+ step2[6] = dct_const_round_shift(temp2);
+ // step 5
+ step1[0] = step3[0] + step2[1];
+ step1[1] = step3[0] - step2[1];
+ step1[2] = step3[3] - step2[2];
+ step1[3] = step3[3] + step2[2];
+ step1[4] = step3[4] + step2[5];
+ step1[5] = step3[4] - step2[5];
+ step1[6] = step3[7] - step2[6];
+ step1[7] = step3[7] + step2[6];
+ // step 6
+ temp1 = step1[0] * cospi_30_64 + step1[7] * cospi_2_64;
+ temp2 = step1[1] * cospi_14_64 + step1[6] * cospi_18_64;
+ out[1] = dct_const_round_shift(temp1);
+ out[9] = dct_const_round_shift(temp2);
+ temp1 = step1[2] * cospi_22_64 + step1[5] * cospi_10_64;
+ temp2 = step1[3] * cospi_6_64 + step1[4] * cospi_26_64;
+ out[5] = dct_const_round_shift(temp1);
+ out[13] = dct_const_round_shift(temp2);
+ temp1 = step1[3] * -cospi_26_64 + step1[4] * cospi_6_64;
+ temp2 = step1[2] * -cospi_10_64 + step1[5] * cospi_22_64;
+ out[3] = dct_const_round_shift(temp1);
+ out[11] = dct_const_round_shift(temp2);
+ temp1 = step1[1] * -cospi_18_64 + step1[6] * cospi_14_64;
+ temp2 = step1[0] * -cospi_2_64 + step1[7] * cospi_30_64;
+ out[7] = dct_const_round_shift(temp1);
+ out[15] = dct_const_round_shift(temp2);
+ }
+ // Do next column (which is a transposed row in second/horizontal pass)
+ in++;
+ out += 16;
+ }
+ // Setup in/out for next pass.
+ in = intermediate;
+ out = output;
+ }
+}
+
+static void fadst8_1d(int16_t *input, int16_t *output) {
+ int s0, s1, s2, s3, s4, s5, s6, s7;
+
+ int x0 = input[7];
+ int x1 = input[0];
+ int x2 = input[5];
+ int x3 = input[2];
+ int x4 = input[3];
+ int x5 = input[4];
+ int x6 = input[1];
+ int x7 = input[6];
+
+ // stage 1
+ s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
+ s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
+ s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
+ s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
+ s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
+ s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
+ s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
+ s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
+
+ x0 = dct_const_round_shift(s0 + s4);
+ x1 = dct_const_round_shift(s1 + s5);
+ x2 = dct_const_round_shift(s2 + s6);
+ x3 = dct_const_round_shift(s3 + s7);
+ x4 = dct_const_round_shift(s0 - s4);
+ x5 = dct_const_round_shift(s1 - s5);
+ x6 = dct_const_round_shift(s2 - s6);
+ x7 = dct_const_round_shift(s3 - s7);
+
+ // stage 2
+ s0 = x0;
+ s1 = x1;
+ s2 = x2;
+ s3 = x3;
+ s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
+ s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
+ s6 = - cospi_24_64 * x6 + cospi_8_64 * x7;
+ s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
+
+ x0 = s0 + s2;
+ x1 = s1 + s3;
+ x2 = s0 - s2;
+ x3 = s1 - s3;
+ x4 = dct_const_round_shift(s4 + s6);
+ x5 = dct_const_round_shift(s5 + s7);
+ x6 = dct_const_round_shift(s4 - s6);
+ x7 = dct_const_round_shift(s5 - s7);
+
+ // stage 3
+ s2 = cospi_16_64 * (x2 + x3);
+ s3 = cospi_16_64 * (x2 - x3);
+ s6 = cospi_16_64 * (x6 + x7);
+ s7 = cospi_16_64 * (x6 - x7);
+
+ x2 = dct_const_round_shift(s2);
+ x3 = dct_const_round_shift(s3);
+ x6 = dct_const_round_shift(s6);
+ x7 = dct_const_round_shift(s7);
+
+ output[0] = x0;
+ output[1] = - x4;
+ output[2] = x6;
+ output[3] = - x2;
+ output[4] = x3;
+ output[5] = - x7;
+ output[6] = x5;
+ output[7] = - x1;
+}
+
+static const transform_2d FHT_8[] = {
+ { fdct8_1d, fdct8_1d }, // DCT_DCT = 0
+ { fadst8_1d, fdct8_1d }, // ADST_DCT = 1
+ { fdct8_1d, fadst8_1d }, // DCT_ADST = 2
+ { fadst8_1d, fadst8_1d } // ADST_ADST = 3
+};
+
+void vp9_short_fht8x8_c(int16_t *input, int16_t *output,
+ int pitch, TX_TYPE tx_type) {
+ int16_t out[64];
+ int16_t *outptr = &out[0];
+ int i, j;
+ int16_t temp_in[8], temp_out[8];
+ const transform_2d ht = FHT_8[tx_type];
+
+ // Columns
+ for (i = 0; i < 8; ++i) {
+ for (j = 0; j < 8; ++j)
+ temp_in[j] = input[j * pitch + i] << 2;
+ ht.cols(temp_in, temp_out);
+ for (j = 0; j < 8; ++j)
+ outptr[j * 8 + i] = temp_out[j];
+ }
+
+ // Rows
+ for (i = 0; i < 8; ++i) {
+ for (j = 0; j < 8; ++j)
+ temp_in[j] = out[j + i * 8];
+ ht.rows(temp_in, temp_out);
+ for (j = 0; j < 8; ++j)
+ output[j + i * 8] = temp_out[j] >> 1;
+ }
+}
+
+/* 4-point reversible, orthonormal Walsh-Hadamard in 3.5 adds, 0.5 shifts per
+ pixel. */
+void vp9_short_walsh4x4_c(short *input, short *output, int pitch) {
+ int i;
+ int a1, b1, c1, d1, e1;
+ short *ip = input;
+ short *op = output;
+ int pitch_short = pitch >> 1;
+
+ for (i = 0; i < 4; i++) {
+ a1 = ip[0 * pitch_short];
+ b1 = ip[1 * pitch_short];
+ c1 = ip[2 * pitch_short];
+ d1 = ip[3 * pitch_short];
+
+ a1 += b1;
+ d1 = d1 - c1;
+ e1 = (a1 - d1) >> 1;
+ b1 = e1 - b1;
+ c1 = e1 - c1;
+ a1 -= c1;
+ d1 += b1;
+ op[0] = a1;
+ op[4] = c1;
+ op[8] = d1;
+ op[12] = b1;
+
+ ip++;
+ op++;
+ }
+ ip = output;
+ op = output;
+
+ for (i = 0; i < 4; i++) {
+ a1 = ip[0];
+ b1 = ip[1];
+ c1 = ip[2];
+ d1 = ip[3];
+
+ a1 += b1;
+ d1 -= c1;
+ e1 = (a1 - d1) >> 1;
+ b1 = e1 - b1;
+ c1 = e1 - c1;
+ a1 -= c1;
+ d1 += b1;
+ op[0] = a1 << WHT_UPSCALE_FACTOR;
+ op[1] = c1 << WHT_UPSCALE_FACTOR;
+ op[2] = d1 << WHT_UPSCALE_FACTOR;
+ op[3] = b1 << WHT_UPSCALE_FACTOR;
+
+ ip += 4;
+ op += 4;
+ }
+}
+
+void vp9_short_walsh8x4_c(short *input, short *output, int pitch) {
+ vp9_short_walsh4x4_c(input, output, pitch);
+ vp9_short_walsh4x4_c(input + 4, output + 16, pitch);
+}
+
+
+// Rewritten to use the same algorithm as the other 1-D transforms.
+static void fdct16_1d(int16_t in[16], int16_t out[16]) {
+ /*canbe16*/ int step1[8];
+ /*canbe16*/ int step2[8];
+ /*canbe16*/ int step3[8];
+ /*canbe16*/ int input[8];
+ /*needs32*/ int temp1, temp2;
+
+ // step 1
+ input[0] = in[0] + in[15];
+ input[1] = in[1] + in[14];
+ input[2] = in[2] + in[13];
+ input[3] = in[3] + in[12];
+ input[4] = in[4] + in[11];
+ input[5] = in[5] + in[10];
+ input[6] = in[6] + in[ 9];
+ input[7] = in[7] + in[ 8];
+
+ step1[0] = in[7] - in[ 8];
+ step1[1] = in[6] - in[ 9];
+ step1[2] = in[5] - in[10];
+ step1[3] = in[4] - in[11];
+ step1[4] = in[3] - in[12];
+ step1[5] = in[2] - in[13];
+ step1[6] = in[1] - in[14];
+ step1[7] = in[0] - in[15];
+
+ // fdct8_1d(step, step);
+ {
+ /*canbe16*/ int s0, s1, s2, s3, s4, s5, s6, s7;
+ /*needs32*/ int t0, t1, t2, t3;
+ /*canbe16*/ int x0, x1, x2, x3;
+
+ // stage 1
+ s0 = input[0] + input[7];
+ s1 = input[1] + input[6];
+ s2 = input[2] + input[5];
+ s3 = input[3] + input[4];
+ s4 = input[3] - input[4];
+ s5 = input[2] - input[5];
+ s6 = input[1] - input[6];
+ s7 = input[0] - input[7];
+
+ // fdct4_1d(step, step);
+ x0 = s0 + s3;
+ x1 = s1 + s2;
+ x2 = s1 - s2;
+ x3 = s0 - s3;
+ t0 = (x0 + x1) * cospi_16_64;
+ t1 = (x0 - x1) * cospi_16_64;
+ t2 = x3 * cospi_8_64 + x2 * cospi_24_64;
+ t3 = x3 * cospi_24_64 - x2 * cospi_8_64;
+ out[0] = dct_const_round_shift(t0);
+ out[4] = dct_const_round_shift(t2);
+ out[8] = dct_const_round_shift(t1);
+ out[12] = dct_const_round_shift(t3);
+
+ // Stage 2
+ t0 = (s6 - s5) * cospi_16_64;
+ t1 = (s6 + s5) * cospi_16_64;
+ t2 = dct_const_round_shift(t0);
+ t3 = dct_const_round_shift(t1);
+
+ // Stage 3
+ x0 = s4 + t2;
+ x1 = s4 - t2;
+ x2 = s7 - t3;
+ x3 = s7 + t3;
+
+ // Stage 4
+ t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+ t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
+ t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
+ t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
+ out[2] = dct_const_round_shift(t0);
+ out[6] = dct_const_round_shift(t2);
+ out[10] = dct_const_round_shift(t1);
+ out[14] = dct_const_round_shift(t3);
+ }
+
+ // step 2
+ temp1 = (step1[5] - step1[2]) * cospi_16_64;
+ temp2 = (step1[4] - step1[3]) * cospi_16_64;
+ step2[2] = dct_const_round_shift(temp1);
+ step2[3] = dct_const_round_shift(temp2);
+ temp1 = (step1[4] + step1[3]) * cospi_16_64;
+ temp2 = (step1[5] + step1[2]) * cospi_16_64;
+ step2[4] = dct_const_round_shift(temp1);
+ step2[5] = dct_const_round_shift(temp2);
+
+ // step 3
+ step3[0] = step1[0] + step2[3];
+ step3[1] = step1[1] + step2[2];
+ step3[2] = step1[1] - step2[2];
+ step3[3] = step1[0] - step2[3];
+ step3[4] = step1[7] - step2[4];
+ step3[5] = step1[6] - step2[5];
+ step3[6] = step1[6] + step2[5];
+ step3[7] = step1[7] + step2[4];
+
+ // step 4
+ temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64;
+ temp2 = step3[2] * -cospi_24_64 - step3[5] * cospi_8_64;
+ step2[1] = dct_const_round_shift(temp1);
+ step2[2] = dct_const_round_shift(temp2);
+ temp1 = step3[2] * -cospi_8_64 + step3[5] * cospi_24_64;
+ temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64;
+ step2[5] = dct_const_round_shift(temp1);
+ step2[6] = dct_const_round_shift(temp2);
+
+ // step 5
+ step1[0] = step3[0] + step2[1];
+ step1[1] = step3[0] - step2[1];
+ step1[2] = step3[3] - step2[2];
+ step1[3] = step3[3] + step2[2];
+ step1[4] = step3[4] + step2[5];
+ step1[5] = step3[4] - step2[5];
+ step1[6] = step3[7] - step2[6];
+ step1[7] = step3[7] + step2[6];
+
+ // step 6
+ temp1 = step1[0] * cospi_30_64 + step1[7] * cospi_2_64;
+ temp2 = step1[1] * cospi_14_64 + step1[6] * cospi_18_64;
+ out[1] = dct_const_round_shift(temp1);
+ out[9] = dct_const_round_shift(temp2);
+
+ temp1 = step1[2] * cospi_22_64 + step1[5] * cospi_10_64;
+ temp2 = step1[3] * cospi_6_64 + step1[4] * cospi_26_64;
+ out[5] = dct_const_round_shift(temp1);
+ out[13] = dct_const_round_shift(temp2);
+
+ temp1 = step1[3] * -cospi_26_64 + step1[4] * cospi_6_64;
+ temp2 = step1[2] * -cospi_10_64 + step1[5] * cospi_22_64;
+ out[3] = dct_const_round_shift(temp1);
+ out[11] = dct_const_round_shift(temp2);
+
+ temp1 = step1[1] * -cospi_18_64 + step1[6] * cospi_14_64;
+ temp2 = step1[0] * -cospi_2_64 + step1[7] * cospi_30_64;
+ out[7] = dct_const_round_shift(temp1);
+ out[15] = dct_const_round_shift(temp2);
+}
+
+void fadst16_1d(int16_t *input, int16_t *output) {
+ int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15;
+
+ int x0 = input[15];
+ int x1 = input[0];
+ int x2 = input[13];
+ int x3 = input[2];
+ int x4 = input[11];
+ int x5 = input[4];
+ int x6 = input[9];
+ int x7 = input[6];
+ int x8 = input[7];
+ int x9 = input[8];
+ int x10 = input[5];
+ int x11 = input[10];
+ int x12 = input[3];
+ int x13 = input[12];
+ int x14 = input[1];
+ int x15 = input[14];
+
+ // stage 1
+ s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
+ s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
+ s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
+ s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
+ s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
+ s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
+ s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
+ s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
+ s8 = x8 * cospi_17_64 + x9 * cospi_15_64;
+ s9 = x8 * cospi_15_64 - x9 * cospi_17_64;
+ s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
+ s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
+ s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
+ s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
+ s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
+ s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
+
+ x0 = dct_const_round_shift(s0 + s8);
+ x1 = dct_const_round_shift(s1 + s9);
+ x2 = dct_const_round_shift(s2 + s10);
+ x3 = dct_const_round_shift(s3 + s11);
+ x4 = dct_const_round_shift(s4 + s12);
+ x5 = dct_const_round_shift(s5 + s13);
+ x6 = dct_const_round_shift(s6 + s14);
+ x7 = dct_const_round_shift(s7 + s15);
+ x8 = dct_const_round_shift(s0 - s8);
+ x9 = dct_const_round_shift(s1 - s9);
+ x10 = dct_const_round_shift(s2 - s10);
+ x11 = dct_const_round_shift(s3 - s11);
+ x12 = dct_const_round_shift(s4 - s12);
+ x13 = dct_const_round_shift(s5 - s13);
+ x14 = dct_const_round_shift(s6 - s14);
+ x15 = dct_const_round_shift(s7 - s15);
+
+ // stage 2
+ s0 = x0;
+ s1 = x1;
+ s2 = x2;
+ s3 = x3;
+ s4 = x4;
+ s5 = x5;
+ s6 = x6;
+ s7 = x7;
+ s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
+ s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
+ s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
+ s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
+ s12 = - x12 * cospi_28_64 + x13 * cospi_4_64;
+ s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
+ s14 = - x14 * cospi_12_64 + x15 * cospi_20_64;
+ s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
+
+ x0 = s0 + s4;
+ x1 = s1 + s5;
+ x2 = s2 + s6;
+ x3 = s3 + s7;
+ x4 = s0 - s4;
+ x5 = s1 - s5;
+ x6 = s2 - s6;
+ x7 = s3 - s7;
+ x8 = dct_const_round_shift(s8 + s12);
+ x9 = dct_const_round_shift(s9 + s13);
+ x10 = dct_const_round_shift(s10 + s14);
+ x11 = dct_const_round_shift(s11 + s15);
+ x12 = dct_const_round_shift(s8 - s12);
+ x13 = dct_const_round_shift(s9 - s13);
+ x14 = dct_const_round_shift(s10 - s14);
+ x15 = dct_const_round_shift(s11 - s15);
+
+ // stage 3
+ s0 = x0;
+ s1 = x1;
+ s2 = x2;
+ s3 = x3;
+ s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
+ s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
+ s6 = - x6 * cospi_24_64 + x7 * cospi_8_64;
+ s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
+ s8 = x8;
+ s9 = x9;
+ s10 = x10;
+ s11 = x11;
+ s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
+ s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
+ s14 = - x14 * cospi_24_64 + x15 * cospi_8_64;
+ s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
+
+ x0 = s0 + s2;
+ x1 = s1 + s3;
+ x2 = s0 - s2;
+ x3 = s1 - s3;
+ x4 = dct_const_round_shift(s4 + s6);
+ x5 = dct_const_round_shift(s5 + s7);
+ x6 = dct_const_round_shift(s4 - s6);
+ x7 = dct_const_round_shift(s5 - s7);
+ x8 = s8 + s10;
+ x9 = s9 + s11;
+ x10 = s8 - s10;
+ x11 = s9 - s11;
+ x12 = dct_const_round_shift(s12 + s14);
+ x13 = dct_const_round_shift(s13 + s15);
+ x14 = dct_const_round_shift(s12 - s14);
+ x15 = dct_const_round_shift(s13 - s15);
+
+ // stage 4
+ s2 = (- cospi_16_64) * (x2 + x3);
+ s3 = cospi_16_64 * (x2 - x3);
+ s6 = cospi_16_64 * (x6 + x7);
+ s7 = cospi_16_64 * (- x6 + x7);
+ s10 = cospi_16_64 * (x10 + x11);
+ s11 = cospi_16_64 * (- x10 + x11);
+ s14 = (- cospi_16_64) * (x14 + x15);
+ s15 = cospi_16_64 * (x14 - x15);
+
+ x2 = dct_const_round_shift(s2);
+ x3 = dct_const_round_shift(s3);
+ x6 = dct_const_round_shift(s6);
+ x7 = dct_const_round_shift(s7);
+ x10 = dct_const_round_shift(s10);
+ x11 = dct_const_round_shift(s11);
+ x14 = dct_const_round_shift(s14);
+ x15 = dct_const_round_shift(s15);
+
+ output[0] = x0;
+ output[1] = - x8;
+ output[2] = x12;
+ output[3] = - x4;
+ output[4] = x6;
+ output[5] = x14;
+ output[6] = x10;
+ output[7] = x2;
+ output[8] = x3;
+ output[9] = x11;
+ output[10] = x15;
+ output[11] = x7;
+ output[12] = x5;
+ output[13] = - x13;
+ output[14] = x9;
+ output[15] = - x1;
+}
+
+static const transform_2d FHT_16[] = {
+ { fdct16_1d, fdct16_1d }, // DCT_DCT = 0
+ { fadst16_1d, fdct16_1d }, // ADST_DCT = 1
+ { fdct16_1d, fadst16_1d }, // DCT_ADST = 2
+ { fadst16_1d, fadst16_1d } // ADST_ADST = 3
+};
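+
+// In each transform_2d pair above, the first entry is applied to columns and
+// the second to rows, matching the ht.cols / ht.rows uses below.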
+
+void vp9_short_fht16x16_c(int16_t *input, int16_t *output,
+ int pitch, TX_TYPE tx_type) {
+ int16_t out[256];
+ int16_t *outptr = &out[0];
+ int i, j;
+ int16_t temp_in[16], temp_out[16];
+ const transform_2d ht = FHT_16[tx_type];
+
+ // Columns
+ for (i = 0; i < 16; ++i) {
+ for (j = 0; j < 16; ++j)
+ temp_in[j] = input[j * pitch + i] << 2;
+ ht.cols(temp_in, temp_out);
+ for (j = 0; j < 16; ++j)
+ outptr[j * 16 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
+ }
+
+ // Rows
+ for (i = 0; i < 16; ++i) {
+ for (j = 0; j < 16; ++j)
+ temp_in[j] = out[j + i * 16];
+ ht.rows(temp_in, temp_out);
+ for (j = 0; j < 16; ++j)
+ output[j + i * 16] = temp_out[j];
+ }
+}
+
+static INLINE int dct_32_round(int input) {
+ int rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
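+  // The assert below checks the signed 18-bit range (-2^17 .. 2^17 - 1) that
+  // the 32x32 forward DCT intermediates are expected to stay within.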
+ assert(-131072 <= rv && rv <= 131071);
+ return rv;
+}
+
+static INLINE int half_round_shift(int input) {
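+  // Divide by 4, rounding to nearest with ties toward zero: the extra
+  // (input < 0) term keeps the result symmetric for negative inputs.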
+ int rv = (input + 1 + (input < 0)) >> 2;
+ return rv;
+}
+
+static void dct32_1d(int *input, int *output, int round) {
+ int step[32];
+ // Stage 1
+ step[0] = input[0] + input[(32 - 1)];
+ step[1] = input[1] + input[(32 - 2)];
+ step[2] = input[2] + input[(32 - 3)];
+ step[3] = input[3] + input[(32 - 4)];
+ step[4] = input[4] + input[(32 - 5)];
+ step[5] = input[5] + input[(32 - 6)];
+ step[6] = input[6] + input[(32 - 7)];
+ step[7] = input[7] + input[(32 - 8)];
+ step[8] = input[8] + input[(32 - 9)];
+ step[9] = input[9] + input[(32 - 10)];
+ step[10] = input[10] + input[(32 - 11)];
+ step[11] = input[11] + input[(32 - 12)];
+ step[12] = input[12] + input[(32 - 13)];
+ step[13] = input[13] + input[(32 - 14)];
+ step[14] = input[14] + input[(32 - 15)];
+ step[15] = input[15] + input[(32 - 16)];
+ step[16] = -input[16] + input[(32 - 17)];
+ step[17] = -input[17] + input[(32 - 18)];
+ step[18] = -input[18] + input[(32 - 19)];
+ step[19] = -input[19] + input[(32 - 20)];
+ step[20] = -input[20] + input[(32 - 21)];
+ step[21] = -input[21] + input[(32 - 22)];
+ step[22] = -input[22] + input[(32 - 23)];
+ step[23] = -input[23] + input[(32 - 24)];
+ step[24] = -input[24] + input[(32 - 25)];
+ step[25] = -input[25] + input[(32 - 26)];
+ step[26] = -input[26] + input[(32 - 27)];
+ step[27] = -input[27] + input[(32 - 28)];
+ step[28] = -input[28] + input[(32 - 29)];
+ step[29] = -input[29] + input[(32 - 30)];
+ step[30] = -input[30] + input[(32 - 31)];
+ step[31] = -input[31] + input[(32 - 32)];
+
+ // Stage 2
+ output[0] = step[0] + step[16 - 1];
+ output[1] = step[1] + step[16 - 2];
+ output[2] = step[2] + step[16 - 3];
+ output[3] = step[3] + step[16 - 4];
+ output[4] = step[4] + step[16 - 5];
+ output[5] = step[5] + step[16 - 6];
+ output[6] = step[6] + step[16 - 7];
+ output[7] = step[7] + step[16 - 8];
+ output[8] = -step[8] + step[16 - 9];
+ output[9] = -step[9] + step[16 - 10];
+ output[10] = -step[10] + step[16 - 11];
+ output[11] = -step[11] + step[16 - 12];
+ output[12] = -step[12] + step[16 - 13];
+ output[13] = -step[13] + step[16 - 14];
+ output[14] = -step[14] + step[16 - 15];
+ output[15] = -step[15] + step[16 - 16];
+
+ output[16] = step[16];
+ output[17] = step[17];
+ output[18] = step[18];
+ output[19] = step[19];
+
+ output[20] = dct_32_round((-step[20] + step[27]) * cospi_16_64);
+ output[21] = dct_32_round((-step[21] + step[26]) * cospi_16_64);
+ output[22] = dct_32_round((-step[22] + step[25]) * cospi_16_64);
+ output[23] = dct_32_round((-step[23] + step[24]) * cospi_16_64);
+
+ output[24] = dct_32_round((step[24] + step[23]) * cospi_16_64);
+ output[25] = dct_32_round((step[25] + step[22]) * cospi_16_64);
+ output[26] = dct_32_round((step[26] + step[21]) * cospi_16_64);
+ output[27] = dct_32_round((step[27] + step[20]) * cospi_16_64);
+
+ output[28] = step[28];
+ output[29] = step[29];
+ output[30] = step[30];
+ output[31] = step[31];
+
+ // Stage 3
+ step[0] = output[0] + output[(8 - 1)];
+ step[1] = output[1] + output[(8 - 2)];
+ step[2] = output[2] + output[(8 - 3)];
+ step[3] = output[3] + output[(8 - 4)];
+ step[4] = -output[4] + output[(8 - 5)];
+ step[5] = -output[5] + output[(8 - 6)];
+ step[6] = -output[6] + output[(8 - 7)];
+ step[7] = -output[7] + output[(8 - 8)];
+ step[8] = output[8];
+ step[9] = output[9];
+ step[10] = dct_32_round((-output[10] + output[13]) * cospi_16_64);
+ step[11] = dct_32_round((-output[11] + output[12]) * cospi_16_64);
+ step[12] = dct_32_round((output[12] + output[11]) * cospi_16_64);
+ step[13] = dct_32_round((output[13] + output[10]) * cospi_16_64);
+ step[14] = output[14];
+ step[15] = output[15];
+
+ step[16] = output[16] + output[23];
+ step[17] = output[17] + output[22];
+ step[18] = output[18] + output[21];
+ step[19] = output[19] + output[20];
+ step[20] = -output[20] + output[19];
+ step[21] = -output[21] + output[18];
+ step[22] = -output[22] + output[17];
+ step[23] = -output[23] + output[16];
+ step[24] = -output[24] + output[31];
+ step[25] = -output[25] + output[30];
+ step[26] = -output[26] + output[29];
+ step[27] = -output[27] + output[28];
+ step[28] = output[28] + output[27];
+ step[29] = output[29] + output[26];
+ step[30] = output[30] + output[25];
+ step[31] = output[31] + output[24];
+
+  // Scale the magnitude down by half so that the intermediate values stay
+  // within the range of 16 bits.
+ if (round) {
+ step[0] = half_round_shift(step[0]);
+ step[1] = half_round_shift(step[1]);
+ step[2] = half_round_shift(step[2]);
+ step[3] = half_round_shift(step[3]);
+ step[4] = half_round_shift(step[4]);
+ step[5] = half_round_shift(step[5]);
+ step[6] = half_round_shift(step[6]);
+ step[7] = half_round_shift(step[7]);
+ step[8] = half_round_shift(step[8]);
+ step[9] = half_round_shift(step[9]);
+ step[10] = half_round_shift(step[10]);
+ step[11] = half_round_shift(step[11]);
+ step[12] = half_round_shift(step[12]);
+ step[13] = half_round_shift(step[13]);
+ step[14] = half_round_shift(step[14]);
+ step[15] = half_round_shift(step[15]);
+
+ step[16] = half_round_shift(step[16]);
+ step[17] = half_round_shift(step[17]);
+ step[18] = half_round_shift(step[18]);
+ step[19] = half_round_shift(step[19]);
+ step[20] = half_round_shift(step[20]);
+ step[21] = half_round_shift(step[21]);
+ step[22] = half_round_shift(step[22]);
+ step[23] = half_round_shift(step[23]);
+ step[24] = half_round_shift(step[24]);
+ step[25] = half_round_shift(step[25]);
+ step[26] = half_round_shift(step[26]);
+ step[27] = half_round_shift(step[27]);
+ step[28] = half_round_shift(step[28]);
+ step[29] = half_round_shift(step[29]);
+ step[30] = half_round_shift(step[30]);
+ step[31] = half_round_shift(step[31]);
+ }
+
+ // Stage 4
+ output[0] = step[0] + step[3];
+ output[1] = step[1] + step[2];
+ output[2] = -step[2] + step[1];
+ output[3] = -step[3] + step[0];
+ output[4] = step[4];
+ output[5] = dct_32_round((-step[5] + step[6]) * cospi_16_64);
+ output[6] = dct_32_round((step[6] + step[5]) * cospi_16_64);
+ output[7] = step[7];
+ output[8] = step[8] + step[11];
+ output[9] = step[9] + step[10];
+ output[10] = -step[10] + step[9];
+ output[11] = -step[11] + step[8];
+ output[12] = -step[12] + step[15];
+ output[13] = -step[13] + step[14];
+ output[14] = step[14] + step[13];
+ output[15] = step[15] + step[12];
+
+ output[16] = step[16];
+ output[17] = step[17];
+ output[18] = dct_32_round(step[18] * -cospi_8_64 + step[29] * cospi_24_64);
+ output[19] = dct_32_round(step[19] * -cospi_8_64 + step[28] * cospi_24_64);
+ output[20] = dct_32_round(step[20] * -cospi_24_64 + step[27] * -cospi_8_64);
+ output[21] = dct_32_round(step[21] * -cospi_24_64 + step[26] * -cospi_8_64);
+ output[22] = step[22];
+ output[23] = step[23];
+ output[24] = step[24];
+ output[25] = step[25];
+ output[26] = dct_32_round(step[26] * cospi_24_64 + step[21] * -cospi_8_64);
+ output[27] = dct_32_round(step[27] * cospi_24_64 + step[20] * -cospi_8_64);
+ output[28] = dct_32_round(step[28] * cospi_8_64 + step[19] * cospi_24_64);
+ output[29] = dct_32_round(step[29] * cospi_8_64 + step[18] * cospi_24_64);
+ output[30] = step[30];
+ output[31] = step[31];
+
+ // Stage 5
+ step[0] = dct_32_round((output[0] + output[1]) * cospi_16_64);
+ step[1] = dct_32_round((-output[1] + output[0]) * cospi_16_64);
+ step[2] = dct_32_round(output[2] * cospi_24_64 + output[3] * cospi_8_64);
+ step[3] = dct_32_round(output[3] * cospi_24_64 - output[2] * cospi_8_64);
+ step[4] = output[4] + output[5];
+ step[5] = -output[5] + output[4];
+ step[6] = -output[6] + output[7];
+ step[7] = output[7] + output[6];
+ step[8] = output[8];
+ step[9] = dct_32_round(output[9] * -cospi_8_64 + output[14] * cospi_24_64);
+ step[10] = dct_32_round(output[10] * -cospi_24_64 + output[13] * -cospi_8_64);
+ step[11] = output[11];
+ step[12] = output[12];
+ step[13] = dct_32_round(output[13] * cospi_24_64 + output[10] * -cospi_8_64);
+ step[14] = dct_32_round(output[14] * cospi_8_64 + output[9] * cospi_24_64);
+ step[15] = output[15];
+
+ step[16] = output[16] + output[19];
+ step[17] = output[17] + output[18];
+ step[18] = -output[18] + output[17];
+ step[19] = -output[19] + output[16];
+ step[20] = -output[20] + output[23];
+ step[21] = -output[21] + output[22];
+ step[22] = output[22] + output[21];
+ step[23] = output[23] + output[20];
+ step[24] = output[24] + output[27];
+ step[25] = output[25] + output[26];
+ step[26] = -output[26] + output[25];
+ step[27] = -output[27] + output[24];
+ step[28] = -output[28] + output[31];
+ step[29] = -output[29] + output[30];
+ step[30] = output[30] + output[29];
+ step[31] = output[31] + output[28];
+
+ // Stage 6
+ output[0] = step[0];
+ output[1] = step[1];
+ output[2] = step[2];
+ output[3] = step[3];
+ output[4] = dct_32_round(step[4] * cospi_28_64 + step[7] * cospi_4_64);
+ output[5] = dct_32_round(step[5] * cospi_12_64 + step[6] * cospi_20_64);
+ output[6] = dct_32_round(step[6] * cospi_12_64 + step[5] * -cospi_20_64);
+ output[7] = dct_32_round(step[7] * cospi_28_64 + step[4] * -cospi_4_64);
+ output[8] = step[8] + step[9];
+ output[9] = -step[9] + step[8];
+ output[10] = -step[10] + step[11];
+ output[11] = step[11] + step[10];
+ output[12] = step[12] + step[13];
+ output[13] = -step[13] + step[12];
+ output[14] = -step[14] + step[15];
+ output[15] = step[15] + step[14];
+
+ output[16] = step[16];
+ output[17] = dct_32_round(step[17] * -cospi_4_64 + step[30] * cospi_28_64);
+ output[18] = dct_32_round(step[18] * -cospi_28_64 + step[29] * -cospi_4_64);
+ output[19] = step[19];
+ output[20] = step[20];
+ output[21] = dct_32_round(step[21] * -cospi_20_64 + step[26] * cospi_12_64);
+ output[22] = dct_32_round(step[22] * -cospi_12_64 + step[25] * -cospi_20_64);
+ output[23] = step[23];
+ output[24] = step[24];
+ output[25] = dct_32_round(step[25] * cospi_12_64 + step[22] * -cospi_20_64);
+ output[26] = dct_32_round(step[26] * cospi_20_64 + step[21] * cospi_12_64);
+ output[27] = step[27];
+ output[28] = step[28];
+ output[29] = dct_32_round(step[29] * cospi_28_64 + step[18] * -cospi_4_64);
+ output[30] = dct_32_round(step[30] * cospi_4_64 + step[17] * cospi_28_64);
+ output[31] = step[31];
+
+ // Stage 7
+ step[0] = output[0];
+ step[1] = output[1];
+ step[2] = output[2];
+ step[3] = output[3];
+ step[4] = output[4];
+ step[5] = output[5];
+ step[6] = output[6];
+ step[7] = output[7];
+ step[8] = dct_32_round(output[8] * cospi_30_64 + output[15] * cospi_2_64);
+ step[9] = dct_32_round(output[9] * cospi_14_64 + output[14] * cospi_18_64);
+ step[10] = dct_32_round(output[10] * cospi_22_64 + output[13] * cospi_10_64);
+ step[11] = dct_32_round(output[11] * cospi_6_64 + output[12] * cospi_26_64);
+ step[12] = dct_32_round(output[12] * cospi_6_64 + output[11] * -cospi_26_64);
+ step[13] = dct_32_round(output[13] * cospi_22_64 + output[10] * -cospi_10_64);
+ step[14] = dct_32_round(output[14] * cospi_14_64 + output[9] * -cospi_18_64);
+ step[15] = dct_32_round(output[15] * cospi_30_64 + output[8] * -cospi_2_64);
+
+ step[16] = output[16] + output[17];
+ step[17] = -output[17] + output[16];
+ step[18] = -output[18] + output[19];
+ step[19] = output[19] + output[18];
+ step[20] = output[20] + output[21];
+ step[21] = -output[21] + output[20];
+ step[22] = -output[22] + output[23];
+ step[23] = output[23] + output[22];
+ step[24] = output[24] + output[25];
+ step[25] = -output[25] + output[24];
+ step[26] = -output[26] + output[27];
+ step[27] = output[27] + output[26];
+ step[28] = output[28] + output[29];
+ step[29] = -output[29] + output[28];
+ step[30] = -output[30] + output[31];
+ step[31] = output[31] + output[30];
+
+  // Final stage: output indices are bit-reversed.
+ output[0] = step[0];
+ output[16] = step[1];
+ output[8] = step[2];
+ output[24] = step[3];
+ output[4] = step[4];
+ output[20] = step[5];
+ output[12] = step[6];
+ output[28] = step[7];
+ output[2] = step[8];
+ output[18] = step[9];
+ output[10] = step[10];
+ output[26] = step[11];
+ output[6] = step[12];
+ output[22] = step[13];
+ output[14] = step[14];
+ output[30] = step[15];
+
+ output[1] = dct_32_round(step[16] * cospi_31_64 + step[31] * cospi_1_64);
+ output[17] = dct_32_round(step[17] * cospi_15_64 + step[30] * cospi_17_64);
+ output[9] = dct_32_round(step[18] * cospi_23_64 + step[29] * cospi_9_64);
+ output[25] = dct_32_round(step[19] * cospi_7_64 + step[28] * cospi_25_64);
+ output[5] = dct_32_round(step[20] * cospi_27_64 + step[27] * cospi_5_64);
+ output[21] = dct_32_round(step[21] * cospi_11_64 + step[26] * cospi_21_64);
+ output[13] = dct_32_round(step[22] * cospi_19_64 + step[25] * cospi_13_64);
+ output[29] = dct_32_round(step[23] * cospi_3_64 + step[24] * cospi_29_64);
+ output[3] = dct_32_round(step[24] * cospi_3_64 + step[23] * -cospi_29_64);
+ output[19] = dct_32_round(step[25] * cospi_19_64 + step[22] * -cospi_13_64);
+ output[11] = dct_32_round(step[26] * cospi_11_64 + step[21] * -cospi_21_64);
+ output[27] = dct_32_round(step[27] * cospi_27_64 + step[20] * -cospi_5_64);
+ output[7] = dct_32_round(step[28] * cospi_7_64 + step[19] * -cospi_25_64);
+ output[23] = dct_32_round(step[29] * cospi_23_64 + step[18] * -cospi_9_64);
+ output[15] = dct_32_round(step[30] * cospi_15_64 + step[17] * -cospi_17_64);
+ output[31] = dct_32_round(step[31] * cospi_31_64 + step[16] * -cospi_1_64);
+}
+
+void vp9_short_fdct32x32_c(int16_t *input, int16_t *out, int pitch) {
+ int shortpitch = pitch >> 1;
+ int i, j;
+ int output[32 * 32];
+
+ // Columns
+ for (i = 0; i < 32; ++i) {
+ int temp_in[32], temp_out[32];
+ for (j = 0; j < 32; ++j)
+ temp_in[j] = input[j * shortpitch + i] << 2;
+ dct32_1d(temp_in, temp_out, 0);
+ for (j = 0; j < 32; ++j)
+ output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
+ }
+
+ // Rows
+ for (i = 0; i < 32; ++i) {
+ int temp_in[32], temp_out[32];
+ for (j = 0; j < 32; ++j)
+ temp_in[j] = output[j + i * 32];
+ dct32_1d(temp_in, temp_out, 0);
+ for (j = 0; j < 32; ++j)
+ out[j + i * 32] = (temp_out[j] + 1 + (temp_out[j] < 0)) >> 2;
+ }
+}
+
+// Note that although dct_32_round() is used throughout the dct32_1d()
+// computation flow, this 2-D fdct32x32 for the rate-distortion optimization
+// loop operates within 16-bit precision: the row pass is invoked with
+// round == 1, so half_round_shift() halves the magnitudes mid-transform and
+// the final per-coefficient rounding shift is skipped.
+void vp9_short_fdct32x32_rd_c(int16_t *input, int16_t *out, int pitch) {
+ int shortpitch = pitch >> 1;
+ int i, j;
+ int output[32 * 32];
+
+ // Columns
+ for (i = 0; i < 32; ++i) {
+ int temp_in[32], temp_out[32];
+ for (j = 0; j < 32; ++j)
+ temp_in[j] = input[j * shortpitch + i] << 2;
+ dct32_1d(temp_in, temp_out, 0);
+ for (j = 0; j < 32; ++j)
+ output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
+ }
+
+ // Rows
+ for (i = 0; i < 32; ++i) {
+ int temp_in[32], temp_out[32];
+ for (j = 0; j < 32; ++j)
+ temp_in[j] = output[j + i * 32];
+ dct32_1d(temp_in, temp_out, 1);
+ for (j = 0; j < 32; ++j)
+ out[j + i * 32] = temp_out[j];
+ }
+}
diff --git a/libvpx/vp9/encoder/vp9_encodeframe.c b/libvpx/vp9/encoder/vp9_encodeframe.c
new file mode 100644
index 0000000..54b6e24
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_encodeframe.c
@@ -0,0 +1,2109 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "./vpx_config.h"
+#include "./vp9_rtcd.h"
+#include "vp9/encoder/vp9_encodeframe.h"
+#include "vp9/encoder/vp9_encodemb.h"
+#include "vp9/encoder/vp9_encodemv.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/common/vp9_extend.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/encoder/vp9_segmentation.h"
+#include "vp9/encoder/vp9_encodeintra.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/encoder/vp9_rdopt.h"
+#include "vp9/common/vp9_findnearmv.h"
+#include "vp9/common/vp9_reconintra.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_tile_common.h"
+#include "vp9/encoder/vp9_tokenize.h"
+#include "./vp9_rtcd.h"
+#include <stdio.h>
+#include <math.h>
+#include <limits.h>
+#include "vpx_ports/vpx_timer.h"
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/common/vp9_mvref_common.h"
+
+#define DBG_PRNT_SEGMAP 0
+
+// #define ENC_DEBUG
+#ifdef ENC_DEBUG
+int enc_debug = 0;
+#endif
+
+void vp9_select_interp_filter_type(VP9_COMP *cpi);
+
+static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
+ int output_enabled, int mi_row, int mi_col,
+ BLOCK_SIZE_TYPE bsize);
+
+static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x);
+
+/* activity_avg must be positive, or flat regions could get a zero weight
+ * (infinite lambda), which confounds analysis.
+ * This also avoids the need for divide by zero checks in
+ * vp9_activity_masking().
+ */
+#define VP9_ACTIVITY_AVG_MIN (64)
+
+/* This is used as a reference when computing the source variance for the
+ * purposes of activity masking.
+ * Eventually this should be replaced by custom no-reference routines,
+ * which will be faster.
+ */
+static const uint8_t VP9_VAR_OFFS[16] = {
+  128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128
+};
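+
+// Because VP9_VAR_OFFS is a flat row of 128s and is passed with a stride of
+// 0 below, the vp9_variance16x16() call effectively measures the variance of
+// the source block about the constant value 128.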
+
+
+// Original activity measure from Tim T's code.
+static unsigned int tt_activity_measure(VP9_COMP *cpi, MACROBLOCK *x) {
+ unsigned int act;
+ unsigned int sse;
+ /* TODO: This could also be done over smaller areas (8x8), but that would
+ * require extensive changes elsewhere, as lambda is assumed to be fixed
+ * over an entire MB in most of the code.
+ * Another option is to compute four 8x8 variances, and pick a single
+ * lambda using a non-linear combination (e.g., the smallest, or second
+ * smallest, etc.).
+ */
+ act = vp9_variance16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ VP9_VAR_OFFS, 0, &sse);
+ act <<= 4;
+
+  /* If the region is fairly flat (act < 8 << 12), cap the activity at
+     5 << 12. */
+ if (act < 8 << 12)
+ act = act < 5 << 12 ? act : 5 << 12;
+
+ return act;
+}
+
+// Stub for alternative experimental activity measures.
+static unsigned int alt_activity_measure(VP9_COMP *cpi,
+ MACROBLOCK *x, int use_dc_pred) {
+ return vp9_encode_intra(cpi, x, use_dc_pred);
+}
+
+DECLARE_ALIGNED(16, static const uint8_t, vp9_64x64_zeros[64*64]) = { 0 };
+
+
+// Measure the activity of the current macroblock.
+// What we measure here is TBD, so it is abstracted into this function.
+#define ALT_ACT_MEASURE 1
+static unsigned int mb_activity_measure(VP9_COMP *cpi, MACROBLOCK *x,
+ int mb_row, int mb_col) {
+ unsigned int mb_activity;
+
+ if (ALT_ACT_MEASURE) {
+ int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
+
+    // Or use an alternative.
+ mb_activity = alt_activity_measure(cpi, x, use_dc_pred);
+ } else {
+ // Original activity measure from Tim T's code.
+ mb_activity = tt_activity_measure(cpi, x);
+ }
+
+ if (mb_activity < VP9_ACTIVITY_AVG_MIN)
+ mb_activity = VP9_ACTIVITY_AVG_MIN;
+
+ return mb_activity;
+}
+
+// Calculate an "average" mb activity value for the frame
+#define ACT_MEDIAN 0
+static void calc_av_activity(VP9_COMP *cpi, int64_t activity_sum) {
+#if ACT_MEDIAN
+ // Find median: Simple n^2 algorithm for experimentation
+ {
+ unsigned int median;
+ unsigned int i, j;
+ unsigned int *sortlist;
+ unsigned int tmp;
+
+ // Create a list to sort to
+ CHECK_MEM_ERROR(sortlist,
+ vpx_calloc(sizeof(unsigned int),
+ cpi->common.MBs));
+
+ // Copy map to sort list
+ vpx_memcpy(sortlist, cpi->mb_activity_map,
+ sizeof(unsigned int) * cpi->common.MBs);
+
+
+ // Ripple each value down to its correct position
+ for (i = 1; i < cpi->common.MBs; i ++) {
+ for (j = i; j > 0; j --) {
+ if (sortlist[j] < sortlist[j - 1]) {
+ // Swap values
+ tmp = sortlist[j - 1];
+ sortlist[j - 1] = sortlist[j];
+ sortlist[j] = tmp;
+ } else
+ break;
+ }
+ }
+
+    // Even number of MBs, so estimate the median as the mean of the two
+    // values either side of the midpoint.
+ median = (1 + sortlist[cpi->common.MBs >> 1] +
+ sortlist[(cpi->common.MBs >> 1) + 1]) >> 1;
+
+ cpi->activity_avg = median;
+
+ vpx_free(sortlist);
+ }
+#else
+ // Simple mean for now
+ cpi->activity_avg = (unsigned int)(activity_sum / cpi->common.MBs);
+#endif
+
+ if (cpi->activity_avg < VP9_ACTIVITY_AVG_MIN)
+ cpi->activity_avg = VP9_ACTIVITY_AVG_MIN;
+
+ // Experimental code: return fixed value normalized for several clips
+ if (ALT_ACT_MEASURE)
+ cpi->activity_avg = 100000;
+}
+
+#define USE_ACT_INDEX 0
+#define OUTPUT_NORM_ACT_STATS 0
+
+#if USE_ACT_INDEX
+// Calculate an activity index for each mb
+static void calc_activity_index(VP9_COMP *cpi, MACROBLOCK *x) {
+ VP9_COMMON *const cm = &cpi->common;
+ int mb_row, mb_col;
+
+ int64_t act;
+ int64_t a;
+ int64_t b;
+
+#if OUTPUT_NORM_ACT_STATS
+ FILE *f = fopen("norm_act.stt", "a");
+ fprintf(f, "\n%12d\n", cpi->activity_avg);
+#endif
+
+ // Reset pointers to start of activity map
+ x->mb_activity_ptr = cpi->mb_activity_map;
+
+ // Calculate normalized mb activity number.
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
+ // for each macroblock col in image
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+ // Read activity from the map
+ act = *(x->mb_activity_ptr);
+
+ // Calculate a normalized activity number
+ a = act + 4 * cpi->activity_avg;
+ b = 4 * act + cpi->activity_avg;
+
+ if (b >= a)
+ *(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
+ else
+ *(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);
+
+#if OUTPUT_NORM_ACT_STATS
+ fprintf(f, " %6d", *(x->mb_activity_ptr));
+#endif
+ // Increment activity map pointers
+ x->mb_activity_ptr++;
+ }
+
+#if OUTPUT_NORM_ACT_STATS
+ fprintf(f, "\n");
+#endif
+
+ }
+
+#if OUTPUT_NORM_ACT_STATS
+ fclose(f);
+#endif
+
+}
+#endif
+
+// Loop through all MBs. Note the activity of each, compute the average
+// activity, and calculate a normalized activity for each.
+static void build_activity_map(VP9_COMP *cpi) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *xd = &x->e_mbd;
+ VP9_COMMON *const cm = &cpi->common;
+
+#if ALT_ACT_MEASURE
+ YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
+ int recon_yoffset;
+ int recon_y_stride = new_yv12->y_stride;
+#endif
+
+ int mb_row, mb_col;
+ unsigned int mb_activity;
+ int64_t activity_sum = 0;
+
+ x->mb_activity_ptr = cpi->mb_activity_map;
+
+ // for each macroblock row in image
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
+#if ALT_ACT_MEASURE
+ // reset above block coeffs
+ xd->up_available = (mb_row != 0);
+ recon_yoffset = (mb_row * recon_y_stride * 16);
+#endif
+ // for each macroblock col in image
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+#if ALT_ACT_MEASURE
+ xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
+ xd->left_available = (mb_col != 0);
+ recon_yoffset += 16;
+#endif
+
+ // measure activity
+ mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);
+
+ // Keep frame sum
+ activity_sum += mb_activity;
+
+ // Store MB level activity details.
+ *x->mb_activity_ptr = mb_activity;
+
+ // Increment activity map pointer
+ x->mb_activity_ptr++;
+
+ // adjust to the next column of source macroblocks
+ x->plane[0].src.buf += 16;
+ }
+
+
+ // adjust to the next row of mbs
+ x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols;
+ }
+
+ // Calculate an "average" MB activity
+ calc_av_activity(cpi, activity_sum);
+
+#if USE_ACT_INDEX
+ // Calculate an activity index number of each mb
+ calc_activity_index(cpi, x);
+#endif
+
+}
+
+// Macroblock activity masking
+void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x) {
+#if USE_ACT_INDEX
+ x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
+ x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
+ x->errorperbit += (x->errorperbit == 0);
+#else
+ int64_t a;
+ int64_t b;
+ int64_t act = *(x->mb_activity_ptr);
+
+ // Apply the masking to the RD multiplier.
+ a = act + (2 * cpi->activity_avg);
+ b = (2 * act) + cpi->activity_avg;
+
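+  // The ratio b/a is 1/2 when act == 0, exactly 1 when act == activity_avg,
+  // and approaches 2 as act grows, so flat, low-activity blocks get a
+  // smaller rdmult (more bits) and busy blocks a larger one.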
+ x->rdmult = (unsigned int)(((int64_t)x->rdmult * b + (a >> 1)) / a);
+ x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
+ x->errorperbit += (x->errorperbit == 0);
+#endif
+
+ // Activity based Zbin adjustment
+ adjust_act_zbin(cpi, x);
+}
+
+static void update_state(VP9_COMP *cpi,
+ PICK_MODE_CONTEXT *ctx,
+ BLOCK_SIZE_TYPE bsize,
+ int output_enabled) {
+ int i, x_idx, y;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MODE_INFO *mi = &ctx->mic;
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+#if CONFIG_DEBUG || CONFIG_INTERNAL_STATS
+ MB_PREDICTION_MODE mb_mode = mi->mbmi.mode;
+#endif
+ int mb_mode_index = ctx->best_mode_index;
+ const int mis = cpi->common.mode_info_stride;
+ const int bh = 1 << mi_height_log2(bsize), bw = 1 << mi_width_log2(bsize);
+
+#if CONFIG_DEBUG
+ assert(mb_mode < MB_MODE_COUNT);
+ assert(mb_mode_index < MAX_MODES);
+ assert(mi->mbmi.ref_frame[0] < MAX_REF_FRAMES);
+ assert(mi->mbmi.ref_frame[1] < MAX_REF_FRAMES);
+#endif
+
+ assert(mi->mbmi.sb_type == bsize);
+  // Restore the coding context of the MB to the one that was in place
+  // when the mode was picked for it.
+ for (y = 0; y < bh; y++) {
+ for (x_idx = 0; x_idx < bw; x_idx++) {
+ if ((xd->mb_to_right_edge >> (3 + LOG2_MI_SIZE)) + bw > x_idx &&
+ (xd->mb_to_bottom_edge >> (3 + LOG2_MI_SIZE)) + bh > y) {
+ MODE_INFO *mi_addr = xd->mode_info_context + x_idx + y * mis;
+ *mi_addr = *mi;
+ }
+ }
+ }
+ if (bsize < BLOCK_SIZE_SB32X32) {
+ if (bsize < BLOCK_SIZE_MB16X16)
+ ctx->txfm_rd_diff[ALLOW_16X16] = ctx->txfm_rd_diff[ALLOW_8X8];
+ ctx->txfm_rd_diff[ALLOW_32X32] = ctx->txfm_rd_diff[ALLOW_16X16];
+ }
+
+ if (mbmi->ref_frame[0] != INTRA_FRAME && mbmi->sb_type < BLOCK_SIZE_SB8X8) {
+ *x->partition_info = ctx->partition_info;
+ mbmi->mv[0].as_int = x->partition_info->bmi[3].mv.as_int;
+ mbmi->mv[1].as_int = x->partition_info->bmi[3].second_mv.as_int;
+ }
+
+ x->skip = ctx->skip;
+ if (!output_enabled)
+ return;
+
+ if (!vp9_segfeature_active(xd, mbmi->segment_id, SEG_LVL_SKIP)) {
+ for (i = 0; i < NB_TXFM_MODES; i++) {
+ cpi->rd_tx_select_diff[i] += ctx->txfm_rd_diff[i];
+ }
+ }
+
+ if (cpi->common.frame_type == KEY_FRAME) {
+ // Restore the coding modes to that held in the coding context
+ // if (mb_mode == I4X4_PRED)
+ // for (i = 0; i < 16; i++)
+ // {
+ // xd->block[i].bmi.as_mode =
+ // xd->mode_info_context->bmi[i].as_mode;
+ // assert(xd->mode_info_context->bmi[i].as_mode < MB_MODE_COUNT);
+ // }
+#if CONFIG_INTERNAL_STATS
+ static const int kf_mode_index[] = {
+ THR_DC /*DC_PRED*/,
+ THR_V_PRED /*V_PRED*/,
+ THR_H_PRED /*H_PRED*/,
+ THR_D45_PRED /*D45_PRED*/,
+ THR_D135_PRED /*D135_PRED*/,
+ THR_D117_PRED /*D117_PRED*/,
+ THR_D153_PRED /*D153_PRED*/,
+ THR_D27_PRED /*D27_PRED*/,
+ THR_D63_PRED /*D63_PRED*/,
+ THR_TM /*TM_PRED*/,
+ THR_B_PRED /*I4X4_PRED*/,
+ };
+ cpi->mode_chosen_counts[kf_mode_index[mb_mode]]++;
+#endif
+ } else {
+ /*
+ // Reduce the activation RD thresholds for the best choice mode
+ if ((cpi->rd_baseline_thresh[mb_mode_index] > 0) &&
+ (cpi->rd_baseline_thresh[mb_mode_index] < (INT_MAX >> 2)))
+ {
+ int best_adjustment = (cpi->rd_thresh_mult[mb_mode_index] >> 2);
+
+ cpi->rd_thresh_mult[mb_mode_index] =
+ (cpi->rd_thresh_mult[mb_mode_index]
+ >= (MIN_THRESHMULT + best_adjustment)) ?
+ cpi->rd_thresh_mult[mb_mode_index] - best_adjustment :
+ MIN_THRESHMULT;
+ cpi->rd_threshes[mb_mode_index] =
+ (cpi->rd_baseline_thresh[mb_mode_index] >> 7)
+ * cpi->rd_thresh_mult[mb_mode_index];
+
+ }
+ */
+ // Note how often each mode chosen as best
+ cpi->mode_chosen_counts[mb_mode_index]++;
+ if (mbmi->ref_frame[0] != INTRA_FRAME &&
+ (mbmi->sb_type < BLOCK_SIZE_SB8X8 || mbmi->mode == NEWMV)) {
+ int_mv best_mv, best_second_mv;
+ const MV_REFERENCE_FRAME rf1 = mbmi->ref_frame[0];
+ const MV_REFERENCE_FRAME rf2 = mbmi->ref_frame[1];
+ best_mv.as_int = ctx->best_ref_mv.as_int;
+ best_second_mv.as_int = ctx->second_best_ref_mv.as_int;
+ if (mbmi->mode == NEWMV) {
+ best_mv.as_int = mbmi->ref_mvs[rf1][0].as_int;
+ best_second_mv.as_int = mbmi->ref_mvs[rf2][0].as_int;
+ }
+ mbmi->best_mv.as_int = best_mv.as_int;
+ mbmi->best_second_mv.as_int = best_second_mv.as_int;
+ vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv);
+ }
+
+ if (bsize > BLOCK_SIZE_SB8X8 && mbmi->mode == NEWMV) {
+ int i, j;
+ for (j = 0; j < bh; ++j)
+ for (i = 0; i < bw; ++i)
+ if ((xd->mb_to_right_edge >> (3 + LOG2_MI_SIZE)) + bw > i &&
+ (xd->mb_to_bottom_edge >> (3 + LOG2_MI_SIZE)) + bh > j)
+ xd->mode_info_context[mis * j + i].mbmi = *mbmi;
+ }
+
+ if (cpi->common.mcomp_filter_type == SWITCHABLE &&
+ is_inter_mode(mbmi->mode)) {
+ ++cpi->common.fc.switchable_interp_count
+ [vp9_get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
+ [vp9_switchable_interp_map[mbmi->interp_filter]];
+ }
+
+ cpi->rd_comp_pred_diff[SINGLE_PREDICTION_ONLY] += ctx->single_pred_diff;
+ cpi->rd_comp_pred_diff[COMP_PREDICTION_ONLY] += ctx->comp_pred_diff;
+ cpi->rd_comp_pred_diff[HYBRID_PREDICTION] += ctx->hybrid_pred_diff;
+ }
+}
+
+static unsigned find_seg_id(VP9_COMMON *cm, uint8_t *buf, BLOCK_SIZE_TYPE bsize,
+ int start_y, int height, int start_x, int width) {
+ const int bw = 1 << mi_width_log2(bsize), bh = 1 << mi_height_log2(bsize);
+ const int end_x = MIN(start_x + bw, width);
+ const int end_y = MIN(start_y + bh, height);
+ int x, y;
+ unsigned seg_id = -1;
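+  // seg_id is unsigned, so the -1 initializer wraps to UINT_MAX and the
+  // MIN() below always selects the smallest segment id covering the block.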
+
+ buf += width * start_y;
+ assert(start_y < cm->mi_rows && start_x < cm->cur_tile_mi_col_end);
+ for (y = start_y; y < end_y; y++, buf += width) {
+ for (x = start_x; x < end_x; x++) {
+ seg_id = MIN(seg_id, buf[x]);
+ }
+ }
+
+ return seg_id;
+}
+
+void vp9_setup_src_planes(MACROBLOCK *x,
+ const YV12_BUFFER_CONFIG *src,
+ int mb_row, int mb_col) {
+ uint8_t *buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
+ src->alpha_buffer};
+ int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
+ src->alpha_stride};
+ int i;
+
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ setup_pred_plane(&x->plane[i].src,
+ buffers[i], strides[i],
+ mb_row, mb_col, NULL,
+ x->e_mbd.plane[i].subsampling_x,
+ x->e_mbd.plane[i].subsampling_y);
+ }
+}
+
+static void set_offsets(VP9_COMP *cpi,
+ int mi_row, int mi_col, BLOCK_SIZE_TYPE bsize) {
+ MACROBLOCK *const x = &cpi->mb;
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi;
+ const int dst_fb_idx = cm->new_fb_idx;
+ const int idx_str = xd->mode_info_stride * mi_row + mi_col;
+ const int bw = 1 << mi_width_log2(bsize), bh = 1 << mi_height_log2(bsize);
+ const int mb_row = mi_row >> 1;
+ const int mb_col = mi_col >> 1;
+ const int idx_map = mb_row * cm->mb_cols + mb_col;
+ int i;
+
+ // entropy context structures
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].above_context = cm->above_context[i] +
+ (mi_col * 2 >> xd->plane[i].subsampling_x);
+ xd->plane[i].left_context = cm->left_context[i] +
+ (((mi_row * 2) & 15) >> xd->plane[i].subsampling_y);
+ }
+
+ // partition contexts
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+
+ // Activity map pointer
+ x->mb_activity_ptr = &cpi->mb_activity_map[idx_map];
+ x->active_ptr = cpi->active_map + idx_map;
+
+ /* pointers to mode info contexts */
+ x->partition_info = x->pi + idx_str;
+ xd->mode_info_context = cm->mi + idx_str;
+ mbmi = &xd->mode_info_context->mbmi;
+ // Special case: if prev_mi is NULL, the previous mode info context
+ // cannot be used.
+ xd->prev_mode_info_context = cm->prev_mi ?
+ cm->prev_mi + idx_str : NULL;
+
+ // Set up destination pointers
+ setup_dst_planes(xd, &cm->yv12_fb[dst_fb_idx], mi_row, mi_col);
+
+ /* Set up limit values for MV components to prevent them from
+ * extending beyond the UMV borders assuming 16x16 block size */
+ x->mv_row_min = -((mi_row * MI_SIZE) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
+ x->mv_col_min = -((mi_col * MI_SIZE) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
+ x->mv_row_max = ((cm->mi_rows - mi_row) * MI_SIZE +
+ (VP9BORDERINPIXELS - MI_SIZE * bh - VP9_INTERP_EXTEND));
+ x->mv_col_max = ((cm->mi_cols - mi_col) * MI_SIZE +
+ (VP9BORDERINPIXELS - MI_SIZE * bw - VP9_INTERP_EXTEND));
+
+ // Set up distance of MB to edge of frame in 1/8th pel units
+ assert(!(mi_col & (bw - 1)) && !(mi_row & (bh - 1)));
+ set_mi_row_col(cm, xd, mi_row, bh, mi_col, bw);
+
+ /* set up source buffers */
+ vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
+
+ /* R/D setup */
+ x->rddiv = cpi->RDDIV;
+ x->rdmult = cpi->RDMULT;
+
+ /* segment ID */
+ if (xd->segmentation_enabled) {
+ uint8_t *map = xd->update_mb_segmentation_map ? cpi->segmentation_map
+ : cm->last_frame_seg_map;
+ mbmi->segment_id = find_seg_id(cm, map, bsize, mi_row,
+ cm->mi_rows, mi_col, cm->mi_cols);
+
+ assert(mbmi->segment_id <= (MAX_MB_SEGMENTS-1));
+ vp9_mb_init_quantizer(cpi, x);
+
+ if (xd->segmentation_enabled && cpi->seg0_cnt > 0 &&
+ !vp9_segfeature_active(xd, 0, SEG_LVL_REF_FRAME) &&
+ vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME)) {
+ cpi->seg0_progress = (cpi->seg0_idx << 16) / cpi->seg0_cnt;
+ } else {
+ const int y = mb_row & ~3;
+ const int x = mb_col & ~3;
+ const int p16 = ((mb_row & 1) << 1) + (mb_col & 1);
+ const int p32 = ((mb_row & 2) << 2) + ((mb_col & 2) << 1);
+ const int tile_progress =
+ cm->cur_tile_mi_col_start * cm->mb_rows >> 1;
+ const int mb_cols =
+ (cm->cur_tile_mi_col_end - cm->cur_tile_mi_col_start) >> 1;
+
+ cpi->seg0_progress =
+ ((y * mb_cols + x * 4 + p32 + p16 + tile_progress) << 16) / cm->MBs;
+ }
+ } else {
+ mbmi->segment_id = 0;
+ }
+}
+
+static void pick_sb_modes(VP9_COMP *cpi, int mi_row, int mi_col,
+ TOKENEXTRA **tp, int *totalrate, int *totaldist,
+ BLOCK_SIZE_TYPE bsize, PICK_MODE_CONTEXT *ctx) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+
+ x->rd_search = 1;
+
+ if (bsize < BLOCK_SIZE_SB8X8)
+ if (xd->ab_index != 0)
+ return;
+
+ set_offsets(cpi, mi_row, mi_col, bsize);
+ xd->mode_info_context->mbmi.sb_type = bsize;
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
+ vp9_activity_masking(cpi, x);
+
+ /* Find best coding mode & reconstruct the MB so it is available
+ * as a predictor for MBs that follow in the SB */
+ if (cm->frame_type == KEY_FRAME) {
+ vp9_rd_pick_intra_mode_sb(cpi, x, totalrate, totaldist, bsize, ctx);
+ } else {
+ vp9_rd_pick_inter_mode_sb(cpi, x, mi_row, mi_col, totalrate, totaldist,
+ bsize, ctx);
+ }
+}
+
+static void update_stats(VP9_COMP *cpi, int mi_row, int mi_col) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MODE_INFO *mi = xd->mode_info_context;
+ MB_MODE_INFO *const mbmi = &mi->mbmi;
+
+ if (cm->frame_type != KEY_FRAME) {
+ int segment_id, seg_ref_active;
+
+ segment_id = mbmi->segment_id;
+ seg_ref_active = vp9_segfeature_active(xd, segment_id,
+ SEG_LVL_REF_FRAME);
+
+ if (!seg_ref_active)
+ cpi->intra_inter_count[vp9_get_pred_context(cm, xd, PRED_INTRA_INTER)]
+ [mbmi->ref_frame[0] > INTRA_FRAME]++;
+
+ // If the segment reference feature is enabled we have only a single
+ // reference frame allowed for the segment so exclude it from
+ // the reference frame counts used to work out probabilities.
+ if ((mbmi->ref_frame[0] > INTRA_FRAME) && !seg_ref_active) {
+ if (cm->comp_pred_mode == HYBRID_PREDICTION)
+ cpi->comp_inter_count[vp9_get_pred_context(cm, xd,
+ PRED_COMP_INTER_INTER)]
+ [mbmi->ref_frame[1] > INTRA_FRAME]++;
+
+ if (mbmi->ref_frame[1] > INTRA_FRAME) {
+ cpi->comp_ref_count[vp9_get_pred_context(cm, xd, PRED_COMP_REF_P)]
+ [mbmi->ref_frame[0] == GOLDEN_FRAME]++;
+ } else {
+ cpi->single_ref_count[vp9_get_pred_context(cm, xd, PRED_SINGLE_REF_P1)]
+ [0][mbmi->ref_frame[0] != LAST_FRAME]++;
+ if (mbmi->ref_frame[0] != LAST_FRAME)
+ cpi->single_ref_count[vp9_get_pred_context(cm, xd,
+ PRED_SINGLE_REF_P2)]
+ [1][mbmi->ref_frame[0] != GOLDEN_FRAME]++;
+ }
+ }
+ // Count of last ref frame 0,0 usage
+ if ((mbmi->mode == ZEROMV) && (mbmi->ref_frame[0] == LAST_FRAME))
+ cpi->inter_zz_count++;
+ }
+}
+
+// TODO(jingning): the variables used here are a little complicated. Further
+// refactoring is needed to organize the temporary buffers once recursive
+// partitioning down to 4x4 block size is enabled.
+static PICK_MODE_CONTEXT *get_block_context(MACROBLOCK *x,
+ BLOCK_SIZE_TYPE bsize) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+
+ switch (bsize) {
+ case BLOCK_SIZE_SB64X64:
+ return &x->sb64_context;
+ case BLOCK_SIZE_SB64X32:
+ return &x->sb64x32_context[xd->sb_index];
+ case BLOCK_SIZE_SB32X64:
+ return &x->sb32x64_context[xd->sb_index];
+ case BLOCK_SIZE_SB32X32:
+ return &x->sb32_context[xd->sb_index];
+ case BLOCK_SIZE_SB32X16:
+ return &x->sb32x16_context[xd->sb_index][xd->mb_index];
+ case BLOCK_SIZE_SB16X32:
+ return &x->sb16x32_context[xd->sb_index][xd->mb_index];
+ case BLOCK_SIZE_MB16X16:
+ return &x->mb_context[xd->sb_index][xd->mb_index];
+ case BLOCK_SIZE_SB16X8:
+ return &x->sb16x8_context[xd->sb_index][xd->mb_index][xd->b_index];
+ case BLOCK_SIZE_SB8X16:
+ return &x->sb8x16_context[xd->sb_index][xd->mb_index][xd->b_index];
+ case BLOCK_SIZE_SB8X8:
+ return &x->sb8x8_context[xd->sb_index][xd->mb_index][xd->b_index];
+ case BLOCK_SIZE_SB8X4:
+ return &x->sb8x4_context[xd->sb_index][xd->mb_index][xd->b_index];
+ case BLOCK_SIZE_SB4X8:
+ return &x->sb4x8_context[xd->sb_index][xd->mb_index][xd->b_index];
+ case BLOCK_SIZE_AB4X4:
+ return &x->ab4x4_context[xd->sb_index][xd->mb_index][xd->b_index];
+ default:
+ assert(0);
+ return NULL;
+ }
+}
+
+static BLOCK_SIZE_TYPE *get_sb_partitioning(MACROBLOCK *x,
+ BLOCK_SIZE_TYPE bsize) {
+ MACROBLOCKD *xd = &x->e_mbd;
+ switch (bsize) {
+ case BLOCK_SIZE_SB64X64:
+ return &x->sb64_partitioning;
+ case BLOCK_SIZE_SB32X32:
+ return &x->sb_partitioning[xd->sb_index];
+ case BLOCK_SIZE_MB16X16:
+ return &x->mb_partitioning[xd->sb_index][xd->mb_index];
+ case BLOCK_SIZE_SB8X8:
+ return &x->b_partitioning[xd->sb_index][xd->mb_index][xd->b_index];
+ default:
+ assert(0);
+ return NULL;
+ }
+}
+
+static void restore_context(VP9_COMP *cpi, int mi_row, int mi_col,
+ ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
+ ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
+ PARTITION_CONTEXT sa[8],
+ PARTITION_CONTEXT sl[8],
+ BLOCK_SIZE_TYPE bsize) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int p;
+ int bwl = b_width_log2(bsize), bw = 1 << bwl;
+ int bhl = b_height_log2(bsize), bh = 1 << bhl;
+ int mwl = mi_width_log2(bsize), mw = 1 << mwl;
+ int mhl = mi_height_log2(bsize), mh = 1 << mhl;
+ for (p = 0; p < MAX_MB_PLANE; p++) {
+ vpx_memcpy(cm->above_context[p] +
+ ((mi_col * 2) >> xd->plane[p].subsampling_x),
+ a + bw * p,
+ sizeof(ENTROPY_CONTEXT) * bw >> xd->plane[p].subsampling_x);
+ vpx_memcpy(cm->left_context[p] +
+ ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
+ l + bh * p,
+ sizeof(ENTROPY_CONTEXT) * bh >> xd->plane[p].subsampling_y);
+ }
+ vpx_memcpy(cm->above_seg_context + mi_col, sa,
+ sizeof(PARTITION_CONTEXT) * mw);
+ vpx_memcpy(cm->left_seg_context + (mi_row & MI_MASK), sl,
+ sizeof(PARTITION_CONTEXT) * mh);
+}
+
+static void save_context(VP9_COMP *cpi, int mi_row, int mi_col,
+ ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
+ ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
+ PARTITION_CONTEXT sa[8],
+ PARTITION_CONTEXT sl[8],
+ BLOCK_SIZE_TYPE bsize) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int p;
+ int bwl = b_width_log2(bsize), bw = 1 << bwl;
+ int bhl = b_height_log2(bsize), bh = 1 << bhl;
+ int mwl = mi_width_log2(bsize), mw = 1 << mwl;
+ int mhl = mi_height_log2(bsize), mh = 1 << mhl;
+
+  // Buffer the above/left context information of the block being searched.
+ for (p = 0; p < MAX_MB_PLANE; ++p) {
+ vpx_memcpy(a + bw * p, cm->above_context[p] +
+ (mi_col * 2 >> xd->plane[p].subsampling_x),
+ sizeof(ENTROPY_CONTEXT) * bw >> xd->plane[p].subsampling_x);
+ vpx_memcpy(l + bh * p, cm->left_context[p] +
+ ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
+ sizeof(ENTROPY_CONTEXT) * bh >> xd->plane[p].subsampling_y);
+ }
+ vpx_memcpy(sa, cm->above_seg_context + mi_col,
+ sizeof(PARTITION_CONTEXT) * mw);
+ vpx_memcpy(sl, cm->left_seg_context + (mi_row & MI_MASK),
+ sizeof(PARTITION_CONTEXT) * mh);
+}
+
+static void encode_b(VP9_COMP *cpi, TOKENEXTRA **tp,
+ int mi_row, int mi_col, int output_enabled,
+ BLOCK_SIZE_TYPE bsize, int sub_index) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ if (sub_index != -1)
+ *(get_sb_index(xd, bsize)) = sub_index;
+
+ if (bsize < BLOCK_SIZE_SB8X8)
+ if (xd->ab_index > 0)
+ return;
+ set_offsets(cpi, mi_row, mi_col, bsize);
+ update_state(cpi, get_block_context(x, bsize), bsize, output_enabled);
+ encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize);
+
+ if (output_enabled) {
+ update_stats(cpi, mi_row, mi_col);
+
+ (*tp)->token = EOSB_TOKEN;
+ (*tp)++;
+ }
+}
+
+static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp,
+ int mi_row, int mi_col, int output_enabled,
+ BLOCK_SIZE_TYPE bsize) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ BLOCK_SIZE_TYPE c1 = BLOCK_SIZE_SB8X8;
+ const int bsl = b_width_log2(bsize), bs = (1 << bsl) / 4;
+ int bwl, bhl;
+ int UNINITIALIZED_IS_SAFE(pl);
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ c1 = BLOCK_SIZE_AB4X4;
+ if (bsize >= BLOCK_SIZE_SB8X8) {
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ c1 = *(get_sb_partitioning(x, bsize));
+ }
+
+ bwl = b_width_log2(c1), bhl = b_height_log2(c1);
+
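+  // Infer the partition from the chosen subsize c1: the same width and
+  // height as bsize means no split, a narrower subsize means a vertical
+  // split, a shorter one a horizontal split, and smaller in both dimensions
+  // a 4-way split.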
+ if (bsl == bwl && bsl == bhl) {
+ if (output_enabled && bsize >= BLOCK_SIZE_SB8X8)
+ cpi->partition_count[pl][PARTITION_NONE]++;
+ encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, -1);
+ } else if (bsl == bhl && bsl > bwl) {
+ if (output_enabled)
+ cpi->partition_count[pl][PARTITION_VERT]++;
+ encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, 0);
+ encode_b(cpi, tp, mi_row, mi_col + bs, output_enabled, c1, 1);
+ } else if (bsl == bwl && bsl > bhl) {
+ if (output_enabled)
+ cpi->partition_count[pl][PARTITION_HORZ]++;
+ encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, 0);
+ encode_b(cpi, tp, mi_row + bs, mi_col, output_enabled, c1, 1);
+ } else {
+ BLOCK_SIZE_TYPE subsize;
+ int i;
+
+ assert(bwl < bsl && bhl < bsl);
+ subsize = get_subsize(bsize, PARTITION_SPLIT);
+
+ if (output_enabled)
+ cpi->partition_count[pl][PARTITION_SPLIT]++;
+
+ for (i = 0; i < 4; i++) {
+ const int x_idx = i & 1, y_idx = i >> 1;
+
+ *(get_sb_index(xd, subsize)) = i;
+ encode_sb(cpi, tp, mi_row + y_idx * bs, mi_col + x_idx * bs,
+ output_enabled, subsize);
+ }
+ }
+
+ if (bsize >= BLOCK_SIZE_SB8X8 &&
+ (bsize == BLOCK_SIZE_SB8X8 || bsl == bwl || bsl == bhl)) {
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ update_partition_context(xd, c1, bsize);
+ }
+}
+
+static void set_partitioning(VP9_COMP *cpi, MODE_INFO *m,
+ BLOCK_SIZE_TYPE bsize) {
+ VP9_COMMON *const cm = &cpi->common;
+ const int mis = cm->mode_info_stride;
+ int bsl = b_width_log2(bsize);
+  int bs = (1 << bsl) / 2;
+ int block_row, block_col;
+ int row, col;
+
+  // This test function sets the entire macroblock to the same bsize.
+ for (block_row = 0; block_row < 8; block_row += bs) {
+ for (block_col = 0; block_col < 8; block_col += bs) {
+ for (row = 0; row < bs; row++) {
+ for (col = 0; col < bs; col++) {
+ m[(block_row+row)*mis + block_col+col].mbmi.sb_type = bsize;
+ }
+ }
+ }
+ }
+}
+
+static void set_block_size(VP9_COMMON *const cm,
+ MODE_INFO *m, BLOCK_SIZE_TYPE bsize, int mis,
+ int mi_row, int mi_col) {
+ int row, col;
+ int bwl = b_width_log2(bsize);
+ int bhl = b_height_log2(bsize);
+ int bsl = (bwl > bhl ? bwl : bhl);
+
+  int bs = (1 << bsl) / 2;
+ MODE_INFO *m2 = m + mi_row * mis + mi_col;
+ for (row = 0; row < bs; row++) {
+ for (col = 0; col < bs; col++) {
+ if (mi_row + row >= cm->mi_rows || mi_col + col >= cm->mi_cols)
+ continue;
+ m2[row*mis+col].mbmi.sb_type = bsize;
+ }
+ }
+}
+
+typedef struct {
+ int64_t sum_square_error;
+ int64_t sum_error;
+ int count;
+ int variance;
+} var;
+
+#define VT(TYPE, BLOCKSIZE) \
+ typedef struct { \
+ var none; \
+ var horz[2]; \
+ var vert[2]; \
+ BLOCKSIZE split[4]; } TYPE;
+
+VT(v8x8, var)
+VT(v16x16, v8x8)
+VT(v32x32, v16x16)
+VT(v64x64, v32x32)
+
+typedef enum {
+ V16X16,
+ V32X32,
+ V64X64,
+} TREE_LEVEL;
+
+// Set variance values given sum square error, sum error, count.
+static void fill_variance(var *v, int64_t s2, int64_t s, int c) {
+ v->sum_square_error = s2;
+ v->sum_error = s;
+ v->count = c;
+ v->variance = 256
+ * (v->sum_square_error - v->sum_error * v->sum_error / v->count)
+ / v->count;
+}
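+
+// The variance is kept as 256 times the per-sample population variance,
+// 256 * (E[x^2] - E[x]^2), so the integer division retains some fractional
+// precision.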
+
+// Combine 2 variance structures by summing the sum_error, sum_square_error,
+// and counts and then calculating the new variance.
+void sum_2_variances(var *r, var *a, var *b) {
+ fill_variance(r, a->sum_square_error + b->sum_square_error,
+ a->sum_error + b->sum_error, a->count + b->count);
+}
+// Fill one level of the variance tree by summing each pair of split sums
+// into the horz, vert and none entries and recalculating their variances.
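+// Note: callers pass arguments of the form &vt..., so the textual expansion
+// "VT.member" becomes "&vt....member"; since unary & binds more loosely than
+// the . and [] operators, each expanded argument is a pointer, as
+// sum_2_variances() expects.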
+#define fill_variance_tree(VT) \
+ sum_2_variances(VT.horz[0], VT.split[0].none, VT.split[1].none); \
+ sum_2_variances(VT.horz[1], VT.split[2].none, VT.split[3].none); \
+ sum_2_variances(VT.vert[0], VT.split[0].none, VT.split[2].none); \
+ sum_2_variances(VT.vert[1], VT.split[1].none, VT.split[3].none); \
+ sum_2_variances(VT.none, VT.vert[0], VT.vert[1]);
+
+// If the variance is below our threshold, set the block size in the mode
+// info structure to none, horz or vert as appropriate and perform ACTION.
+#define set_vt_size(VT, BLOCKSIZE, R, C, ACTION) \
+ if (VT.none.variance < threshold) { \
+ set_block_size(cm, m, BLOCKSIZE, mis, R, C); \
+ ACTION; \
+ } \
+ if (VT.horz[0].variance < threshold && VT.horz[1].variance < threshold ) { \
+ set_block_size(cm, m, get_subsize(BLOCKSIZE, PARTITION_HORZ), mis, R, C); \
+ ACTION; \
+ } \
+ if (VT.vert[0].variance < threshold && VT.vert[1].variance < threshold ) { \
+ set_block_size(cm, m, get_subsize(BLOCKSIZE, PARTITION_VERT), mis, R, C); \
+ ACTION; \
+ }
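+
+// The checks above run in the order none, horz, vert; the first variance
+// test that passes sets the block size and executes ACTION (a return or
+// continue), otherwise control falls through to try a finer split.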
+
+static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
+ int mi_col) {
+ VP9_COMMON * const cm = &cpi->common;
+ MACROBLOCK *x = &cpi->mb;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ const int mis = cm->mode_info_stride;
+  // TODO(JBB): More experimentation or testing of this threshold.
+ int64_t threshold = 4;
+ int i, j, k;
+ v64x64 vt;
+  unsigned char *s;
+  int sp;
+  const unsigned char *d = xd->plane[0].pre->buf;
+  int dp = xd->plane[0].pre->stride;
+ int pixels_wide = 64, pixels_high = 64;
+
+ vpx_memset(&vt, 0, sizeof(vt));
+
+ set_offsets(cpi, mi_row, mi_col, BLOCK_SIZE_SB64X64);
+
+ if (xd->mb_to_right_edge < 0)
+ pixels_wide += (xd->mb_to_right_edge >> 3);
+
+ if (xd->mb_to_bottom_edge < 0)
+ pixels_high += (xd->mb_to_bottom_edge >> 3);
+
+ s = x->plane[0].src.buf;
+ sp = x->plane[0].src.stride;
+
+ // TODO(JBB): Clearly the higher the quantizer the fewer partitions we want
+ // but this needs more experimentation.
+ threshold = threshold * cpi->common.base_qindex * cpi->common.base_qindex;
+
+ // if ( cm->frame_type == KEY_FRAME ) {
+ d = vp9_64x64_zeros;
+ dp = 64;
+ // }
+
+ // Fill in the entire tree of 8x8 variances for splits.
+ for (i = 0; i < 4; i++) {
+ const int x32_idx = ((i & 1) << 5);
+ const int y32_idx = ((i >> 1) << 5);
+ for (j = 0; j < 4; j++) {
+ const int x_idx = x32_idx + ((j & 1) << 4);
+ const int y_idx = y32_idx + ((j >> 1) << 4);
+ const uint8_t *st = s + y_idx * sp + x_idx;
+ const uint8_t *dt = d + y_idx * dp + x_idx;
+ unsigned int sse = 0;
+ int sum = 0;
+ v16x16 *vst = &vt.split[i].split[j];
+ sse = sum = 0;
+ if (x_idx < pixels_wide && y_idx < pixels_high)
+ vp9_get_sse_sum_8x8(st, sp, dt, dp, &sse, &sum);
+ fill_variance(&vst->split[0].none, sse, sum, 64);
+ sse = sum = 0;
+ if (x_idx + 8 < pixels_wide && y_idx < pixels_high)
+ vp9_get_sse_sum_8x8(st + 8, sp, dt + 8, dp, &sse, &sum);
+ fill_variance(&vst->split[1].none, sse, sum, 64);
+ sse = sum = 0;
+ if (x_idx < pixels_wide && y_idx + 8 < pixels_high)
+ vp9_get_sse_sum_8x8(st + 8 * sp, sp, dt + 8 * dp, dp, &sse, &sum);
+ fill_variance(&vst->split[2].none, sse, sum, 64);
+ sse = sum = 0;
+ if (x_idx + 8 < pixels_wide && y_idx + 8 < pixels_high)
+ vp9_get_sse_sum_8x8(st + 8 * sp + 8, sp, dt + 8 + 8 * dp, dp, &sse,
+ &sum);
+ fill_variance(&vst->split[3].none, sse, sum, 64);
+ }
+ }
+ // Fill the rest of the variance tree by summing the split partition
+ // values.
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 4; j++) {
+ fill_variance_tree(&vt.split[i].split[j])
+ }
+ fill_variance_tree(&vt.split[i])
+ }
+ fill_variance_tree(&vt)
+
+  // Now go through the entire structure, splitting every block size until
+  // we get to one that's got a variance lower than our threshold, or we
+  // hit 8x8.
+  set_vt_size(vt, BLOCK_SIZE_SB64X64, mi_row, mi_col, return);
+ for (i = 0; i < 4; ++i) {
+ const int x32_idx = ((i & 1) << 2);
+ const int y32_idx = ((i >> 1) << 2);
+    set_vt_size(vt.split[i], BLOCK_SIZE_SB32X32, mi_row + y32_idx,
+                mi_col + x32_idx, continue);
+
+ for (j = 0; j < 4; ++j) {
+ const int x16_idx = ((j & 1) << 1);
+ const int y16_idx = ((j >> 1) << 1);
+      set_vt_size(vt.split[i].split[j], BLOCK_SIZE_MB16X16,
+                  mi_row + y32_idx + y16_idx, mi_col + x32_idx + x16_idx,
+                  continue);
+
+ for (k = 0; k < 4; ++k) {
+ const int x8_idx = (k & 1);
+ const int y8_idx = (k >> 1);
+ set_block_size(cm, m, BLOCK_SIZE_SB8X8, mis,
+ mi_row + y32_idx + y16_idx + y8_idx,
+ mi_col + x32_idx + x16_idx + x8_idx);
+ }
+ }
+ }
+}
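+
+// Rate-distortion code a superblock using the partitioning that is already
+// stored in the mode info (for example the one produced by
+// choose_partitioning() above), rather than searching for a better one.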
+static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
+ int mi_row, int mi_col, BLOCK_SIZE_TYPE bsize,
+ int *rate, int *dist) {
+ VP9_COMMON * const cm = &cpi->common;
+ MACROBLOCK * const x = &cpi->mb;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ const int mis = cm->mode_info_stride;
+ int bwl = b_width_log2(m->mbmi.sb_type);
+ int bhl = b_height_log2(m->mbmi.sb_type);
+ int bsl = b_width_log2(bsize);
+ int bh = (1 << bhl);
+ int bs = (1 << bsl);
+ int bss = (1 << bsl)/4;
+ int i, pl;
+ PARTITION_TYPE partition;
+ BLOCK_SIZE_TYPE subsize;
+ ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
+ PARTITION_CONTEXT sl[8], sa[8];
+ int r = 0, d = 0;
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ // parse the partition type
+ if ((bwl == bsl) && (bhl == bsl))
+ partition = PARTITION_NONE;
+ else if ((bwl == bsl) && (bhl < bsl))
+ partition = PARTITION_HORZ;
+ else if ((bwl < bsl) && (bhl == bsl))
+ partition = PARTITION_VERT;
+ else if ((bwl < bsl) && (bhl < bsl))
+ partition = PARTITION_SPLIT;
+ else
+ assert(0);
+
+ subsize = get_subsize(bsize, partition);
+
+  // TODO(JBB): this restriction is here because pick_sb_modes can return
+  // rates that are INT_MAX, meaning we can't select a mode / mv for this
+  // block. When the code is made to work for sizes below sb8x8 we need to
+  // come up with a solution to this problem.
+ assert(subsize >= BLOCK_SIZE_SB8X8);
+
+ if (bsize >= BLOCK_SIZE_SB8X8) {
+ xd->left_seg_context = cm->left_seg_context + (mi_row & MI_MASK);
+ xd->above_seg_context = cm->above_seg_context + mi_col;
+ *(get_sb_partitioning(x, bsize)) = subsize;
+ }
+
+ pl = partition_plane_context(xd, bsize);
+ save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+ switch (partition) {
+ case PARTITION_NONE:
+ pick_sb_modes(cpi, mi_row, mi_col, tp, &r, &d, bsize,
+ get_block_context(x, bsize));
+ r += x->partition_cost[pl][PARTITION_NONE];
+ break;
+ case PARTITION_HORZ:
+ *(get_sb_index(xd, subsize)) = 0;
+ pick_sb_modes(cpi, mi_row, mi_col, tp, &r, &d, subsize,
+ get_block_context(x, subsize));
+ if (mi_row + (bh >> 1) <= cm->mi_rows) {
+ int rt, dt;
+ update_state(cpi, get_block_context(x, subsize), subsize, 0);
+ encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
+ *(get_sb_index(xd, subsize)) = 1;
+ pick_sb_modes(cpi, mi_row + (bs >> 2), mi_col, tp, &rt, &dt, subsize,
+ get_block_context(x, subsize));
+ r += rt;
+ d += dt;
+ }
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ r += x->partition_cost[pl][PARTITION_HORZ];
+ break;
+ case PARTITION_VERT:
+ *(get_sb_index(xd, subsize)) = 0;
+ pick_sb_modes(cpi, mi_row, mi_col, tp, &r, &d, subsize,
+ get_block_context(x, subsize));
+ if (mi_col + (bs >> 1) <= cm->mi_cols) {
+ int rt, dt;
+ update_state(cpi, get_block_context(x, subsize), subsize, 0);
+ encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
+ *(get_sb_index(xd, subsize)) = 1;
+ pick_sb_modes(cpi, mi_row, mi_col + (bs >> 2), tp, &rt, &dt, subsize,
+ get_block_context(x, subsize));
+ r += rt;
+ d += dt;
+ }
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ r += x->partition_cost[pl][PARTITION_VERT];
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+ break;
+ case PARTITION_SPLIT:
+ for (i = 0; i < 4; i++) {
+ int x_idx = (i & 1) * (bs >> 2);
+ int y_idx = (i >> 1) * (bs >> 2);
+ int jj = i >> 1, ii = i & 0x01;
+ int rt, dt;
+
+ if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
+ continue;
+
+ *(get_sb_index(xd, subsize)) = i;
+
+ rd_use_partition(cpi, m + jj * bss * mis + ii * bss, tp, mi_row + y_idx,
+ mi_col + x_idx, subsize, &rt, &dt);
+ r += rt;
+ d += dt;
+ }
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ r += x->partition_cost[pl][PARTITION_SPLIT];
+ break;
+ default:
+ assert(0);
+ }
+
+ // update partition context
+#if CONFIG_AB4X4
+ if (bsize >= BLOCK_SIZE_SB8X8 &&
+ (bsize == BLOCK_SIZE_SB8X8 || partition != PARTITION_SPLIT)) {
+#else
+ if (bsize > BLOCK_SIZE_SB8X8
+ && (bsize == BLOCK_SIZE_MB16X16 || partition != PARTITION_SPLIT)) {
+#endif
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ update_partition_context(xd, subsize, bsize);
+ }
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+
+ if (r < INT_MAX && d < INT_MAX)
+ encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
+ *rate = r;
+ *dist = d;
+}
+
+// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
+// unlikely to be selected, based on previous rate-distortion optimization
+// results, to speed up encoding.
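+// Recursively try PARTITION_SPLIT, PARTITION_HORZ, PARTITION_VERT and
+// PARTITION_NONE for this block, keep whichever gives the lowest RDCOST,
+// and re-encode the winner on the way out; tokens are only emitted at the
+// 64x64 root (output_enabled is bsize == BLOCK_SIZE_SB64X64).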
+static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp,
+ int mi_row, int mi_col,
+ BLOCK_SIZE_TYPE bsize,
+ int *rate, int *dist) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int bsl = b_width_log2(bsize), bs = 1 << bsl;
+ int ms = bs / 2;
+ ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
+ PARTITION_CONTEXT sl[8], sa[8];
+ TOKENEXTRA *tp_orig = *tp;
+ int i, pl;
+ BLOCK_SIZE_TYPE subsize;
+ int srate = INT_MAX, sdist = INT_MAX;
+
+  if (bsize < BLOCK_SIZE_SB8X8 && xd->ab_index != 0) {
+    *rate = 0;
+    *dist = 0;
+    return;
+  }
+ assert(mi_height_log2(bsize) == mi_width_log2(bsize));
+
+ save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+
+ // PARTITION_SPLIT
+ if (bsize >= BLOCK_SIZE_SB8X8) {
+ int r4 = 0, d4 = 0;
+ subsize = get_subsize(bsize, PARTITION_SPLIT);
+ *(get_sb_partitioning(x, bsize)) = subsize;
+
+ for (i = 0; i < 4; ++i) {
+ int x_idx = (i & 1) * (ms >> 1);
+ int y_idx = (i >> 1) * (ms >> 1);
+ int r = 0, d = 0;
+
+ if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
+ continue;
+
+ *(get_sb_index(xd, subsize)) = i;
+ rd_pick_partition(cpi, tp, mi_row + y_idx, mi_col + x_idx, subsize,
+ &r, &d);
+
+ r4 += r;
+ d4 += d;
+ }
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ if (r4 < INT_MAX)
+ r4 += x->partition_cost[pl][PARTITION_SPLIT];
+ assert(r4 >= 0);
+ assert(d4 >= 0);
+ srate = r4;
+ sdist = d4;
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+ }
+
+ // PARTITION_HORZ
+ if (bsize >= BLOCK_SIZE_SB8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
+ int r2, d2;
+ int r = 0, d = 0;
+ subsize = get_subsize(bsize, PARTITION_HORZ);
+ *(get_sb_index(xd, subsize)) = 0;
+ pick_sb_modes(cpi, mi_row, mi_col, tp, &r2, &d2, subsize,
+ get_block_context(x, subsize));
+
+ if (mi_row + (ms >> 1) < cm->mi_rows) {
+ update_state(cpi, get_block_context(x, subsize), subsize, 0);
+ encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
+
+ *(get_sb_index(xd, subsize)) = 1;
+ pick_sb_modes(cpi, mi_row + (ms >> 1), mi_col, tp, &r, &d, subsize,
+ get_block_context(x, subsize));
+ r2 += r;
+ d2 += d;
+ }
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ if (r2 < INT_MAX)
+ r2 += x->partition_cost[pl][PARTITION_HORZ];
+ if (RDCOST(x->rdmult, x->rddiv, r2, d2) <
+ RDCOST(x->rdmult, x->rddiv, srate, sdist)) {
+ srate = r2;
+ sdist = d2;
+ *(get_sb_partitioning(x, bsize)) = subsize;
+ }
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+ }
+
+ // PARTITION_VERT
+ if (bsize >= BLOCK_SIZE_SB8X8 && mi_row + (ms >> 1) < cm->mi_rows) {
+ int r2, d2;
+ subsize = get_subsize(bsize, PARTITION_VERT);
+ *(get_sb_index(xd, subsize)) = 0;
+ pick_sb_modes(cpi, mi_row, mi_col, tp, &r2, &d2, subsize,
+ get_block_context(x, subsize));
+ if (mi_col + (ms >> 1) < cm->mi_cols) {
+ int r = 0, d = 0;
+ update_state(cpi, get_block_context(x, subsize), subsize, 0);
+ encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
+
+ *(get_sb_index(xd, subsize)) = 1;
+ pick_sb_modes(cpi, mi_row, mi_col + (ms >> 1), tp, &r, &d, subsize,
+ get_block_context(x, subsize));
+ r2 += r;
+ d2 += d;
+ }
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ if (r2 < INT_MAX)
+ r2 += x->partition_cost[pl][PARTITION_VERT];
+ if (RDCOST(x->rdmult, x->rddiv, r2, d2) <
+ RDCOST(x->rdmult, x->rddiv, srate, sdist)) {
+ srate = r2;
+ sdist = d2;
+ *(get_sb_partitioning(x, bsize)) = subsize;
+ }
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+ }
+
+ // PARTITION_NONE
+ if ((mi_row + (ms >> 1) < cm->mi_rows) &&
+ (mi_col + (ms >> 1) < cm->mi_cols)) {
+ int r, d;
+ pick_sb_modes(cpi, mi_row, mi_col, tp, &r, &d, bsize,
+ get_block_context(x, bsize));
+ if (bsize >= BLOCK_SIZE_SB8X8) {
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
+ pl = partition_plane_context(xd, bsize);
+ r += x->partition_cost[pl][PARTITION_NONE];
+ }
+
+ if (RDCOST(x->rdmult, x->rddiv, r, d) <
+ RDCOST(x->rdmult, x->rddiv, srate, sdist)) {
+ srate = r;
+ sdist = d;
+ if (bsize >= BLOCK_SIZE_SB8X8)
+ *(get_sb_partitioning(x, bsize)) = bsize;
+ }
+ }
+
+ *rate = srate;
+ *dist = sdist;
+
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+
+ if (srate < INT_MAX && sdist < INT_MAX)
+ encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
+
+ if (bsize == BLOCK_SIZE_SB64X64) {
+ assert(tp_orig < *tp);
+ assert(srate < INT_MAX);
+ assert(sdist < INT_MAX);
+ } else {
+ assert(tp_orig == *tp);
+ }
+}
+
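+// Encode one row of 64x64 superblocks: a full rd_pick_partition() search at
+// speeds below 5, otherwise the cheaper variance-based choose_partitioning()
+// followed by rd_use_partition().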
+static void encode_sb_row(VP9_COMP *cpi, int mi_row,
+ TOKENEXTRA **tp, int *totalrate) {
+ VP9_COMMON *const cm = &cpi->common;
+ int mi_col;
+
+ // Initialize the left context for the new SB row
+ vpx_memset(&cm->left_context, 0, sizeof(cm->left_context));
+ vpx_memset(cm->left_seg_context, 0, sizeof(cm->left_seg_context));
+
+ // Code each SB in the row
+ for (mi_col = cm->cur_tile_mi_col_start;
+ mi_col < cm->cur_tile_mi_col_end; mi_col += 64 / MI_SIZE) {
+ int dummy_rate, dummy_dist;
+ if (cpi->speed < 5) {
+ rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+ &dummy_rate, &dummy_dist);
+ } else {
+ const int idx_str = cm->mode_info_stride * mi_row + mi_col;
+ MODE_INFO *m = cm->mi + idx_str;
+ // set_partitioning(cpi, m, BLOCK_SIZE_SB64X64);
+ choose_partitioning(cpi, cm->mi, mi_row, mi_col);
+ rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+ &dummy_rate, &dummy_dist);
+ }
+ }
+}
+
+static void init_encode_frame_mb_context(VP9_COMP *cpi) {
+ MACROBLOCK *const x = &cpi->mb;
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+
+ x->act_zbin_adj = 0;
+ cpi->seg0_idx = 0;
+
+ xd->mode_info_stride = cm->mode_info_stride;
+ xd->frame_type = cm->frame_type;
+
+ xd->frames_since_golden = cm->frames_since_golden;
+ xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
+
+ // reset intra mode contexts
+ if (cm->frame_type == KEY_FRAME)
+ vp9_init_mbmode_probs(cm);
+
+ // Copy data over into macro block data structures.
+ vp9_setup_src_planes(x, cpi->Source, 0, 0);
+
+ // TODO(jkoleszar): are these initializations required?
+ setup_pre_planes(xd, &cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]], NULL,
+ 0, 0, NULL, NULL);
+ setup_dst_planes(xd, &cm->yv12_fb[cm->new_fb_idx], 0, 0);
+
+ vp9_build_block_offsets(x);
+
+ vp9_setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
+
+ xd->mode_info_context->mbmi.mode = DC_PRED;
+ xd->mode_info_context->mbmi.uv_mode = DC_PRED;
+
+  vp9_zero(cpi->y_mode_count);
+  vp9_zero(cpi->y_uv_mode_count);
+  vp9_zero(cm->fc.inter_mode_counts);
+ vp9_zero(cpi->partition_count);
+ vp9_zero(cpi->intra_inter_count);
+ vp9_zero(cpi->comp_inter_count);
+ vp9_zero(cpi->single_ref_count);
+ vp9_zero(cpi->comp_ref_count);
+ vp9_zero(cm->fc.tx_count_32x32p);
+ vp9_zero(cm->fc.tx_count_16x16p);
+ vp9_zero(cm->fc.tx_count_8x8p);
+ vp9_zero(cm->fc.mbskip_count);
+
+ // Note: this memset assumes above_context[0], [1] and [2]
+ // are allocated as part of the same buffer.
+ vpx_memset(cm->above_context[0], 0, sizeof(ENTROPY_CONTEXT) * 2 *
+ MAX_MB_PLANE * mi_cols_aligned_to_sb(cm));
+ vpx_memset(cm->above_seg_context, 0, sizeof(PARTITION_CONTEXT) *
+ mi_cols_aligned_to_sb(cm));
+}
+
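+// Lossless coding swaps in the 4x4 Walsh-Hadamard transform pair, disables
+// trellis coefficient optimization, the loop filter and the zbin mode boost,
+// and restricts the frame to 4x4 transforms only.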
+static void switch_lossless_mode(VP9_COMP *cpi, int lossless) {
+ if (lossless) {
+ cpi->mb.fwd_txm8x4 = vp9_short_walsh8x4;
+ cpi->mb.fwd_txm4x4 = vp9_short_walsh4x4;
+ cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_iwalsh4x4_1_add;
+ cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_iwalsh4x4_add;
+ cpi->mb.optimize = 0;
+ cpi->common.filter_level = 0;
+ cpi->zbin_mode_boost_enabled = 0;
+ cpi->common.txfm_mode = ONLY_4X4;
+ } else {
+ cpi->mb.fwd_txm8x4 = vp9_short_fdct8x4;
+ cpi->mb.fwd_txm4x4 = vp9_short_fdct4x4;
+ cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_idct4x4_1_add;
+ cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_idct4x4_add;
+ }
+}
+
+static void encode_frame_internal(VP9_COMP *cpi) {
+ int mi_row;
+ MACROBLOCK *const x = &cpi->mb;
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int totalrate;
+
+// fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n",
+// cpi->common.current_video_frame, cpi->common.show_frame,
+// cm->frame_type);
+
+// debug output
+#if DBG_PRNT_SEGMAP
+ {
+ FILE *statsfile;
+ statsfile = fopen("segmap2.stt", "a");
+ fprintf(statsfile, "\n");
+ fclose(statsfile);
+ }
+#endif
+
+ totalrate = 0;
+
+ // Reset frame count of inter 0,0 motion vector usage.
+ cpi->inter_zz_count = 0;
+
+ vp9_zero(cm->fc.switchable_interp_count);
+ vp9_zero(cpi->best_switchable_interp_count);
+
+ xd->mode_info_context = cm->mi;
+ xd->prev_mode_info_context = cm->prev_mi;
+
+ vp9_zero(cpi->NMVcount);
+ vp9_zero(cpi->coef_counts);
+ vp9_zero(cm->fc.eob_branch_counts);
+
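+  // Lossless coding is only possible when the base quantizer index and the
+  // y/uv delta-Q values are all zero.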
+ cpi->mb.e_mbd.lossless = cm->base_qindex == 0 &&
+ cm->y_dc_delta_q == 0 &&
+ cm->uv_dc_delta_q == 0 &&
+ cm->uv_ac_delta_q == 0;
+ switch_lossless_mode(cpi, cpi->mb.e_mbd.lossless);
+
+ vp9_frame_init_quantizer(cpi);
+
+ vp9_initialize_rd_consts(cpi, cm->base_qindex + cm->y_dc_delta_q);
+ vp9_initialize_me_consts(cpi, cm->base_qindex);
+
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
+ // Initialize encode frame context.
+ init_encode_frame_mb_context(cpi);
+
+ // Build a frame level activity map
+ build_activity_map(cpi);
+ }
+
+  // Re-initialize the encode frame context.
+ init_encode_frame_mb_context(cpi);
+
+ vpx_memset(cpi->rd_comp_pred_diff, 0, sizeof(cpi->rd_comp_pred_diff));
+ vpx_memset(cpi->rd_tx_select_diff, 0, sizeof(cpi->rd_tx_select_diff));
+ vpx_memset(cpi->rd_tx_select_threshes, 0, sizeof(cpi->rd_tx_select_threshes));
+
+ set_prev_mi(cm);
+
+ {
+ struct vpx_usec_timer emr_timer;
+ vpx_usec_timer_start(&emr_timer);
+
+ {
+ // Take tiles into account and give start/end MB
+ int tile_col, tile_row;
+ TOKENEXTRA *tp = cpi->tok;
+
+ for (tile_row = 0; tile_row < cm->tile_rows; tile_row++) {
+ vp9_get_tile_row_offsets(cm, tile_row);
+
+ for (tile_col = 0; tile_col < cm->tile_columns; tile_col++) {
+ TOKENEXTRA *tp_old = tp;
+
+ // For each row of SBs in the frame
+ vp9_get_tile_col_offsets(cm, tile_col);
+ for (mi_row = cm->cur_tile_mi_row_start;
+ mi_row < cm->cur_tile_mi_row_end;
+ mi_row += 8)
+ encode_sb_row(cpi, mi_row, &tp, &totalrate);
+ cpi->tok_count[tile_row][tile_col] = (unsigned int)(tp - tp_old);
+ assert(tp - cpi->tok <=
+ get_token_alloc(cm->mb_rows, cm->mb_cols));
+ }
+ }
+ }
+
+ vpx_usec_timer_mark(&emr_timer);
+ cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
+ }
+
+  // 256 rate units to the bit;
+  // projected_frame_size is in units of bytes.
+ cpi->projected_frame_size = totalrate >> 8;
+
+#if 0
+ // Keep record of the total distortion this time around for future use
+ cpi->last_frame_distortion = cpi->frame_distortion;
+#endif
+}
+
+static int check_dual_ref_flags(VP9_COMP *cpi) {
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ int ref_flags = cpi->ref_frame_flags;
+
+ if (vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME)) {
+ return 0;
+ } else {
+ return (!!(ref_flags & VP9_GOLD_FLAG) +
+ !!(ref_flags & VP9_LAST_FLAG) +
+ !!(ref_flags & VP9_ALT_FLAG)) >= 2;
+ }
+}
+
+static int get_skip_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs) {
+ int x, y;
+
+ for (y = 0; y < ymbs; y++) {
+ for (x = 0; x < xmbs; x++) {
+ if (!mi[y * mis + x].mbmi.mb_skip_coeff)
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static void set_txfm_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs,
+ TX_SIZE txfm_size) {
+ int x, y;
+
+ for (y = 0; y < ymbs; y++) {
+ for (x = 0; x < xmbs; x++)
+ mi[y * mis + x].mbmi.txfm_size = txfm_size;
+ }
+}
+
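+// If the frame-level transform mode is lowered after encoding (see
+// vp9_encode_frame), blocks that were coded as skips may still carry a
+// transform size larger than the new maximum; the helpers below walk the
+// mode info and clamp those sizes to txfm_max.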
+static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO *mi,
+ int mis, TX_SIZE txfm_max,
+ int bw, int bh, int mi_row, int mi_col,
+ BLOCK_SIZE_TYPE bsize) {
+ VP9_COMMON *const cm = &cpi->common;
+ MB_MODE_INFO *const mbmi = &mi->mbmi;
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ if (mbmi->txfm_size > txfm_max) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ const int segment_id = mbmi->segment_id;
+ const int ymbs = MIN(bh, cm->mi_rows - mi_row);
+ const int xmbs = MIN(bw, cm->mi_cols - mi_col);
+
+ xd->mode_info_context = mi;
+ assert(vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP) ||
+ get_skip_flag(mi, mis, ymbs, xmbs));
+ set_txfm_flag(mi, mis, ymbs, xmbs, txfm_max);
+ }
+}
+
+static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO *mi,
+ TX_SIZE txfm_max,
+ int mi_row, int mi_col,
+ BLOCK_SIZE_TYPE bsize) {
+ VP9_COMMON *const cm = &cpi->common;
+ const int mis = cm->mode_info_stride;
+ int bwl, bhl;
+ const int bsl = mi_width_log2(bsize), bs = 1 << (bsl - 1);
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ bwl = mi_width_log2(mi->mbmi.sb_type);
+ bhl = mi_height_log2(mi->mbmi.sb_type);
+
+ if (bwl == bsl && bhl == bsl) {
+ reset_skip_txfm_size_b(cpi, mi, mis, txfm_max, 1 << bsl, 1 << bsl,
+ mi_row, mi_col, bsize);
+ } else if (bwl == bsl && bhl < bsl) {
+ reset_skip_txfm_size_b(cpi, mi, mis, txfm_max, 1 << bsl, bs,
+ mi_row, mi_col, bsize);
+ reset_skip_txfm_size_b(cpi, mi + bs * mis, mis, txfm_max, 1 << bsl, bs,
+ mi_row + bs, mi_col, bsize);
+ } else if (bwl < bsl && bhl == bsl) {
+ reset_skip_txfm_size_b(cpi, mi, mis, txfm_max, bs, 1 << bsl,
+ mi_row, mi_col, bsize);
+ reset_skip_txfm_size_b(cpi, mi + bs, mis, txfm_max, bs, 1 << bsl,
+ mi_row, mi_col + bs, bsize);
+ } else {
+ BLOCK_SIZE_TYPE subsize;
+ int n;
+
+ assert(bwl < bsl && bhl < bsl);
+ if (bsize == BLOCK_SIZE_SB64X64) {
+ subsize = BLOCK_SIZE_SB32X32;
+ } else if (bsize == BLOCK_SIZE_SB32X32) {
+ subsize = BLOCK_SIZE_MB16X16;
+ } else {
+ assert(bsize == BLOCK_SIZE_MB16X16);
+ subsize = BLOCK_SIZE_SB8X8;
+ }
+
+ for (n = 0; n < 4; n++) {
+ const int y_idx = n >> 1, x_idx = n & 0x01;
+
+ reset_skip_txfm_size_sb(cpi, mi + y_idx * bs * mis + x_idx * bs,
+ txfm_max, mi_row + y_idx * bs,
+ mi_col + x_idx * bs, subsize);
+ }
+ }
+}
+
+static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
+ VP9_COMMON *const cm = &cpi->common;
+ int mi_row, mi_col;
+ const int mis = cm->mode_info_stride;
+ MODE_INFO *mi, *mi_ptr = cm->mi;
+
+ for (mi_row = 0; mi_row < cm->mi_rows;
+ mi_row += 8, mi_ptr += 8 * mis) {
+ mi = mi_ptr;
+ for (mi_col = 0; mi_col < cm->mi_cols;
+ mi_col += 8, mi += 8) {
+ reset_skip_txfm_size_sb(cpi, mi, txfm_max,
+ mi_row, mi_col, BLOCK_SIZE_SB64X64);
+ }
+ }
+}
+
+void vp9_encode_frame(VP9_COMP *cpi) {
+ VP9_COMMON *const cm = &cpi->common;
+
+  // In the longer term the encoder should be generalized to match the
+  // decoder, such that we allow compound prediction where one of the 3
+  // buffers has a different sign bias, and that buffer is then the fixed
+  // ref. However, this requires further work in the rd loop. For now the
+  // only supported encoder-side behaviour is where the ALT ref buffer has
+  // the opposite sign bias to the other two.
+ if ((cm->ref_frame_sign_bias[ALTREF_FRAME] ==
+ cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
+ (cm->ref_frame_sign_bias[ALTREF_FRAME] ==
+ cm->ref_frame_sign_bias[LAST_FRAME])) {
+ cm->allow_comp_inter_inter = 0;
+ } else {
+ cm->allow_comp_inter_inter = 1;
+ cm->comp_fixed_ref = ALTREF_FRAME;
+ cm->comp_var_ref[0] = LAST_FRAME;
+ cm->comp_var_ref[1] = GOLDEN_FRAME;
+ }
+
+ if (cpi->sf.RD) {
+ int i, frame_type, pred_type;
+ TXFM_MODE txfm_type;
+
+    /*
+     * This code does a single RD pass over the whole frame assuming
+     * either compound, single or hybrid prediction as per whatever has
+     * worked best for that type of frame in the past.
+     * It also predicts whether another coding mode would have worked
+     * better than this coding mode. If that is the case, it remembers
+     * that for subsequent frames.
+     * It does the same analysis for transform size selection as well.
+     */
+ if (cpi->common.frame_type == KEY_FRAME)
+ frame_type = 0;
+ else if (cpi->is_src_frame_alt_ref && cpi->refresh_golden_frame)
+ frame_type = 3;
+ else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
+ frame_type = 1;
+ else
+ frame_type = 2;
+
+ /* prediction (compound, single or hybrid) mode selection */
+ if (frame_type == 3 || !cm->allow_comp_inter_inter)
+ pred_type = SINGLE_PREDICTION_ONLY;
+ else if (cpi->rd_prediction_type_threshes[frame_type][1] >
+ cpi->rd_prediction_type_threshes[frame_type][0] &&
+ cpi->rd_prediction_type_threshes[frame_type][1] >
+ cpi->rd_prediction_type_threshes[frame_type][2] &&
+ check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100)
+ pred_type = COMP_PREDICTION_ONLY;
+ else if (cpi->rd_prediction_type_threshes[frame_type][0] >
+ cpi->rd_prediction_type_threshes[frame_type][2])
+ pred_type = SINGLE_PREDICTION_ONLY;
+ else
+ pred_type = HYBRID_PREDICTION;
+
+ /* transform size (4x4, 8x8, 16x16 or select-per-mb) selection */
+
+ cpi->mb.e_mbd.lossless = 0;
+ if (cpi->oxcf.lossless) {
+ txfm_type = ONLY_4X4;
+ cpi->mb.e_mbd.lossless = 1;
+ } else
+#if 0
+ /* FIXME (rbultje): this code is disabled until we support cost updates
+ * while a frame is being encoded; the problem is that each time we
+ * "revert" to 4x4 only (or even 8x8 only), the coefficient probabilities
+ * for 16x16 (and 8x8) start lagging behind, thus leading to them lagging
+ * further behind and not being chosen for subsequent frames either. This
+ * is essentially a local minimum problem that we can probably fix by
+ * estimating real costs more closely within a frame, perhaps by re-
+ * calculating costs on-the-fly as frame encoding progresses. */
+ if (cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
+ cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] &&
+ cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
+ cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] &&
+ cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
+ cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]) {
+ txfm_type = TX_MODE_SELECT;
+ } else if (cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] >
+ cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]
+ && cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] >
+ cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16]
+ ) {
+ txfm_type = ONLY_4X4;
+ } else if (cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] >=
+ cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]) {
+ txfm_type = ALLOW_16X16;
+ } else
+ txfm_type = ALLOW_8X8;
+#else
+ txfm_type = cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32] >
+ cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
+ ALLOW_32X32 : TX_MODE_SELECT;
+#endif
+ cpi->common.txfm_mode = txfm_type;
+ cpi->common.comp_pred_mode = pred_type;
+ encode_frame_internal(cpi);
+
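+    // Fold this frame's per-MB rd differences into the running thresholds:
+    // add the new difference, then halve, so that recent frames dominate
+    // the next frame's prediction-type and transform-size decisions.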
+ for (i = 0; i < NB_PREDICTION_TYPES; ++i) {
+ const int diff = (int)(cpi->rd_comp_pred_diff[i] / cpi->common.MBs);
+ cpi->rd_prediction_type_threshes[frame_type][i] += diff;
+ cpi->rd_prediction_type_threshes[frame_type][i] >>= 1;
+ }
+
+ for (i = 0; i < NB_TXFM_MODES; ++i) {
+ int64_t pd = cpi->rd_tx_select_diff[i];
+ int diff;
+ if (i == TX_MODE_SELECT)
+ pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv,
+ 2048 * (TX_SIZE_MAX_SB - 1), 0);
+ diff = (int)(pd / cpi->common.MBs);
+ cpi->rd_tx_select_threshes[frame_type][i] += diff;
+ cpi->rd_tx_select_threshes[frame_type][i] /= 2;
+ }
+
+ if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
+ int single_count_zero = 0;
+ int comp_count_zero = 0;
+
+ for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
+ single_count_zero += cpi->comp_inter_count[i][0];
+ comp_count_zero += cpi->comp_inter_count[i][1];
+ }
+
+ if (comp_count_zero == 0) {
+ cpi->common.comp_pred_mode = SINGLE_PREDICTION_ONLY;
+ vp9_zero(cpi->comp_inter_count);
+ } else if (single_count_zero == 0) {
+ cpi->common.comp_pred_mode = COMP_PREDICTION_ONLY;
+ vp9_zero(cpi->comp_inter_count);
+ }
+ }
+
+ if (cpi->common.txfm_mode == TX_MODE_SELECT) {
+ int count4x4 = 0;
+ int count8x8_lp = 0, count8x8_8x8p = 0;
+ int count16x16_16x16p = 0, count16x16_lp = 0;
+ int count32x32 = 0;
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++)
+ count4x4 += cm->fc.tx_count_32x32p[i][TX_4X4];
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++)
+ count4x4 += cm->fc.tx_count_16x16p[i][TX_4X4];
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++)
+ count4x4 += cm->fc.tx_count_8x8p[i][TX_4X4];
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++)
+ count8x8_lp += cm->fc.tx_count_32x32p[i][TX_8X8];
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++)
+ count8x8_lp += cm->fc.tx_count_16x16p[i][TX_8X8];
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++)
+ count8x8_8x8p += cm->fc.tx_count_8x8p[i][TX_8X8];
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++)
+ count16x16_16x16p += cm->fc.tx_count_16x16p[i][TX_16X16];
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++)
+ count16x16_lp += cm->fc.tx_count_32x32p[i][TX_16X16];
+
+ for (i = 0; i < TX_SIZE_CONTEXTS; i++)
+ count32x32 += cm->fc.tx_count_32x32p[i][TX_32X32];
+
+ if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
+ count32x32 == 0) {
+ cpi->common.txfm_mode = ALLOW_8X8;
+ reset_skip_txfm_size(cpi, TX_8X8);
+ } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
+ count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
+ cpi->common.txfm_mode = ONLY_4X4;
+ reset_skip_txfm_size(cpi, TX_4X4);
+ } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
+ cpi->common.txfm_mode = ALLOW_32X32;
+ } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
+ cpi->common.txfm_mode = ALLOW_16X16;
+ reset_skip_txfm_size(cpi, TX_16X16);
+ }
+ }
+
+ // Update interpolation filter strategy for next frame.
+ if ((cpi->common.frame_type != KEY_FRAME) && (cpi->sf.search_best_filter))
+ vp9_select_interp_filter_type(cpi);
+ } else {
+ encode_frame_internal(cpi);
+ }
+}
+
+void vp9_build_block_offsets(MACROBLOCK *x) {
+}
+
+static void sum_intra_stats(VP9_COMP *cpi, MACROBLOCK *x) {
+ const MACROBLOCKD *xd = &x->e_mbd;
+ const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
+ const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
+
+ ++cpi->y_uv_mode_count[m][uvm];
+ if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
+ const BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
+ const int bwl = b_width_log2(bsize), bhl = b_height_log2(bsize);
+ const int bsl = MIN(bwl, bhl);
+ ++cpi->y_mode_count[MIN(bsl, 3)][m];
+ } else {
+ int idx, idy;
+ int bw = 1 << b_width_log2(xd->mode_info_context->mbmi.sb_type);
+ int bh = 1 << b_height_log2(xd->mode_info_context->mbmi.sb_type);
+ for (idy = 0; idy < 2; idy += bh) {
+ for (idx = 0; idx < 2; idx += bw) {
+        const int bm = xd->mode_info_context->bmi[idy * 2 + idx].as_mode.first;
+        ++cpi->y_mode_count[0][bm];
+ }
+ }
+ }
+}
+
+// Experimental stub function to create a per MB zbin adjustment based on
+// some previously calculated measure of MB activity.
+static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) {
+#if USE_ACT_INDEX
+ x->act_zbin_adj = *(x->mb_activity_ptr);
+#else
+ int64_t a;
+ int64_t b;
+ int64_t act = *(x->mb_activity_ptr);
+
+ // Apply the masking to the RD multiplier.
+ a = act + 4 * cpi->activity_avg;
+ b = 4 * act + cpi->activity_avg;
+
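+  // The ratio b/a grows with activity relative to the frame average, so busy
+  // blocks get a positive zbin adjustment and flat blocks a negative one.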
+ if (act > cpi->activity_avg)
+ x->act_zbin_adj = (int)(((int64_t)b + (a >> 1)) / a) - 1;
+ else
+ x->act_zbin_adj = 1 - (int)(((int64_t)a + (b >> 1)) / b);
+#endif
+}
+
+static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
+ int output_enabled, int mi_row, int mi_col,
+ BLOCK_SIZE_TYPE bsize) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int n;
+ MODE_INFO *mi = xd->mode_info_context;
+ MB_MODE_INFO *mbmi = &mi->mbmi;
+ unsigned int segment_id = mbmi->segment_id;
+ const int mis = cm->mode_info_stride;
+ const int bwl = mi_width_log2(bsize);
+ const int bw = 1 << bwl, bh = 1 << mi_height_log2(bsize);
+ x->rd_search = 0;
+
+ if (cm->frame_type == KEY_FRAME) {
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
+ adjust_act_zbin(cpi, x);
+ vp9_update_zbin_extra(cpi, x);
+ }
+ } else {
+ vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);
+
+ if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
+ // Adjust the zbin based on this MB rate.
+ adjust_act_zbin(cpi, x);
+ }
+
+ // Experimental code. Special case for gf and arf zeromv modes.
+ // Increase zbin size to suppress noise
+ cpi->zbin_mode_boost = 0;
+ if (cpi->zbin_mode_boost_enabled) {
+ if (mbmi->ref_frame[0] != INTRA_FRAME) {
+ if (mbmi->mode == ZEROMV) {
+ if (mbmi->ref_frame[0] != LAST_FRAME)
+ cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
+ else
+ cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
+ } else if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
+ cpi->zbin_mode_boost = SPLIT_MV_ZBIN_BOOST;
+ } else {
+ cpi->zbin_mode_boost = MV_ZBIN_BOOST;
+ }
+ } else {
+ cpi->zbin_mode_boost = INTRA_ZBIN_BOOST;
+ }
+ }
+
+ vp9_update_zbin_extra(cpi, x);
+ }
+
+ if (mbmi->ref_frame[0] == INTRA_FRAME) {
+ vp9_encode_intra_block_y(cm, x, (bsize < BLOCK_SIZE_SB8X8) ?
+ BLOCK_SIZE_SB8X8 : bsize);
+ vp9_encode_intra_block_uv(cm, x, (bsize < BLOCK_SIZE_SB8X8) ?
+ BLOCK_SIZE_SB8X8 : bsize);
+ if (output_enabled)
+ sum_intra_stats(cpi, x);
+ } else {
+ int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, mbmi->ref_frame[0])];
+ YV12_BUFFER_CONFIG *ref_fb = &cm->yv12_fb[idx];
+ YV12_BUFFER_CONFIG *second_ref_fb = NULL;
+ if (mbmi->ref_frame[1] > 0) {
+ idx = cm->ref_frame_map[get_ref_frame_idx(cpi, mbmi->ref_frame[1])];
+ second_ref_fb = &cm->yv12_fb[idx];
+ }
+
+ assert(cm->frame_type != KEY_FRAME);
+
+ setup_pre_planes(xd, ref_fb, second_ref_fb,
+ mi_row, mi_col, xd->scale_factor, xd->scale_factor_uv);
+
+ vp9_build_inter_predictors_sb(xd, mi_row, mi_col,
+ bsize < BLOCK_SIZE_SB8X8 ? BLOCK_SIZE_SB8X8
+ : bsize);
+ }
+
+ if (xd->mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME) {
+ vp9_tokenize_sb(cpi, xd, t, !output_enabled,
+ (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
+ } else if (!x->skip) {
+ vp9_encode_sb(cm, x, (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
+ vp9_tokenize_sb(cpi, xd, t, !output_enabled,
+ (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
+ } else {
+ // FIXME(rbultje): not tile-aware (mi - 1)
+ int mb_skip_context =
+ (mi - 1)->mbmi.mb_skip_coeff + (mi - mis)->mbmi.mb_skip_coeff;
+
+ mbmi->mb_skip_coeff = 1;
+ if (output_enabled)
+ cm->fc.mbskip_count[mb_skip_context][1]++;
+ vp9_reset_sb_tokens_context(xd,
+ (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
+ }
+
+ // copy skip flag on all mb_mode_info contexts in this SB
+ // if this was a skip at this txfm size
+ for (n = 1; n < bw * bh; n++) {
+ const int x_idx = n & (bw - 1), y_idx = n >> bwl;
+ if (mi_col + x_idx < cm->mi_cols && mi_row + y_idx < cm->mi_rows)
+ mi[x_idx + y_idx * mis].mbmi.mb_skip_coeff = mi->mbmi.mb_skip_coeff;
+ }
+
+ if (output_enabled) {
+ if (cm->txfm_mode == TX_MODE_SELECT &&
+ mbmi->sb_type >= BLOCK_SIZE_SB8X8 &&
+ !(mbmi->ref_frame[0] != INTRA_FRAME && (mbmi->mb_skip_coeff ||
+ vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)))) {
+ const int context = vp9_get_pred_context(cm, xd, PRED_TX_SIZE);
+ if (bsize >= BLOCK_SIZE_SB32X32) {
+ cm->fc.tx_count_32x32p[context][mbmi->txfm_size]++;
+ } else if (bsize >= BLOCK_SIZE_MB16X16) {
+ cm->fc.tx_count_16x16p[context][mbmi->txfm_size]++;
+ } else {
+ cm->fc.tx_count_8x8p[context][mbmi->txfm_size]++;
+ }
+ } else {
+ int x, y;
+ TX_SIZE sz = (cm->txfm_mode == TX_MODE_SELECT) ? TX_32X32 : cm->txfm_mode;
+ // The new intra coding scheme requires no change of transform size
+ if (mi->mbmi.ref_frame[0] != INTRA_FRAME) {
+ if (sz == TX_32X32 && bsize < BLOCK_SIZE_SB32X32)
+ sz = TX_16X16;
+ if (sz == TX_16X16 && bsize < BLOCK_SIZE_MB16X16)
+ sz = TX_8X8;
+ if (sz == TX_8X8 && bsize < BLOCK_SIZE_SB8X8)
+ sz = TX_4X4;
+ } else if (bsize >= BLOCK_SIZE_SB8X8) {
+ sz = mbmi->txfm_size;
+ } else {
+ sz = TX_4X4;
+ }
+
+ for (y = 0; y < bh; y++) {
+ for (x = 0; x < bw; x++) {
+ if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) {
+ mi[mis * y + x].mbmi.txfm_size = sz;
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/libvpx/vp9/encoder/vp9_encodeframe.h b/libvpx/vp9/encoder/vp9_encodeframe.h
new file mode 100644
index 0000000..d37bdca
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_encodeframe.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_ENCODEFRAME_H_
+#define VP9_ENCODER_VP9_ENCODEFRAME_H_
+
+struct macroblock;
+struct yv12_buffer_config;
+
+void vp9_build_block_offsets(struct macroblock *x);
+
+void vp9_setup_src_planes(struct macroblock *x,
+ const struct yv12_buffer_config *src,
+ int mb_row, int mb_col);
+
+#endif // VP9_ENCODER_VP9_ENCODEFRAME_H_
diff --git a/libvpx/vp9/encoder/vp9_encodeintra.c b/libvpx/vp9/encoder/vp9_encodeintra.c
new file mode 100644
index 0000000..f29dba0
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_encodeintra.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vpx_config.h"
+#include "vp9_rtcd.h"
+#include "vp9/encoder/vp9_quantize.h"
+#include "vp9/common/vp9_reconintra.h"
+#include "vp9/encoder/vp9_encodemb.h"
+#include "vp9/encoder/vp9_encodeintra.h"
+
+int vp9_encode_intra(VP9_COMP *cpi, MACROBLOCK *x, int use_16x16_pred) {
+ MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
+ (void) cpi;
+ mbmi->mode = DC_PRED;
+ mbmi->ref_frame[0] = INTRA_FRAME;
+ if (use_16x16_pred) {
+ mbmi->txfm_size = mbmi->sb_type >= BLOCK_SIZE_MB16X16 ? TX_16X16 : TX_8X8;
+ vp9_encode_intra_block_y(&cpi->common, x, mbmi->sb_type);
+ } else {
+ mbmi->txfm_size = TX_4X4;
+ vp9_encode_intra_block_y(&cpi->common, x, mbmi->sb_type);
+ }
+
+ return vp9_get_mb_ss(x->plane[0].src_diff);
+}
diff --git a/libvpx/vp9/encoder/vp9_encodeintra.h b/libvpx/vp9/encoder/vp9_encodeintra.h
new file mode 100644
index 0000000..14d144b
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_encodeintra.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_ENCODEINTRA_H_
+#define VP9_ENCODER_VP9_ENCODEINTRA_H_
+
+#include "vp9/encoder/vp9_onyx_int.h"
+
+int vp9_encode_intra(VP9_COMP *cpi, MACROBLOCK *x, int use_16x16_pred);
+void vp9_encode_intra_block_y(VP9_COMMON *const cm, MACROBLOCK *mb,
+ BLOCK_SIZE_TYPE bs);
+void vp9_encode_intra_block_uv(VP9_COMMON *const cm, MACROBLOCK *mb,
+ BLOCK_SIZE_TYPE bs);
+
+#endif // VP9_ENCODER_VP9_ENCODEINTRA_H_
diff --git a/libvpx/vp9/encoder/vp9_encodemb.c b/libvpx/vp9/encoder/vp9_encodemb.c
new file mode 100644
index 0000000..4f45496
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_encodemb.c
@@ -0,0 +1,705 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vpx_config.h"
+#include "vp9/encoder/vp9_encodemb.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/encoder/vp9_quantize.h"
+#include "vp9/encoder/vp9_tokenize.h"
+#include "vp9/common/vp9_reconintra.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp9/encoder/vp9_rdopt.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vp9_rtcd.h"
+
+DECLARE_ALIGNED(16, extern const uint8_t,
+ vp9_pt_energy_class[MAX_ENTROPY_TOKENS]);
+
+void vp9_subtract_block(int rows, int cols,
+ int16_t *diff_ptr, int diff_stride,
+ const uint8_t *src_ptr, int src_stride,
+ const uint8_t *pred_ptr, int pred_stride) {
+ int r, c;
+
+ for (r = 0; r < rows; r++) {
+ for (c = 0; c < cols; c++)
+ diff_ptr[c] = src_ptr[c] - pred_ptr[c];
+
+ diff_ptr += diff_stride;
+ pred_ptr += pred_stride;
+ src_ptr += src_stride;
+ }
+}
+
+static void inverse_transform_b_4x4_add(MACROBLOCKD *xd, int eob,
+ int16_t *dqcoeff, uint8_t *dest,
+ int stride) {
+ if (eob <= 1)
+ xd->inv_txm4x4_1_add(dqcoeff, dest, stride);
+ else
+ xd->inv_txm4x4_add(dqcoeff, dest, stride);
+}
+
+
+static void subtract_plane(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize, int plane) {
+ struct macroblock_plane *const p = &x->plane[plane];
+ const MACROBLOCKD *const xd = &x->e_mbd;
+ const struct macroblockd_plane *const pd = &xd->plane[plane];
+ const int bw = plane_block_width(bsize, pd);
+ const int bh = plane_block_height(bsize, pd);
+
+ vp9_subtract_block(bh, bw, p->src_diff, bw,
+ p->src.buf, p->src.stride,
+ pd->dst.buf, pd->dst.stride);
+}
+
+void vp9_subtract_sby(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
+ subtract_plane(x, bsize, 0);
+}
+
+void vp9_subtract_sbuv(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
+ int i;
+
+ for (i = 1; i < MAX_MB_PLANE; i++)
+ subtract_plane(x, bsize, i);
+}
+
+void vp9_subtract_sb(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
+ vp9_subtract_sby(x, bsize);
+ vp9_subtract_sbuv(x, bsize);
+}
+
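+// If the two RD costs tie, UPDATE_RD_COST() below re-compares using only the
+// low eight bits of the scaled rate term, giving a deterministic tie-break;
+// the DM and D arguments are currently unused.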
+#define RDTRUNC(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
+#define RDTRUNC_8x8(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
+typedef struct vp9_token_state vp9_token_state;
+
+struct vp9_token_state {
+ int rate;
+ int error;
+ int next;
+ signed char token;
+ short qc;
+};
+
+// TODO: experiment to find the optimal multiplier values.
+#define Y1_RD_MULT 4
+#define UV_RD_MULT 2
+
+static const int plane_rd_mult[4] = {
+ Y1_RD_MULT,
+ UV_RD_MULT,
+};
+
+#define UPDATE_RD_COST()\
+{\
+ rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);\
+ rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);\
+ if (rd_cost0 == rd_cost1) {\
+ rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);\
+ rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);\
+ }\
+}
+
+// This function is a placeholder for now, but may ultimately need
+// to scan previous tokens to work out the correct context.
+static int trellis_get_coeff_context(const int *scan,
+ const int *nb,
+ int idx, int token,
+ uint8_t *token_cache,
+ int pad, int l) {
+ int bak = token_cache[scan[idx]], pt;
+ token_cache[scan[idx]] = vp9_pt_energy_class[token];
+ pt = vp9_get_coef_context(scan, nb, pad, token_cache, idx + 1, l);
+ token_cache[scan[idx]] = bak;
+ return pt;
+}
+
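+// Trellis-quantize one transform block: build a Viterbi lattice over the
+// coefficients in scan order, considering for each nonzero coefficient both
+// its current quantized value and that value reduced by one in magnitude,
+// then trace back the cheapest rate-distortion path and rewrite
+// qcoeff/dqcoeff and the eob to match.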
+static void optimize_b(VP9_COMMON *const cm, MACROBLOCK *mb,
+ int plane, int block, BLOCK_SIZE_TYPE bsize,
+ ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
+ TX_SIZE tx_size) {
+ const int ref = mb->e_mbd.mode_info_context->mbmi.ref_frame[0] != INTRA_FRAME;
+ MACROBLOCKD *const xd = &mb->e_mbd;
+ vp9_token_state tokens[1025][2];
+ unsigned best_index[1025][2];
+ const int16_t *coeff_ptr = BLOCK_OFFSET(mb->plane[plane].coeff,
+ block, 16);
+ int16_t *qcoeff_ptr;
+ int16_t *dqcoeff_ptr;
+ int eob = xd->plane[plane].eobs[block], final_eob, sz = 0;
+ const int i0 = 0;
+ int rc, x, next, i;
+ int64_t rdmult, rddiv, rd_cost0, rd_cost1;
+ int rate0, rate1, error0, error1, t0, t1;
+ int best, band, pt;
+ PLANE_TYPE type = xd->plane[plane].plane_type;
+ int err_mult = plane_rd_mult[type];
+ int default_eob, pad;
+ int const *scan, *nb;
+ const int mul = 1 + (tx_size == TX_32X32);
+ uint8_t token_cache[1024];
+ const int ib = txfrm_block_to_raster_block(xd, bsize, plane,
+ block, 2 * tx_size);
+ const int16_t *dequant_ptr = xd->plane[plane].dequant;
+  const uint8_t *band_translate;
+
+ assert((!type && !plane) || (type && plane));
+ dqcoeff_ptr = BLOCK_OFFSET(xd->plane[plane].dqcoeff, block, 16);
+ qcoeff_ptr = BLOCK_OFFSET(xd->plane[plane].qcoeff, block, 16);
+ switch (tx_size) {
+ default:
+ case TX_4X4: {
+ const TX_TYPE tx_type = plane == 0 ? get_tx_type_4x4(xd, ib) : DCT_DCT;
+ default_eob = 16;
+ scan = get_scan_4x4(tx_type);
+ band_translate = vp9_coefband_trans_4x4;
+ break;
+ }
+ case TX_8X8: {
+ const TX_TYPE tx_type = plane == 0 ? get_tx_type_8x8(xd, ib) : DCT_DCT;
+ scan = get_scan_8x8(tx_type);
+ default_eob = 64;
+ band_translate = vp9_coefband_trans_8x8plus;
+ break;
+ }
+ case TX_16X16: {
+ const TX_TYPE tx_type = plane == 0 ? get_tx_type_16x16(xd, ib) : DCT_DCT;
+ scan = get_scan_16x16(tx_type);
+ default_eob = 256;
+ band_translate = vp9_coefband_trans_8x8plus;
+ break;
+ }
+ case TX_32X32:
+ scan = vp9_default_scan_32x32;
+ default_eob = 1024;
+ band_translate = vp9_coefband_trans_8x8plus;
+ break;
+ }
+ assert(eob <= default_eob);
+
+ /* Now set up a Viterbi trellis to evaluate alternative roundings. */
+ rdmult = mb->rdmult * err_mult;
+ if (mb->e_mbd.mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME)
+ rdmult = (rdmult * 9) >> 4;
+ rddiv = mb->rddiv;
+ memset(best_index, 0, sizeof(best_index));
+ /* Initialize the sentinel node of the trellis. */
+ tokens[eob][0].rate = 0;
+ tokens[eob][0].error = 0;
+ tokens[eob][0].next = default_eob;
+ tokens[eob][0].token = DCT_EOB_TOKEN;
+ tokens[eob][0].qc = 0;
+  tokens[eob][1] = tokens[eob][0];
+ next = eob;
+ for (i = 0; i < eob; i++)
+ token_cache[scan[i]] = vp9_pt_energy_class[vp9_dct_value_tokens_ptr[
+ qcoeff_ptr[scan[i]]].token];
+ nb = vp9_get_coef_neighbors_handle(scan, &pad);
+
+ for (i = eob; i-- > i0;) {
+ int base_bits, d2, dx;
+
+ rc = scan[i];
+ x = qcoeff_ptr[rc];
+ /* Only add a trellis state for non-zero coefficients. */
+ if (x) {
+ int shortcut = 0;
+ error0 = tokens[next][0].error;
+ error1 = tokens[next][1].error;
+ /* Evaluate the first possibility for this state. */
+ rate0 = tokens[next][0].rate;
+ rate1 = tokens[next][1].rate;
+ t0 = (vp9_dct_value_tokens_ptr + x)->token;
+ /* Consider both possible successor states. */
+ if (next < default_eob) {
+ band = get_coef_band(band_translate, i + 1);
+ pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache,
+ pad, default_eob);
+ rate0 +=
+ mb->token_costs_noskip[tx_size][type][ref][band][pt]
+ [tokens[next][0].token];
+ rate1 +=
+ mb->token_costs_noskip[tx_size][type][ref][band][pt]
+ [tokens[next][1].token];
+ }
+ UPDATE_RD_COST();
+ /* And pick the best. */
+ best = rd_cost1 < rd_cost0;
+ base_bits = *(vp9_dct_value_cost_ptr + x);
+ dx = mul * (dqcoeff_ptr[rc] - coeff_ptr[rc]);
+ d2 = dx * dx;
+ tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
+ tokens[i][0].error = d2 + (best ? error1 : error0);
+ tokens[i][0].next = next;
+ tokens[i][0].token = t0;
+ tokens[i][0].qc = x;
+ best_index[i][0] = best;
+
+ /* Evaluate the second possibility for this state. */
+ rate0 = tokens[next][0].rate;
+ rate1 = tokens[next][1].rate;
+
+      if ((abs(x) * dequant_ptr[rc != 0] > abs(coeff_ptr[rc]) * mul) &&
+          (abs(x) * dequant_ptr[rc != 0] < abs(coeff_ptr[rc]) * mul +
+                                           dequant_ptr[rc != 0]))
+ shortcut = 1;
+ else
+ shortcut = 0;
+
+ if (shortcut) {
+ sz = -(x < 0);
+ x -= 2 * sz + 1;
+ }
+
+ /* Consider both possible successor states. */
+ if (!x) {
+ /* If we reduced this coefficient to zero, check to see if
+ * we need to move the EOB back here.
+ */
+ t0 = tokens[next][0].token == DCT_EOB_TOKEN ?
+ DCT_EOB_TOKEN : ZERO_TOKEN;
+ t1 = tokens[next][1].token == DCT_EOB_TOKEN ?
+ DCT_EOB_TOKEN : ZERO_TOKEN;
+ } else {
+ t0 = t1 = (vp9_dct_value_tokens_ptr + x)->token;
+ }
+ if (next < default_eob) {
+ band = get_coef_band(band_translate, i + 1);
+ if (t0 != DCT_EOB_TOKEN) {
+ pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache,
+ pad, default_eob);
+ if (!x)
+ rate0 += mb->token_costs[tx_size][type][ref][band][pt][
+ tokens[next][0].token];
+ else
+ rate0 += mb->token_costs_noskip[tx_size][type][ref][band][pt][
+ tokens[next][0].token];
+ }
+ if (t1 != DCT_EOB_TOKEN) {
+ pt = trellis_get_coeff_context(scan, nb, i, t1, token_cache,
+ pad, default_eob);
+ if (!x)
+ rate1 += mb->token_costs[tx_size][type][ref][band][pt][
+ tokens[next][1].token];
+ else
+ rate1 += mb->token_costs_noskip[tx_size][type][ref][band][pt][
+ tokens[next][1].token];
+ }
+ }
+
+ UPDATE_RD_COST();
+ /* And pick the best. */
+ best = rd_cost1 < rd_cost0;
+ base_bits = *(vp9_dct_value_cost_ptr + x);
+
+ if (shortcut) {
+ dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
+ d2 = dx * dx;
+ }
+ tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
+ tokens[i][1].error = d2 + (best ? error1 : error0);
+ tokens[i][1].next = next;
+ tokens[i][1].token = best ? t1 : t0;
+ tokens[i][1].qc = x;
+ best_index[i][1] = best;
+ /* Finally, make this the new head of the trellis. */
+ next = i;
+ }
+ /* There's no choice to make for a zero coefficient, so we don't
+ * add a new trellis node, but we do need to update the costs.
+ */
+ else {
+ band = get_coef_band(band_translate, i + 1);
+ t0 = tokens[next][0].token;
+ t1 = tokens[next][1].token;
+ /* Update the cost of each path if we're past the EOB token. */
+ if (t0 != DCT_EOB_TOKEN) {
+ tokens[next][0].rate +=
+ mb->token_costs[tx_size][type][ref][band][0][t0];
+ tokens[next][0].token = ZERO_TOKEN;
+ }
+ if (t1 != DCT_EOB_TOKEN) {
+ tokens[next][1].rate +=
+ mb->token_costs[tx_size][type][ref][band][0][t1];
+ tokens[next][1].token = ZERO_TOKEN;
+ }
+ /* Don't update next, because we didn't add a new node. */
+ }
+ }
+
+ /* Now pick the best path through the whole trellis. */
+ band = get_coef_band(band_translate, i + 1);
+ pt = combine_entropy_contexts(*a, *l);
+ rate0 = tokens[next][0].rate;
+ rate1 = tokens[next][1].rate;
+ error0 = tokens[next][0].error;
+ error1 = tokens[next][1].error;
+ t0 = tokens[next][0].token;
+ t1 = tokens[next][1].token;
+ rate0 += mb->token_costs_noskip[tx_size][type][ref][band][pt][t0];
+ rate1 += mb->token_costs_noskip[tx_size][type][ref][band][pt][t1];
+ UPDATE_RD_COST();
+ best = rd_cost1 < rd_cost0;
+ final_eob = i0 - 1;
+ vpx_memset(qcoeff_ptr, 0, sizeof(*qcoeff_ptr) * (16 << (tx_size * 2)));
+ vpx_memset(dqcoeff_ptr, 0, sizeof(*dqcoeff_ptr) * (16 << (tx_size * 2)));
+ for (i = next; i < eob; i = next) {
+ x = tokens[i][best].qc;
+ if (x) {
+ final_eob = i;
+ }
+ rc = scan[i];
+ qcoeff_ptr[rc] = x;
+ dqcoeff_ptr[rc] = (x * dequant_ptr[rc != 0]) / mul;
+
+ next = tokens[i][best].next;
+ best = best_index[i][best];
+ }
+ final_eob++;
+
+ xd->plane[plane].eobs[block] = final_eob;
+ *a = *l = (final_eob > 0);
+}
+
+struct optimize_block_args {
+ VP9_COMMON *cm;
+ MACROBLOCK *x;
+ struct optimize_ctx *ctx;
+};
+
+void vp9_optimize_b(int plane, int block, BLOCK_SIZE_TYPE bsize,
+ int ss_txfrm_size, VP9_COMMON *cm, MACROBLOCK *mb,
+ struct optimize_ctx *ctx) {
+ MACROBLOCKD *const xd = &mb->e_mbd;
+ int x, y;
+
+ // find current entropy context
+ txfrm_block_to_raster_xy(xd, bsize, plane, block, ss_txfrm_size, &x, &y);
+
+ optimize_b(cm, mb, plane, block, bsize,
+ &ctx->ta[plane][x], &ctx->tl[plane][y], ss_txfrm_size / 2);
+}
+
+static void optimize_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
+ int ss_txfrm_size, void *arg) {
+ const struct optimize_block_args* const args = arg;
+ vp9_optimize_b(plane, block, bsize, ss_txfrm_size, args->cm, args->x,
+ args->ctx);
+}
+
+void vp9_optimize_init(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize,
+ struct optimize_ctx *ctx) {
+ int p;
+
+ for (p = 0; p < MAX_MB_PLANE; p++) {
+ const struct macroblockd_plane* const plane = &xd->plane[p];
+ const int bwl = b_width_log2(bsize) - plane->subsampling_x;
+ const int bhl = b_height_log2(bsize) - plane->subsampling_y;
+ const MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ const TX_SIZE tx_size = p ? get_uv_tx_size(mbmi)
+ : mbmi->txfm_size;
+ int i, j;
+
+ for (i = 0; i < 1 << bwl; i += 1 << tx_size) {
+ int c = 0;
+ ctx->ta[p][i] = 0;
+ for (j = 0; j < 1 << tx_size && !c; j++) {
+ c = ctx->ta[p][i] |= plane->above_context[i + j];
+ }
+ }
+ for (i = 0; i < 1 << bhl; i += 1 << tx_size) {
+ int c = 0;
+ ctx->tl[p][i] = 0;
+ for (j = 0; j < 1 << tx_size && !c; j++) {
+ c = ctx->tl[p][i] |= plane->left_context[i + j];
+ }
+ }
+ }
+}
+
+void vp9_optimize_sby(VP9_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
+ struct optimize_ctx ctx;
+ struct optimize_block_args arg = {cm, x, &ctx};
+ vp9_optimize_init(&x->e_mbd, bsize, &ctx);
+ foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0, optimize_block, &arg);
+}
+
+void vp9_optimize_sbuv(VP9_COMMON *const cm, MACROBLOCK *x,
+ BLOCK_SIZE_TYPE bsize) {
+ struct optimize_ctx ctx;
+ struct optimize_block_args arg = {cm, x, &ctx};
+ vp9_optimize_init(&x->e_mbd, bsize, &ctx);
+ foreach_transformed_block_uv(&x->e_mbd, bsize, optimize_block, &arg);
+}
+
+struct encode_b_args {
+ VP9_COMMON *cm;
+ MACROBLOCK *x;
+ struct optimize_ctx *ctx;
+};
+
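+// Forward-transform and quantize one block. For the luma plane the hybrid
+// transform (vp9_short_fht*) is selected from the prediction mode; chroma
+// always uses the plain DCT.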
+static void xform_quant(int plane, int block, BLOCK_SIZE_TYPE bsize,
+ int ss_txfrm_size, void *arg) {
+ struct encode_b_args* const args = arg;
+ MACROBLOCK* const x = args->x;
+ MACROBLOCKD* const xd = &x->e_mbd;
+ const int bw = plane_block_width(bsize, &xd->plane[plane]);
+ const int raster_block = txfrm_block_to_raster_block(xd, bsize, plane,
+ block, ss_txfrm_size);
+ int16_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block, 16);
+ int16_t *const src_diff = raster_block_offset_int16(xd, bsize, plane,
+ raster_block,
+ x->plane[plane].src_diff);
+ TX_TYPE tx_type = DCT_DCT;
+
+ switch (ss_txfrm_size / 2) {
+ case TX_32X32:
+ if (x->rd_search)
+ vp9_short_fdct32x32_rd(src_diff, coeff, bw * 2);
+ else
+ vp9_short_fdct32x32(src_diff, coeff, bw * 2);
+ break;
+ case TX_16X16:
+ tx_type = plane == 0 ? get_tx_type_16x16(xd, raster_block) : DCT_DCT;
+ if (tx_type != DCT_DCT)
+ vp9_short_fht16x16(src_diff, coeff, bw, tx_type);
+ else
+ x->fwd_txm16x16(src_diff, coeff, bw * 2);
+ break;
+ case TX_8X8:
+ tx_type = plane == 0 ? get_tx_type_8x8(xd, raster_block) : DCT_DCT;
+ if (tx_type != DCT_DCT)
+ vp9_short_fht8x8(src_diff, coeff, bw, tx_type);
+ else
+ x->fwd_txm8x8(src_diff, coeff, bw * 2);
+ break;
+ case TX_4X4:
+ tx_type = plane == 0 ? get_tx_type_4x4(xd, raster_block) : DCT_DCT;
+ if (tx_type != DCT_DCT)
+ vp9_short_fht4x4(src_diff, coeff, bw, tx_type);
+ else
+ x->fwd_txm4x4(src_diff, coeff, bw * 2);
+ break;
+ default:
+ assert(0);
+ }
+
+ vp9_quantize(x, plane, block, 16 << ss_txfrm_size, tx_type);
+}
+
+static void encode_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
+ int ss_txfrm_size, void *arg) {
+ struct encode_b_args *const args = arg;
+ MACROBLOCK *const x = args->x;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ const int raster_block = txfrm_block_to_raster_block(xd, bsize, plane,
+ block, ss_txfrm_size);
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block, 16);
+ uint8_t *const dst = raster_block_offset_uint8(xd, bsize, plane,
+ raster_block,
+ pd->dst.buf, pd->dst.stride);
+ TX_TYPE tx_type = DCT_DCT;
+
+ xform_quant(plane, block, bsize, ss_txfrm_size, arg);
+
+ if (x->optimize)
+ vp9_optimize_b(plane, block, bsize, ss_txfrm_size, args->cm, x, args->ctx);
+
+ switch (ss_txfrm_size / 2) {
+ case TX_32X32:
+ vp9_short_idct32x32_add(dqcoeff, dst, pd->dst.stride);
+ break;
+ case TX_16X16:
+ tx_type = plane == 0 ? get_tx_type_16x16(xd, raster_block) : DCT_DCT;
+ if (tx_type == DCT_DCT)
+ vp9_short_idct16x16_add(dqcoeff, dst, pd->dst.stride);
+ else
+ vp9_short_iht16x16_add(dqcoeff, dst, pd->dst.stride, tx_type);
+ break;
+ case TX_8X8:
+ tx_type = plane == 0 ? get_tx_type_8x8(xd, raster_block) : DCT_DCT;
+ if (tx_type == DCT_DCT)
+ vp9_short_idct8x8_add(dqcoeff, dst, pd->dst.stride);
+ else
+ vp9_short_iht8x8_add(dqcoeff, dst, pd->dst.stride, tx_type);
+ break;
+ case TX_4X4:
+ tx_type = plane == 0 ? get_tx_type_4x4(xd, raster_block) : DCT_DCT;
+ if (tx_type == DCT_DCT)
+ // this is like vp9_short_idct4x4 but has a special case around eob<=1
+ // which is significant (not just an optimization) for the lossless
+ // case.
+ inverse_transform_b_4x4_add(xd, pd->eobs[block], dqcoeff,
+ dst, pd->dst.stride);
+ else
+ vp9_short_iht4x4_add(dqcoeff, dst, pd->dst.stride, tx_type);
+ break;
+ }
+}
+
+void vp9_xform_quant_sby(VP9_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
+ MACROBLOCKD* const xd = &x->e_mbd;
+ struct encode_b_args arg = {cm, x, NULL};
+
+ foreach_transformed_block_in_plane(xd, bsize, 0, xform_quant, &arg);
+}
+
+void vp9_xform_quant_sbuv(VP9_COMMON *cm, MACROBLOCK *x,
+ BLOCK_SIZE_TYPE bsize) {
+ MACROBLOCKD* const xd = &x->e_mbd;
+ struct encode_b_args arg = {cm, x, NULL};
+
+ foreach_transformed_block_uv(xd, bsize, xform_quant, &arg);
+}
+
+void vp9_encode_sby(VP9_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ struct optimize_ctx ctx;
+ struct encode_b_args arg = {cm, x, &ctx};
+
+ vp9_subtract_sby(x, bsize);
+ if (x->optimize)
+ vp9_optimize_init(xd, bsize, &ctx);
+
+ foreach_transformed_block_in_plane(xd, bsize, 0, encode_block, &arg);
+}
+
+void vp9_encode_sbuv(VP9_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ struct optimize_ctx ctx;
+ struct encode_b_args arg = {cm, x, &ctx};
+
+ vp9_subtract_sbuv(x, bsize);
+ if (x->optimize)
+ vp9_optimize_init(xd, bsize, &ctx);
+
+ foreach_transformed_block_uv(xd, bsize, encode_block, &arg);
+}
+
+void vp9_encode_sb(VP9_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ struct optimize_ctx ctx;
+ struct encode_b_args arg = {cm, x, &ctx};
+
+ vp9_subtract_sb(x, bsize);
+ if (x->optimize)
+ vp9_optimize_init(xd, bsize, &ctx);
+
+ foreach_transformed_block(xd, bsize, encode_block, &arg);
+}
+
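+// Intra-code one block: predict from previously reconstructed pixels,
+// subtract, forward-transform/quantize, then inverse-transform back into the
+// destination buffer so that later blocks can predict from this one.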
+static void encode_block_intra(int plane, int block, BLOCK_SIZE_TYPE bsize,
+ int ss_txfrm_size, void *arg) {
+ struct encode_b_args* const args = arg;
+ MACROBLOCK *const x = args->x;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+ const TX_SIZE tx_size = (TX_SIZE)(ss_txfrm_size / 2);
+ struct macroblock_plane *const p = &x->plane[plane];
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block, 16);
+ const int bw = plane_block_width(bsize, pd);
+ const int raster_block = txfrm_block_to_raster_block(xd, bsize, plane,
+ block, ss_txfrm_size);
+
+ uint8_t *const src = raster_block_offset_uint8(xd, bsize, plane, raster_block,
+ p->src.buf, p->src.stride);
+ uint8_t *const dst = raster_block_offset_uint8(xd, bsize, plane, raster_block,
+ pd->dst.buf, pd->dst.stride);
+ int16_t *const src_diff = raster_block_offset_int16(xd, bsize, plane,
+ raster_block,
+ p->src_diff);
+
+ const int txfm_b_size = 4 << tx_size;
+ int ib = raster_block;
+ int tx_ib = ib >> tx_size;
+ int plane_b_size;
+
+ TX_TYPE tx_type;
+ int mode, b_mode;
+
+ if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) {
+ extend_for_intra(xd, plane, block, bsize, ss_txfrm_size);
+ }
+
+  mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
+ if (plane == 0 &&
+ mbmi->sb_type < BLOCK_SIZE_SB8X8 &&
+ mbmi->ref_frame[0] == INTRA_FRAME)
+ b_mode = xd->mode_info_context->bmi[ib].as_mode.first;
+ else
+ b_mode = mode;
+
+ assert(b_mode >= DC_PRED && b_mode <= TM_PRED);
+
+ plane_b_size = b_width_log2(bsize) - pd->subsampling_x;
+ vp9_predict_intra_block(xd, tx_ib, plane_b_size, tx_size, b_mode,
+ dst, pd->dst.stride);
+ vp9_subtract_block(txfm_b_size, txfm_b_size, src_diff, bw,
+ src, p->src.stride, dst, pd->dst.stride);
+
+ xform_quant(plane, block, bsize, ss_txfrm_size, arg);
+
+ // if (x->optimize)
+ // vp9_optimize_b(plane, block, bsize, ss_txfrm_size,
+ // args->cm, x, args->ctx);
+
+ switch (tx_size) {
+ case TX_32X32:
+ vp9_short_idct32x32_add(dqcoeff, dst, pd->dst.stride);
+ break;
+ case TX_16X16:
+ tx_type = plane == 0 ? get_tx_type_16x16(xd, raster_block) : DCT_DCT;
+ if (tx_type == DCT_DCT)
+ vp9_short_idct16x16_add(dqcoeff, dst, pd->dst.stride);
+ else
+ vp9_short_iht16x16_add(dqcoeff, dst, pd->dst.stride, tx_type);
+ break;
+ case TX_8X8:
+ tx_type = plane == 0 ? get_tx_type_8x8(xd, raster_block) : DCT_DCT;
+ if (tx_type == DCT_DCT)
+ vp9_short_idct8x8_add(dqcoeff, dst, pd->dst.stride);
+ else
+ vp9_short_iht8x8_add(dqcoeff, dst, pd->dst.stride, tx_type);
+ break;
+ case TX_4X4:
+ tx_type = plane == 0 ? get_tx_type_4x4(xd, raster_block) : DCT_DCT;
+ if (tx_type == DCT_DCT)
+ // This is like vp9_short_idct4x4, but has a special case around
+ // eob <= 1 which is significant (not just an optimization) for the
+ // lossless case.
+ inverse_transform_b_4x4_add(xd, pd->eobs[block], dqcoeff,
+ dst, pd->dst.stride);
+ else
+ vp9_short_iht4x4_add(dqcoeff, dst, pd->dst.stride, tx_type);
+ break;
+ }
+}
+
+void vp9_encode_intra_block_y(VP9_COMMON *cm, MACROBLOCK *x,
+ BLOCK_SIZE_TYPE bsize) {
+ MACROBLOCKD* const xd = &x->e_mbd;
+ struct optimize_ctx ctx;
+ struct encode_b_args arg = {cm, x, &ctx};
+
+ foreach_transformed_block_in_plane(xd, bsize, 0,
+ encode_block_intra, &arg);
+}
+
+void vp9_encode_intra_block_uv(VP9_COMMON *cm, MACROBLOCK *x,
+ BLOCK_SIZE_TYPE bsize) {
+ MACROBLOCKD* const xd = &x->e_mbd;
+ struct optimize_ctx ctx;
+ struct encode_b_args arg = {cm, x, &ctx};
+
+ foreach_transformed_block_uv(xd, bsize, encode_block_intra, &arg);
+}
+
diff --git a/libvpx/vp9/encoder/vp9_encodemb.h b/libvpx/vp9/encoder/vp9_encodemb.h
new file mode 100644
index 0000000..5796903
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_encodemb.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_ENCODEMB_H_
+#define VP9_ENCODER_VP9_ENCODEMB_H_
+
+#include "./vpx_config.h"
+#include "vp9/encoder/vp9_block.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/common/vp9_onyxc_int.h"
+
+typedef struct {
+ MB_PREDICTION_MODE mode;
+ MV_REFERENCE_FRAME ref_frame;
+ MV_REFERENCE_FRAME second_ref_frame;
+} MODE_DEFINITION;
+
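+// Entropy contexts for the above (ta) and left (tl) edges of each plane,
+// used by the coefficient optimizer to track neighbouring block state.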
+struct optimize_ctx {
+ ENTROPY_CONTEXT ta[MAX_MB_PLANE][16];
+ ENTROPY_CONTEXT tl[MAX_MB_PLANE][16];
+};
+
+void vp9_optimize_init(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize,
+ struct optimize_ctx *ctx);
+void vp9_optimize_b(int plane, int block, BLOCK_SIZE_TYPE bsize,
+ int ss_txfrm_size, VP9_COMMON *cm, MACROBLOCK *x,
+ struct optimize_ctx *ctx);
+void vp9_optimize_sby(VP9_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
+void vp9_optimize_sbuv(VP9_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
+
+void vp9_encode_sb(VP9_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
+void vp9_encode_sby(VP9_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
+void vp9_encode_sbuv(VP9_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
+
+void vp9_xform_quant_sby(VP9_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
+void vp9_xform_quant_sbuv(VP9_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
+
+void vp9_subtract_block(int rows, int cols,
+ int16_t *diff_ptr, int diff_stride,
+ const uint8_t *src_ptr, int src_stride,
+ const uint8_t *pred_ptr, int pred_stride);
+void vp9_subtract_sby(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
+void vp9_subtract_sbuv(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
+void vp9_subtract_sb(MACROBLOCK *xd, BLOCK_SIZE_TYPE bsize);
+
+#endif // VP9_ENCODER_VP9_ENCODEMB_H_
diff --git a/libvpx/vp9/encoder/vp9_encodemv.c b/libvpx/vp9/encoder/vp9_encodemv.c
new file mode 100644
index 0000000..a582d18
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_encodemv.c
@@ -0,0 +1,610 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vp9/common/vp9_common.h"
+#include "vp9/encoder/vp9_encodemv.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_systemdependent.h"
+
+#include <math.h>
+
+#ifdef ENTROPY_STATS
+extern unsigned int active_section;
+#endif
+
+#ifdef NMV_STATS
+nmv_context_counts tnmvcounts;
+#endif
+
+static void encode_mv_component(vp9_writer* w, int comp,
+ const nmv_component* mvcomp, int usehp) {
+ int offset;
+ const int sign = comp < 0;
+ const int mag = sign ? -comp : comp;
+ const int mv_class = vp9_get_mv_class(mag - 1, &offset);
+ const int d = offset >> 3; // int mv data
+ const int fr = (offset >> 1) & 3; // fractional mv data
+ const int hp = offset & 1; // high precision mv data
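+ // The magnitude offset is in eighth-pel units: bit 0 is the
+ // high-precision (eighth-pel) bit, bits 1-2 select the quarter-pel
+ // fraction and the remaining bits give the full-pel offset within
+ // the class.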
+
+ assert(comp != 0);
+
+ // Sign
+ vp9_write(w, sign, mvcomp->sign);
+
+ // Class
+ write_token(w, vp9_mv_class_tree, mvcomp->classes,
+ &vp9_mv_class_encodings[mv_class]);
+
+ // Integer bits
+ if (mv_class == MV_CLASS_0) {
+ write_token(w, vp9_mv_class0_tree, mvcomp->class0,
+ &vp9_mv_class0_encodings[d]);
+ } else {
+ int i;
+ const int n = mv_class + CLASS0_BITS - 1; // number of bits
+ for (i = 0; i < n; ++i)
+ vp9_write(w, (d >> i) & 1, mvcomp->bits[i]);
+ }
+
+ // Fractional bits
+ write_token(w, vp9_mv_fp_tree,
+ mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
+ &vp9_mv_fp_encodings[fr]);
+
+ // High precision bit
+ if (usehp)
+ vp9_write(w, hp,
+ mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp);
+}
+
+static void build_nmv_component_cost_table(int *mvcost,
+ const nmv_component* const mvcomp,
+ int usehp) {
+ int i, v;
+ int sign_cost[2], class_cost[MV_CLASSES], class0_cost[CLASS0_SIZE];
+ int bits_cost[MV_OFFSET_BITS][2];
+ int class0_fp_cost[CLASS0_SIZE][4], fp_cost[4];
+ int class0_hp_cost[2], hp_cost[2];
+
+ sign_cost[0] = vp9_cost_zero(mvcomp->sign);
+ sign_cost[1] = vp9_cost_one(mvcomp->sign);
+ vp9_cost_tokens(class_cost, mvcomp->classes, vp9_mv_class_tree);
+ vp9_cost_tokens(class0_cost, mvcomp->class0, vp9_mv_class0_tree);
+ for (i = 0; i < MV_OFFSET_BITS; ++i) {
+ bits_cost[i][0] = vp9_cost_zero(mvcomp->bits[i]);
+ bits_cost[i][1] = vp9_cost_one(mvcomp->bits[i]);
+ }
+
+ for (i = 0; i < CLASS0_SIZE; ++i)
+ vp9_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp9_mv_fp_tree);
+ vp9_cost_tokens(fp_cost, mvcomp->fp, vp9_mv_fp_tree);
+
+ if (usehp) {
+ class0_hp_cost[0] = vp9_cost_zero(mvcomp->class0_hp);
+ class0_hp_cost[1] = vp9_cost_one(mvcomp->class0_hp);
+ hp_cost[0] = vp9_cost_zero(mvcomp->hp);
+ hp_cost[1] = vp9_cost_one(mvcomp->hp);
+ }
+ mvcost[0] = 0;
+ for (v = 1; v <= MV_MAX; ++v) {
+ int z, c, o, d, e, f, cost = 0;
+ z = v - 1;
+ c = vp9_get_mv_class(z, &o);
+ cost += class_cost[c];
+ d = (o >> 3); /* int mv data */
+ f = (o >> 1) & 3; /* fractional pel mv data */
+ e = (o & 1); /* high precision mv data */
+ if (c == MV_CLASS_0) {
+ cost += class0_cost[d];
+ } else {
+ int i, b;
+ b = c + CLASS0_BITS - 1; /* number of bits */
+ for (i = 0; i < b; ++i)
+ cost += bits_cost[i][((d >> i) & 1)];
+ }
+ if (c == MV_CLASS_0) {
+ cost += class0_fp_cost[d][f];
+ } else {
+ cost += fp_cost[f];
+ }
+ if (usehp) {
+ if (c == MV_CLASS_0) {
+ cost += class0_hp_cost[e];
+ } else {
+ cost += hp_cost[e];
+ }
+ }
+ mvcost[v] = cost + sign_cost[0];
+ mvcost[-v] = cost + sign_cost[1];
+ }
+}
+
+static int update_nmv_savings(const unsigned int ct[2],
+ const vp9_prob cur_p,
+ const vp9_prob new_p,
+ const vp9_prob upd_p) {
+
+#ifdef LOW_PRECISION_MV_UPDATE
+ vp9_prob mod_p = new_p | 1;
+#else
+ vp9_prob mod_p = new_p;
+#endif
+ const int cur_b = cost_branch256(ct, cur_p);
+ const int mod_b = cost_branch256(ct, mod_p);
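+ // Probability costs are in 1/256-bit units (256 == one bit), so the
+ // overhead of an update is the 7 (or 8) literal bits of the new
+ // probability plus the cost of signalling the update flag itself.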
+ const int cost = 7 * 256 +
+#ifndef LOW_PRECISION_MV_UPDATE
+ 256 +
+#endif
+ (vp9_cost_one(upd_p) - vp9_cost_zero(upd_p));
+ if (cur_b - mod_b - cost > 0) {
+ return cur_b - mod_b - cost;
+ } else {
+ return 0 - vp9_cost_zero(upd_p);
+ }
+}
+
+static int update_nmv(
+ vp9_writer *const bc,
+ const unsigned int ct[2],
+ vp9_prob *const cur_p,
+ const vp9_prob new_p,
+ const vp9_prob upd_p) {
+
+#ifdef LOW_PRECISION_MV_UPDATE
+ vp9_prob mod_p = new_p | 1;
+#else
+ vp9_prob mod_p = new_p;
+#endif
+
+ const int cur_b = cost_branch256(ct, *cur_p);
+ const int mod_b = cost_branch256(ct, mod_p);
+ const int cost = 7 * 256 +
+#ifndef LOW_PRECISION_MV_UPDATE
+ 256 +
+#endif
+ (vp9_cost_one(upd_p) - vp9_cost_zero(upd_p));
+
+ if (cur_b - mod_b > cost) {
+ *cur_p = mod_p;
+ vp9_write(bc, 1, upd_p);
+#ifdef LOW_PRECISION_MV_UPDATE
+ vp9_write_literal(bc, mod_p >> 1, 7);
+#else
+ vp9_write_literal(bc, mod_p, 8);
+#endif
+ return 1;
+ } else {
+ vp9_write(bc, 0, upd_p);
+ return 0;
+ }
+}
+
+void print_nmvcounts(nmv_context_counts tnmvcounts) {
+ int i, j, k;
+ printf("\nCounts =\n { ");
+ for (j = 0; j < MV_JOINTS; ++j)
+ printf("%d, ", tnmvcounts.joints[j]);
+ printf("},\n");
+ for (i = 0; i < 2; ++i) {
+ printf(" {\n");
+ printf(" %d/%d,\n", tnmvcounts.comps[i].sign[0],
+ tnmvcounts.comps[i].sign[1]);
+ printf(" { ");
+ for (j = 0; j < MV_CLASSES; ++j)
+ printf("%d, ", tnmvcounts.comps[i].classes[j]);
+ printf("},\n");
+ printf(" { ");
+ for (j = 0; j < CLASS0_SIZE; ++j)
+ printf("%d, ", tnmvcounts.comps[i].class0[j]);
+ printf("},\n");
+ printf(" { ");
+ for (j = 0; j < MV_OFFSET_BITS; ++j)
+ printf("%d/%d, ", tnmvcounts.comps[i].bits[j][0],
+ tnmvcounts.comps[i].bits[j][1]);
+ printf("},\n");
+
+ printf(" {");
+ for (j = 0; j < CLASS0_SIZE; ++j) {
+ printf("{");
+ for (k = 0; k < 4; ++k)
+ printf("%d, ", tnmvcounts.comps[i].class0_fp[j][k]);
+ printf("}, ");
+ }
+ printf("},\n");
+
+ printf(" { ");
+ for (j = 0; j < 4; ++j)
+ printf("%d, ", tnmvcounts.comps[i].fp[j]);
+ printf("},\n");
+
+ printf(" %d/%d,\n",
+ tnmvcounts.comps[i].class0_hp[0],
+ tnmvcounts.comps[i].class0_hp[1]);
+ printf(" %d/%d,\n",
+ tnmvcounts.comps[i].hp[0],
+ tnmvcounts.comps[i].hp[1]);
+ printf(" },\n");
+ }
+}
+
+#ifdef NMV_STATS
+void init_nmvstats(void) {
+ vp9_zero(tnmvcounts);
+}
+
+void print_nmvstats(void) {
+ nmv_context prob;
+ unsigned int branch_ct_joint[MV_JOINTS - 1][2];
+ unsigned int branch_ct_sign[2][2];
+ unsigned int branch_ct_classes[2][MV_CLASSES - 1][2];
+ unsigned int branch_ct_class0[2][CLASS0_SIZE - 1][2];
+ unsigned int branch_ct_bits[2][MV_OFFSET_BITS][2];
+ unsigned int branch_ct_class0_fp[2][CLASS0_SIZE][4 - 1][2];
+ unsigned int branch_ct_fp[2][4 - 1][2];
+ unsigned int branch_ct_class0_hp[2][2];
+ unsigned int branch_ct_hp[2][2];
+ int i, j, k;
+ vp9_counts_to_nmv_context(&tnmvcounts, &prob, 1,
+ branch_ct_joint, branch_ct_sign, branch_ct_classes,
+ branch_ct_class0, branch_ct_bits,
+ branch_ct_class0_fp, branch_ct_fp,
+ branch_ct_class0_hp, branch_ct_hp);
+
+ printf("\nCounts =\n { ");
+ for (j = 0; j < MV_JOINTS; ++j)
+ printf("%d, ", tnmvcounts.joints[j]);
+ printf("},\n");
+ for (i = 0; i < 2; ++i) {
+ printf(" {\n");
+ printf(" %d/%d,\n", tnmvcounts.comps[i].sign[0],
+ tnmvcounts.comps[i].sign[1]);
+ printf(" { ");
+ for (j = 0; j < MV_CLASSES; ++j)
+ printf("%d, ", tnmvcounts.comps[i].classes[j]);
+ printf("},\n");
+ printf(" { ");
+ for (j = 0; j < CLASS0_SIZE; ++j)
+ printf("%d, ", tnmvcounts.comps[i].class0[j]);
+ printf("},\n");
+ printf(" { ");
+ for (j = 0; j < MV_OFFSET_BITS; ++j)
+ printf("%d/%d, ", tnmvcounts.comps[i].bits[j][0],
+ tnmvcounts.comps[i].bits[j][1]);
+ printf("},\n");
+
+ printf(" {");
+ for (j = 0; j < CLASS0_SIZE; ++j) {
+ printf("{");
+ for (k = 0; k < 4; ++k)
+ printf("%d, ", tnmvcounts.comps[i].class0_fp[j][k]);
+ printf("}, ");
+ }
+ printf("},\n");
+
+ printf(" { ");
+ for (j = 0; j < 4; ++j)
+ printf("%d, ", tnmvcounts.comps[i].fp[j]);
+ printf("},\n");
+
+ printf(" %d/%d,\n",
+ tnmvcounts.comps[i].class0_hp[0],
+ tnmvcounts.comps[i].class0_hp[1]);
+ printf(" %d/%d,\n",
+ tnmvcounts.comps[i].hp[0],
+ tnmvcounts.comps[i].hp[1]);
+ printf(" },\n");
+ }
+
+ printf("\nProbs =\n { ");
+ for (j = 0; j < MV_JOINTS - 1; ++j)
+ printf("%d, ", prob.joints[j]);
+ printf("},\n");
+ for (i = 0; i < 2; ++i) {
+ printf(" {\n");
+ printf(" %d,\n", prob.comps[i].sign);
+ printf(" { ");
+ for (j = 0; j < MV_CLASSES - 1; ++j)
+ printf("%d, ", prob.comps[i].classes[j]);
+ printf("},\n");
+ printf(" { ");
+ for (j = 0; j < CLASS0_SIZE - 1; ++j)
+ printf("%d, ", prob.comps[i].class0[j]);
+ printf("},\n");
+ printf(" { ");
+ for (j = 0; j < MV_OFFSET_BITS; ++j)
+ printf("%d, ", prob.comps[i].bits[j]);
+ printf("},\n");
+ printf(" { ");
+ for (j = 0; j < CLASS0_SIZE; ++j) {
+ printf("{");
+ for (k = 0; k < 3; ++k)
+ printf("%d, ", prob.comps[i].class0_fp[j][k]);
+ printf("}, ");
+ }
+ printf("},\n");
+ printf(" { ");
+ for (j = 0; j < 3; ++j)
+ printf("%d, ", prob.comps[i].fp[j]);
+ printf("},\n");
+
+ printf(" %d,\n", prob.comps[i].class0_hp);
+ printf(" %d,\n", prob.comps[i].hp);
+ printf(" },\n");
+ }
+}
+
+static void add_nmvcount(nmv_context_counts* const dst,
+ const nmv_context_counts* const src) {
+ int i, j, k;
+ for (j = 0; j < MV_JOINTS; ++j) {
+ dst->joints[j] += src->joints[j];
+ }
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < MV_VALS; ++j) {
+ dst->comps[i].mvcount[j] += src->comps[i].mvcount[j];
+ }
+ dst->comps[i].sign[0] += src->comps[i].sign[0];
+ dst->comps[i].sign[1] += src->comps[i].sign[1];
+ for (j = 0; j < MV_CLASSES; ++j) {
+ dst->comps[i].classes[j] += src->comps[i].classes[j];
+ }
+ for (j = 0; j < CLASS0_SIZE; ++j) {
+ dst->comps[i].class0[j] += src->comps[i].class0[j];
+ }
+ for (j = 0; j < MV_OFFSET_BITS; ++j) {
+ dst->comps[i].bits[j][0] += src->comps[i].bits[j][0];
+ dst->comps[i].bits[j][1] += src->comps[i].bits[j][1];
+ }
+ }
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < CLASS0_SIZE; ++j) {
+ for (k = 0; k < 4; ++k) {
+ dst->comps[i].class0_fp[j][k] += src->comps[i].class0_fp[j][k];
+ }
+ }
+ for (j = 0; j < 4; ++j) {
+ dst->comps[i].fp[j] += src->comps[i].fp[j];
+ }
+ dst->comps[i].class0_hp[0] += src->comps[i].class0_hp[0];
+ dst->comps[i].class0_hp[1] += src->comps[i].class0_hp[1];
+ dst->comps[i].hp[0] += src->comps[i].hp[0];
+ dst->comps[i].hp[1] += src->comps[i].hp[1];
+ }
+}
+#endif
+
+void vp9_write_nmv_probs(VP9_COMP* const cpi, int usehp, vp9_writer* const bc) {
+ int i, j;
+ nmv_context prob;
+ unsigned int branch_ct_joint[MV_JOINTS - 1][2];
+ unsigned int branch_ct_sign[2][2];
+ unsigned int branch_ct_classes[2][MV_CLASSES - 1][2];
+ unsigned int branch_ct_class0[2][CLASS0_SIZE - 1][2];
+ unsigned int branch_ct_bits[2][MV_OFFSET_BITS][2];
+ unsigned int branch_ct_class0_fp[2][CLASS0_SIZE][4 - 1][2];
+ unsigned int branch_ct_fp[2][4 - 1][2];
+ unsigned int branch_ct_class0_hp[2][2];
+ unsigned int branch_ct_hp[2][2];
+#ifdef MV_GROUP_UPDATE
+ int savings = 0;
+#endif
+
+#ifdef NMV_STATS
+ if (!cpi->dummy_packing)
+ add_nmvcount(&tnmvcounts, &cpi->NMVcount);
+#endif
+ vp9_counts_to_nmv_context(&cpi->NMVcount, &prob, usehp,
+ branch_ct_joint, branch_ct_sign, branch_ct_classes,
+ branch_ct_class0, branch_ct_bits,
+ branch_ct_class0_fp, branch_ct_fp,
+ branch_ct_class0_hp, branch_ct_hp);
+ /* write updates if they help */
+#ifdef MV_GROUP_UPDATE
+ for (j = 0; j < MV_JOINTS - 1; ++j) {
+ savings += update_nmv_savings(branch_ct_joint[j],
+ cpi->common.fc.nmvc.joints[j],
+ prob.joints[j],
+ VP9_NMV_UPDATE_PROB);
+ }
+ for (i = 0; i < 2; ++i) {
+ savings += update_nmv_savings(branch_ct_sign[i],
+ cpi->common.fc.nmvc.comps[i].sign,
+ prob.comps[i].sign,
+ VP9_NMV_UPDATE_PROB);
+ for (j = 0; j < MV_CLASSES - 1; ++j) {
+ savings += update_nmv_savings(branch_ct_classes[i][j],
+ cpi->common.fc.nmvc.comps[i].classes[j],
+ prob.comps[i].classes[j],
+ VP9_NMV_UPDATE_PROB);
+ }
+ for (j = 0; j < CLASS0_SIZE - 1; ++j) {
+ savings += update_nmv_savings(branch_ct_class0[i][j],
+ cpi->common.fc.nmvc.comps[i].class0[j],
+ prob.comps[i].class0[j],
+ VP9_NMV_UPDATE_PROB);
+ }
+ for (j = 0; j < MV_OFFSET_BITS; ++j) {
+ savings += update_nmv_savings(branch_ct_bits[i][j],
+ cpi->common.fc.nmvc.comps[i].bits[j],
+ prob.comps[i].bits[j],
+ VP9_NMV_UPDATE_PROB);
+ }
+ }
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < CLASS0_SIZE; ++j) {
+ int k;
+ for (k = 0; k < 3; ++k) {
+ savings += update_nmv_savings(branch_ct_class0_fp[i][j][k],
+ cpi->common.fc.nmvc.comps[i].class0_fp[j][k],
+ prob.comps[i].class0_fp[j][k],
+ VP9_NMV_UPDATE_PROB);
+ }
+ }
+ for (j = 0; j < 3; ++j) {
+ savings += update_nmv_savings(branch_ct_fp[i][j],
+ cpi->common.fc.nmvc.comps[i].fp[j],
+ prob.comps[i].fp[j],
+ VP9_NMV_UPDATE_PROB);
+ }
+ }
+ if (usehp) {
+ for (i = 0; i < 2; ++i) {
+ savings += update_nmv_savings(branch_ct_class0_hp[i],
+ cpi->common.fc.nmvc.comps[i].class0_hp,
+ prob.comps[i].class0_hp,
+ VP9_NMV_UPDATE_PROB);
+ savings += update_nmv_savings(branch_ct_hp[i],
+ cpi->common.fc.nmvc.comps[i].hp,
+ prob.comps[i].hp,
+ VP9_NMV_UPDATE_PROB);
+ }
+ }
+ if (savings <= 0) {
+ vp9_write_bit(bc, 0);
+ return;
+ }
+ vp9_write_bit(bc, 1);
+#endif
+
+ for (j = 0; j < MV_JOINTS - 1; ++j) {
+ update_nmv(bc, branch_ct_joint[j],
+ &cpi->common.fc.nmvc.joints[j],
+ prob.joints[j],
+ VP9_NMV_UPDATE_PROB);
+ }
+ for (i = 0; i < 2; ++i) {
+ update_nmv(bc, branch_ct_sign[i],
+ &cpi->common.fc.nmvc.comps[i].sign,
+ prob.comps[i].sign,
+ VP9_NMV_UPDATE_PROB);
+ for (j = 0; j < MV_CLASSES - 1; ++j) {
+ update_nmv(bc, branch_ct_classes[i][j],
+ &cpi->common.fc.nmvc.comps[i].classes[j],
+ prob.comps[i].classes[j],
+ VP9_NMV_UPDATE_PROB);
+ }
+ for (j = 0; j < CLASS0_SIZE - 1; ++j) {
+ update_nmv(bc, branch_ct_class0[i][j],
+ &cpi->common.fc.nmvc.comps[i].class0[j],
+ prob.comps[i].class0[j],
+ VP9_NMV_UPDATE_PROB);
+ }
+ for (j = 0; j < MV_OFFSET_BITS; ++j) {
+ update_nmv(bc, branch_ct_bits[i][j],
+ &cpi->common.fc.nmvc.comps[i].bits[j],
+ prob.comps[i].bits[j],
+ VP9_NMV_UPDATE_PROB);
+ }
+ }
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < CLASS0_SIZE; ++j) {
+ int k;
+ for (k = 0; k < 3; ++k) {
+ update_nmv(bc, branch_ct_class0_fp[i][j][k],
+ &cpi->common.fc.nmvc.comps[i].class0_fp[j][k],
+ prob.comps[i].class0_fp[j][k],
+ VP9_NMV_UPDATE_PROB);
+ }
+ }
+ for (j = 0; j < 3; ++j) {
+ update_nmv(bc, branch_ct_fp[i][j],
+ &cpi->common.fc.nmvc.comps[i].fp[j],
+ prob.comps[i].fp[j],
+ VP9_NMV_UPDATE_PROB);
+ }
+ }
+ if (usehp) {
+ for (i = 0; i < 2; ++i) {
+ update_nmv(bc, branch_ct_class0_hp[i],
+ &cpi->common.fc.nmvc.comps[i].class0_hp,
+ prob.comps[i].class0_hp,
+ VP9_NMV_UPDATE_PROB);
+ update_nmv(bc, branch_ct_hp[i],
+ &cpi->common.fc.nmvc.comps[i].hp,
+ prob.comps[i].hp,
+ VP9_NMV_UPDATE_PROB);
+ }
+ }
+}
+
+void vp9_encode_mv(vp9_writer* w, const MV* mv, const MV* ref,
+ const nmv_context* mvctx, int usehp) {
+ const MV diff = {mv->row - ref->row,
+ mv->col - ref->col};
+ const MV_JOINT_TYPE j = vp9_get_mv_joint(&diff);
+ usehp = usehp && vp9_use_nmv_hp(ref);
+
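+ // The joint code signals which of the two components are non-zero;
+ // only those components are then coded explicitly.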
+ write_token(w, vp9_mv_joint_tree, mvctx->joints, &vp9_mv_joint_encodings[j]);
+ if (mv_joint_vertical(j))
+ encode_mv_component(w, diff.row, &mvctx->comps[0], usehp);
+
+ if (mv_joint_horizontal(j))
+ encode_mv_component(w, diff.col, &mvctx->comps[1], usehp);
+}
+
+void vp9_build_nmv_cost_table(int *mvjoint,
+ int *mvcost[2],
+ const nmv_context* const mvctx,
+ int usehp,
+ int mvc_flag_v,
+ int mvc_flag_h) {
+ vp9_clear_system_state();
+ vp9_cost_tokens(mvjoint, mvctx->joints, vp9_mv_joint_tree);
+ if (mvc_flag_v)
+ build_nmv_component_cost_table(mvcost[0], &mvctx->comps[0], usehp);
+ if (mvc_flag_h)
+ build_nmv_component_cost_table(mvcost[1], &mvctx->comps[1], usehp);
+}
+
+void vp9_update_nmv_count(VP9_COMP *cpi, MACROBLOCK *x,
+ int_mv *best_ref_mv, int_mv *second_best_ref_mv) {
+ MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
+ MV mv;
+ int bwl = b_width_log2(mbmi->sb_type), bw = 1 << bwl;
+ int bhl = b_height_log2(mbmi->sb_type), bh = 1 << bhl;
+ int idx, idy;
+
+ if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
+ int i;
+ PARTITION_INFO *pi = x->partition_info;
+ for (idy = 0; idy < 2; idy += bh) {
+ for (idx = 0; idx < 2; idx += bw) {
+ i = idy * 2 + idx;
+ if (pi->bmi[i].mode == NEWMV) {
+ mv.row = (pi->bmi[i].mv.as_mv.row - best_ref_mv->as_mv.row);
+ mv.col = (pi->bmi[i].mv.as_mv.col - best_ref_mv->as_mv.col);
+ vp9_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount,
+ x->e_mbd.allow_high_precision_mv);
+ if (x->e_mbd.mode_info_context->mbmi.ref_frame[1] > INTRA_FRAME) {
+ mv.row = pi->bmi[i].second_mv.as_mv.row -
+ second_best_ref_mv->as_mv.row;
+ mv.col = pi->bmi[i].second_mv.as_mv.col -
+ second_best_ref_mv->as_mv.col;
+ vp9_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount,
+ x->e_mbd.allow_high_precision_mv);
+ }
+ }
+ }
+ }
+ } else if (mbmi->mode == NEWMV) {
+ mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
+ mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
+ vp9_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount,
+ x->e_mbd.allow_high_precision_mv);
+ if (mbmi->ref_frame[1] > INTRA_FRAME) {
+ mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
+ mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
+ vp9_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount,
+ x->e_mbd.allow_high_precision_mv);
+ }
+ }
+}
diff --git a/libvpx/vp9/encoder/vp9_encodemv.h b/libvpx/vp9/encoder/vp9_encodemv.h
new file mode 100644
index 0000000..cb25d85
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_encodemv.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_ENCODEMV_H_
+#define VP9_ENCODER_VP9_ENCODEMV_H_
+
+#include "vp9/encoder/vp9_onyx_int.h"
+
+void vp9_write_nmv_probs(VP9_COMP* const, int usehp, vp9_writer* const);
+
+void vp9_encode_mv(vp9_writer* w, const MV* mv, const MV* ref,
+ const nmv_context* mvctx, int usehp);
+
+void vp9_build_nmv_cost_table(int *mvjoint,
+ int *mvcost[2],
+ const nmv_context* const mvctx,
+ int usehp,
+ int mvc_flag_v,
+ int mvc_flag_h);
+void vp9_update_nmv_count(VP9_COMP *cpi, MACROBLOCK *x,
+ int_mv *best_ref_mv, int_mv *second_best_ref_mv);
+
+void print_nmvcounts(nmv_context_counts tnmvcounts);
+
+#endif // VP9_ENCODER_VP9_ENCODEMV_H_
diff --git a/libvpx/vp9/encoder/vp9_firstpass.c b/libvpx/vp9/encoder/vp9_firstpass.c
new file mode 100644
index 0000000..5e26cd8
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_firstpass.c
@@ -0,0 +1,2648 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "math.h"
+#include "limits.h"
+#include "vp9/encoder/vp9_block.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/encoder/vp9_variance.h"
+#include "vp9/encoder/vp9_encodeintra.h"
+#include "vp9/encoder/vp9_mcomp.h"
+#include "vp9/encoder/vp9_firstpass.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vp9/encoder/vp9_encodeframe.h"
+#include "vp9/encoder/vp9_encodemb.h"
+#include "vp9/common/vp9_extend.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_scale/yv12config.h"
+#include <stdio.h>
+#include "vp9/encoder/vp9_quantize.h"
+#include "vp9/encoder/vp9_rdopt.h"
+#include "vp9/encoder/vp9_ratectrl.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/common/vp9_entropymv.h"
+#include "vp9/encoder/vp9_encodemv.h"
+#include "./vpx_scale_rtcd.h"
+// TODO(jkoleszar): for setup_dst_planes
+#include "vp9/common/vp9_reconinter.h"
+
+#define OUTPUT_FPF 0
+
+#define IIFACTOR 12.5
+#define IIKFACTOR1 12.5
+#define IIKFACTOR2 15.0
+#define RMAX 512.0
+#define GF_RMAX 96.0
+#define ERR_DIVISOR 150.0
+#define MIN_DECAY_FACTOR 0.1
+
+#define KF_MB_INTRA_MIN 150
+#define GF_MB_INTRA_MIN 100
+
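+// Bias a value away from zero before it is used as a divisor.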
+#define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? (x) - 0.000001 : (x) + 0.000001)
+
+#define POW1 ((double)cpi->oxcf.two_pass_vbrbias / 100.0)
+#define POW2 ((double)cpi->oxcf.two_pass_vbrbias / 100.0)
+
+static void swap_yv12(YV12_BUFFER_CONFIG *a, YV12_BUFFER_CONFIG *b) {
+ YV12_BUFFER_CONFIG temp = *a;
+ *a = *b;
+ *b = temp;
+}
+
+static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame);
+
+static int select_cq_level(int qindex) {
+ int ret_val = QINDEX_RANGE - 1;
+ int i;
+
+ double target_q = (vp9_convert_qindex_to_q(qindex) * 0.5847) + 1.0;
+
+ for (i = 0; i < QINDEX_RANGE; i++) {
+ if (target_q <= vp9_convert_qindex_to_q(i)) {
+ ret_val = i;
+ break;
+ }
+ }
+
+ return ret_val;
+}
+
+// Reset the first pass stats read position to the given point.
+static void reset_fpf_position(VP9_COMP *cpi, FIRSTPASS_STATS *position) {
+ cpi->twopass.stats_in = position;
+}
+
+static int lookup_next_frame_stats(VP9_COMP *cpi, FIRSTPASS_STATS *next_frame) {
+ if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end)
+ return EOF;
+
+ *next_frame = *cpi->twopass.stats_in;
+ return 1;
+}
+
+// Read frame stats at an offset from the current position
+static int read_frame_stats(VP9_COMP *cpi,
+ FIRSTPASS_STATS *frame_stats,
+ int offset) {
+ FIRSTPASS_STATS *fps_ptr = cpi->twopass.stats_in;
+
+ // Check legality of offset
+ if (offset >= 0) {
+ if (&fps_ptr[offset] >= cpi->twopass.stats_in_end)
+ return EOF;
+ } else {
+ if (&fps_ptr[offset] < cpi->twopass.stats_in_start)
+ return EOF;
+ }
+
+ *frame_stats = fps_ptr[offset];
+ return 1;
+}
+
+static int input_stats(VP9_COMP *cpi, FIRSTPASS_STATS *fps) {
+ if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end)
+ return EOF;
+
+ *fps = *cpi->twopass.stats_in;
+ cpi->twopass.stats_in =
+ (void *)((char *)cpi->twopass.stats_in + sizeof(FIRSTPASS_STATS));
+ return 1;
+}
+
+static void output_stats(const VP9_COMP *cpi,
+ struct vpx_codec_pkt_list *pktlist,
+ FIRSTPASS_STATS *stats) {
+ struct vpx_codec_cx_pkt pkt;
+ pkt.kind = VPX_CODEC_STATS_PKT;
+ pkt.data.twopass_stats.buf = stats;
+ pkt.data.twopass_stats.sz = sizeof(FIRSTPASS_STATS);
+ vpx_codec_pkt_list_add(pktlist, &pkt);
+
+// TEMP debug code
+#if OUTPUT_FPF
+
+ {
+ FILE *fpfile;
+ fpfile = fopen("firstpass.stt", "a");
+
+ fprintf(fpfile, "%12.0f %12.0f %12.0f %12.0f %12.0f %12.4f %12.4f"
+ "%12.4f %12.4f %12.4f %12.4f %12.4f %12.4f %12.4f"
+ "%12.0f %12.0f %12.4f %12.0f %12.0f %12.4f\n",
+ stats->frame,
+ stats->intra_error,
+ stats->coded_error,
+ stats->sr_coded_error,
+ stats->ssim_weighted_pred_err,
+ stats->pcnt_inter,
+ stats->pcnt_motion,
+ stats->pcnt_second_ref,
+ stats->pcnt_neutral,
+ stats->MVr,
+ stats->mvr_abs,
+ stats->MVc,
+ stats->mvc_abs,
+ stats->MVrv,
+ stats->MVcv,
+ stats->mv_in_out_count,
+ stats->new_mv_count,
+ stats->count,
+ stats->duration);
+ fclose(fpfile);
+ }
+#endif
+}
+
+static void zero_stats(FIRSTPASS_STATS *section) {
+ section->frame = 0.0;
+ section->intra_error = 0.0;
+ section->coded_error = 0.0;
+ section->sr_coded_error = 0.0;
+ section->ssim_weighted_pred_err = 0.0;
+ section->pcnt_inter = 0.0;
+ section->pcnt_motion = 0.0;
+ section->pcnt_second_ref = 0.0;
+ section->pcnt_neutral = 0.0;
+ section->MVr = 0.0;
+ section->mvr_abs = 0.0;
+ section->MVc = 0.0;
+ section->mvc_abs = 0.0;
+ section->MVrv = 0.0;
+ section->MVcv = 0.0;
+ section->mv_in_out_count = 0.0;
+ section->new_mv_count = 0.0;
+ section->count = 0.0;
+ section->duration = 1.0;
+}
+
+static void accumulate_stats(FIRSTPASS_STATS *section, FIRSTPASS_STATS *frame) {
+ section->frame += frame->frame;
+ section->intra_error += frame->intra_error;
+ section->coded_error += frame->coded_error;
+ section->sr_coded_error += frame->sr_coded_error;
+ section->ssim_weighted_pred_err += frame->ssim_weighted_pred_err;
+ section->pcnt_inter += frame->pcnt_inter;
+ section->pcnt_motion += frame->pcnt_motion;
+ section->pcnt_second_ref += frame->pcnt_second_ref;
+ section->pcnt_neutral += frame->pcnt_neutral;
+ section->MVr += frame->MVr;
+ section->mvr_abs += frame->mvr_abs;
+ section->MVc += frame->MVc;
+ section->mvc_abs += frame->mvc_abs;
+ section->MVrv += frame->MVrv;
+ section->MVcv += frame->MVcv;
+ section->mv_in_out_count += frame->mv_in_out_count;
+ section->new_mv_count += frame->new_mv_count;
+ section->count += frame->count;
+ section->duration += frame->duration;
+}
+
+static void subtract_stats(FIRSTPASS_STATS *section, FIRSTPASS_STATS *frame) {
+ section->frame -= frame->frame;
+ section->intra_error -= frame->intra_error;
+ section->coded_error -= frame->coded_error;
+ section->sr_coded_error -= frame->sr_coded_error;
+ section->ssim_weighted_pred_err -= frame->ssim_weighted_pred_err;
+ section->pcnt_inter -= frame->pcnt_inter;
+ section->pcnt_motion -= frame->pcnt_motion;
+ section->pcnt_second_ref -= frame->pcnt_second_ref;
+ section->pcnt_neutral -= frame->pcnt_neutral;
+ section->MVr -= frame->MVr;
+ section->mvr_abs -= frame->mvr_abs;
+ section->MVc -= frame->MVc;
+ section->mvc_abs -= frame->mvc_abs;
+ section->MVrv -= frame->MVrv;
+ section->MVcv -= frame->MVcv;
+ section->mv_in_out_count -= frame->mv_in_out_count;
+ section->new_mv_count -= frame->new_mv_count;
+ section->count -= frame->count;
+ section->duration -= frame->duration;
+}
+
+static void avg_stats(FIRSTPASS_STATS *section) {
+ if (section->count < 1.0)
+ return;
+
+ section->intra_error /= section->count;
+ section->coded_error /= section->count;
+ section->sr_coded_error /= section->count;
+ section->ssim_weighted_pred_err /= section->count;
+ section->pcnt_inter /= section->count;
+ section->pcnt_second_ref /= section->count;
+ section->pcnt_neutral /= section->count;
+ section->pcnt_motion /= section->count;
+ section->MVr /= section->count;
+ section->mvr_abs /= section->count;
+ section->MVc /= section->count;
+ section->mvc_abs /= section->count;
+ section->MVrv /= section->count;
+ section->MVcv /= section->count;
+ section->mv_in_out_count /= section->count;
+ section->duration /= section->count;
+}
+
+// Calculate a modified error used in distributing bits between easier
+// and harder frames.
+static double calculate_modified_err(VP9_COMP *cpi,
+ FIRSTPASS_STATS *this_frame) {
+ const FIRSTPASS_STATS *const stats = &cpi->twopass.total_stats;
+ const double av_err = stats->ssim_weighted_pred_err / stats->count;
+ const double this_err = this_frame->ssim_weighted_pred_err;
+ return av_err * pow(this_err / DOUBLE_DIVIDE_CHECK(av_err),
+ this_err > av_err ? POW1 : POW2);
+}
+
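+// Per-luma-level weights used by simple_weight(): near-black levels (< 32)
+// contribute almost nothing, levels 32-63 ramp up linearly, and levels of
+// 64 and above carry full weight.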
+static const double weight_table[256] = {
+ 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
+ 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
+ 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
+ 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
+ 0.020000, 0.031250, 0.062500, 0.093750, 0.125000, 0.156250, 0.187500, 0.218750,
+ 0.250000, 0.281250, 0.312500, 0.343750, 0.375000, 0.406250, 0.437500, 0.468750,
+ 0.500000, 0.531250, 0.562500, 0.593750, 0.625000, 0.656250, 0.687500, 0.718750,
+ 0.750000, 0.781250, 0.812500, 0.843750, 0.875000, 0.906250, 0.937500, 0.968750,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
+ 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000
+};
+
+static double simple_weight(YV12_BUFFER_CONFIG *source) {
+ int i, j;
+
+ uint8_t *src = source->y_buffer;
+ double sum_weights = 0.0;
+
+ // Loop through the raw Y plane, examining pixel levels to create a
+ // weight for the image.
+ i = source->y_height;
+ do {
+ j = source->y_width;
+ do {
+ sum_weights += weight_table[*src];
+ src++;
+ } while (--j);
+ src -= source->y_width;
+ src += source->y_stride;
+ } while (--i);
+
+ sum_weights /= (source->y_height * source->y_width);
+
+ return sum_weights;
+}
+
+// This function returns the current per frame maximum bitrate target.
+static int frame_max_bits(VP9_COMP *cpi) {
+ // Max allocation for a single frame based on the max section guidelines
+ // passed in and how many bits are left.
+ // For VBR base this on the bits and frames left plus the
+ // two_pass_vbrmax_section rate passed in by the user.
+ const double max_bits = (1.0 * cpi->twopass.bits_left /
+ (cpi->twopass.total_stats.count - cpi->common.current_video_frame)) *
+ (cpi->oxcf.two_pass_vbrmax_section / 100.0);
+
+ // Trap case where we are out of bits.
+ return MAX((int)max_bits, 0);
+}
+
+void vp9_init_first_pass(VP9_COMP *cpi) {
+ zero_stats(&cpi->twopass.total_stats);
+}
+
+void vp9_end_first_pass(VP9_COMP *cpi) {
+ output_stats(cpi, cpi->output_pkt_list, &cpi->twopass.total_stats);
+}
+
+static void zz_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
+ YV12_BUFFER_CONFIG *recon_buffer,
+ int *best_motion_err, int recon_yoffset) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+
+ // Set up pointers for this macro block recon buffer
+ xd->plane[0].pre[0].buf = recon_buffer->y_buffer + recon_yoffset;
+
+ switch (xd->mode_info_context->mbmi.sb_type) {
+ case BLOCK_SIZE_SB8X8:
+ vp9_mse8x8(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
+ (unsigned int *)(best_motion_err));
+ break;
+ case BLOCK_SIZE_SB16X8:
+ vp9_mse16x8(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
+ (unsigned int *)(best_motion_err));
+ break;
+ case BLOCK_SIZE_SB8X16:
+ vp9_mse8x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
+ (unsigned int *)(best_motion_err));
+ break;
+ default:
+ vp9_mse16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
+ (unsigned int *)(best_motion_err));
+ break;
+ }
+}
+
+static enum BlockSize get_bs(BLOCK_SIZE_TYPE b) {
+ switch (b) {
+ case BLOCK_SIZE_SB8X8:
+ return BLOCK_8X8;
+ case BLOCK_SIZE_SB16X8:
+ return BLOCK_16X8;
+ case BLOCK_SIZE_SB8X16:
+ return BLOCK_8X16;
+ default:
+ return BLOCK_16X16;
+ }
+}
+
+static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
+ int_mv *ref_mv, MV *best_mv,
+ YV12_BUFFER_CONFIG *recon_buffer,
+ int *best_motion_err, int recon_yoffset) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int num00;
+
+ int_mv tmp_mv;
+ int_mv ref_mv_full;
+
+ int tmp_err;
+ int step_param = 3;
+ int further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;
+ int n;
+ vp9_variance_fn_ptr_t v_fn_ptr =
+ cpi->fn_ptr[get_bs(xd->mode_info_context->mbmi.sb_type)];
+ int new_mv_mode_penalty = 256;
+
+ int sr = 0;
+ int quart_frm = MIN(cpi->common.width, cpi->common.height);
+
+ // Refine the motion search range according to the frame dimensions
+ // for the first pass test.
+ while ((quart_frm << sr) < MAX_FULL_PEL_VAL)
+ sr++;
+ if (sr)
+ sr--;
+
+ step_param += sr;
+ further_steps -= sr;
+
+ // override the default variance function to use MSE
+ switch (xd->mode_info_context->mbmi.sb_type) {
+ case BLOCK_SIZE_SB8X8:
+ v_fn_ptr.vf = vp9_mse8x8;
+ break;
+ case BLOCK_SIZE_SB16X8:
+ v_fn_ptr.vf = vp9_mse16x8;
+ break;
+ case BLOCK_SIZE_SB8X16:
+ v_fn_ptr.vf = vp9_mse8x16;
+ break;
+ default:
+ v_fn_ptr.vf = vp9_mse16x16;
+ break;
+ }
+
+ // Set up pointers for this macro block recon buffer
+ xd->plane[0].pre[0].buf = recon_buffer->y_buffer + recon_yoffset;
+
+ // Initial step/diamond search centred on best mv
+ tmp_mv.as_int = 0;
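+ // Convert the eighth-pel reference MV to full-pel units for the search.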
+ ref_mv_full.as_mv.col = ref_mv->as_mv.col >> 3;
+ ref_mv_full.as_mv.row = ref_mv->as_mv.row >> 3;
+ tmp_err = cpi->diamond_search_sad(x, &ref_mv_full, &tmp_mv, step_param,
+ x->sadperbit16, &num00, &v_fn_ptr,
+ x->nmvjointcost,
+ x->mvcost, ref_mv);
+ if (tmp_err < INT_MAX - new_mv_mode_penalty)
+ tmp_err += new_mv_mode_penalty;
+
+ if (tmp_err < *best_motion_err) {
+ *best_motion_err = tmp_err;
+ best_mv->row = tmp_mv.as_mv.row;
+ best_mv->col = tmp_mv.as_mv.col;
+ }
+
+ // Further step/diamond searches as necessary
+ n = num00;
+ num00 = 0;
+
+ while (n < further_steps) {
+ n++;
+
+ if (num00)
+ num00--;
+ else {
+ tmp_err = cpi->diamond_search_sad(x, &ref_mv_full, &tmp_mv,
+ step_param + n, x->sadperbit16,
+ &num00, &v_fn_ptr,
+ x->nmvjointcost,
+ x->mvcost, ref_mv);
+ if (tmp_err < INT_MAX - new_mv_mode_penalty)
+ tmp_err += new_mv_mode_penalty;
+
+ if (tmp_err < *best_motion_err) {
+ *best_motion_err = tmp_err;
+ best_mv->row = tmp_mv.as_mv.row;
+ best_mv->col = tmp_mv.as_mv.col;
+ }
+ }
+ }
+}
+
+void vp9_first_pass(VP9_COMP *cpi) {
+ int mb_row, mb_col;
+ MACROBLOCK *const x = &cpi->mb;
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+
+ int recon_yoffset, recon_uvoffset;
+ const int lst_yv12_idx = cm->ref_frame_map[cpi->lst_fb_idx];
+ const int gld_yv12_idx = cm->ref_frame_map[cpi->gld_fb_idx];
+ YV12_BUFFER_CONFIG *const lst_yv12 = &cm->yv12_fb[lst_yv12_idx];
+ YV12_BUFFER_CONFIG *const new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
+ YV12_BUFFER_CONFIG *const gld_yv12 = &cm->yv12_fb[gld_yv12_idx];
+ const int recon_y_stride = lst_yv12->y_stride;
+ const int recon_uv_stride = lst_yv12->uv_stride;
+ int64_t intra_error = 0;
+ int64_t coded_error = 0;
+ int64_t sr_coded_error = 0;
+
+ int sum_mvr = 0, sum_mvc = 0;
+ int sum_mvr_abs = 0, sum_mvc_abs = 0;
+ int sum_mvrs = 0, sum_mvcs = 0;
+ int mvcount = 0;
+ int intercount = 0;
+ int second_ref_count = 0;
+ int intrapenalty = 256;
+ int neutral_count = 0;
+ int new_mv_count = 0;
+ int sum_in_vectors = 0;
+ uint32_t lastmv_as_int = 0;
+
+ int_mv zero_ref_mv;
+
+ zero_ref_mv.as_int = 0;
+
+ vp9_clear_system_state(); // __asm emms;
+
+ vp9_setup_src_planes(x, cpi->Source, 0, 0);
+ setup_pre_planes(xd, lst_yv12, NULL, 0, 0, NULL, NULL);
+ setup_dst_planes(xd, new_yv12, 0, 0);
+
+ x->partition_info = x->pi;
+
+ xd->mode_info_context = cm->mi;
+
+ vp9_build_block_offsets(x);
+
+ vp9_setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
+
+ vp9_frame_init_quantizer(cpi);
+
+ // Initialise the MV cost table to the defaults
+ // if( cm->current_video_frame == 0)
+ // if ( 0 )
+ {
+ vp9_init_mv_probs(cm);
+ vp9_initialize_rd_consts(cpi, cm->base_qindex + cm->y_dc_delta_q);
+ }
+
+ // for each macroblock row in image
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
+ int_mv best_ref_mv;
+
+ best_ref_mv.as_int = 0;
+
+ // reset above block coeffs
+ xd->up_available = (mb_row != 0);
+ recon_yoffset = (mb_row * recon_y_stride * 16);
+ recon_uvoffset = (mb_row * recon_uv_stride * 8);
+
+ // Set up limit values for motion vectors to prevent them from
+ // extending outside the UMV borders.
+ x->mv_row_min = -((mb_row * 16) + (VP9BORDERINPIXELS - 8));
+ x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
+ + (VP9BORDERINPIXELS - 8);
+
+ // for each macroblock col in image
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+ int this_error;
+ int gf_motion_error = INT_MAX;
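+ // DC prediction is used only when the MB lies in the top row or the
+ // left column, but not at the top-left corner.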
+ int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
+
+ xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
+ xd->plane[1].dst.buf = new_yv12->u_buffer + recon_uvoffset;
+ xd->plane[2].dst.buf = new_yv12->v_buffer + recon_uvoffset;
+ xd->left_available = (mb_col != 0);
+
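+ // Pick a block size that fits within the frame; macroblocks that
+ // straddle the right or bottom edge are handled as 16x8, 8x16 or 8x8.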
+ if (mb_col * 2 + 1 < cm->mi_cols) {
+ if (mb_row * 2 + 1 < cm->mi_rows) {
+ xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_MB16X16;
+ } else {
+ xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB16X8;
+ }
+ } else {
+ if (mb_row * 2 + 1 < cm->mi_rows) {
+ xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB8X16;
+ } else {
+ xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB8X8;
+ }
+ }
+ xd->mode_info_context->mbmi.ref_frame[0] = INTRA_FRAME;
+ set_mi_row_col(cm, xd,
+ mb_row << 1,
+ 1 << mi_height_log2(xd->mode_info_context->mbmi.sb_type),
+ mb_col << 1,
+ 1 << mi_width_log2(xd->mode_info_context->mbmi.sb_type));
+
+ // do intra 16x16 prediction
+ this_error = vp9_encode_intra(cpi, x, use_dc_pred);
+
+ // "intrapenalty" below deals with situations where the intra and inter error scores are very low (eg a plain black frame)
+ // We do not have special cases in first pass for 0,0 and nearest etc so all inter modes carry an overhead cost estimate fot the mv.
+ // When the error score is very low this causes us to pick all or lots of INTRA modes and throw lots of key frames.
+ // This penalty adds a cost matching that of a 0,0 mv to the intra case.
+ this_error += intrapenalty;
+
+ // Cumulative intra error total
+ intra_error += (int64_t)this_error;
+
+ // Set up limit values for motion vectors to prevent them from
+ // extending outside the UMV borders.
+ x->mv_col_min = -((mb_col * 16) + (VP9BORDERINPIXELS - 8));
+ x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16)
+ + (VP9BORDERINPIXELS - 8);
+
+ // Other than for the first frame do a motion search
+ if (cm->current_video_frame > 0) {
+ int tmp_err;
+ int motion_error = INT_MAX;
+ int_mv mv, tmp_mv;
+
+ // Simple 0,0 motion with no mv overhead
+ zz_motion_search(cpi, x, lst_yv12, &motion_error, recon_yoffset);
+ mv.as_int = tmp_mv.as_int = 0;
+
+ // Test last reference frame using the previous best mv as the
+ // starting point (best reference) for the search
+ first_pass_motion_search(cpi, x, &best_ref_mv,
+ &mv.as_mv, lst_yv12,
+ &motion_error, recon_yoffset);
+
+ // If the current best reference mv is not centred on 0,0 then do a
+ // 0,0 based search as well.
+ if (best_ref_mv.as_int) {
+ tmp_err = INT_MAX;
+ first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv.as_mv,
+ lst_yv12, &tmp_err, recon_yoffset);
+
+ if (tmp_err < motion_error) {
+ motion_error = tmp_err;
+ mv.as_int = tmp_mv.as_int;
+ }
+ }
+
+ // Experimental search in an older reference frame
+ if (cm->current_video_frame > 1) {
+ // Simple 0,0 motion with no mv overhead
+ zz_motion_search(cpi, x, gld_yv12,
+ &gf_motion_error, recon_yoffset);
+
+ first_pass_motion_search(cpi, x, &zero_ref_mv,
+ &tmp_mv.as_mv, gld_yv12,
+ &gf_motion_error, recon_yoffset);
+
+ if ((gf_motion_error < motion_error) &&
+ (gf_motion_error < this_error)) {
+ second_ref_count++;
+ }
+
+ // Reset to last frame as reference buffer
+ xd->plane[0].pre[0].buf = lst_yv12->y_buffer + recon_yoffset;
+ xd->plane[1].pre[0].buf = lst_yv12->u_buffer + recon_uvoffset;
+ xd->plane[2].pre[0].buf = lst_yv12->v_buffer + recon_uvoffset;
+
+ // In accumulating a score for the older reference frame take the
+ // best of the motion predicted score and the intra coded error
+ // (just as will be done for accumulation of "coded_error" for the
+ // last frame).
+ if (gf_motion_error < this_error)
+ sr_coded_error += gf_motion_error;
+ else
+ sr_coded_error += this_error;
+ } else
+ sr_coded_error += motion_error;
+
+ /* Intra assumed best */
+ best_ref_mv.as_int = 0;
+
+ if (motion_error <= this_error) {
+ // Keep a count of cases where the inter and intra were
+ // very close and very low. This helps with scene cut
+ // detection for example in cropped clips with black bars
+ // at the sides or top and bottom.
+ if ((((this_error - intrapenalty) * 9) <=
+ (motion_error * 10)) &&
+ (this_error < (2 * intrapenalty))) {
+ neutral_count++;
+ }
+
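+ // Scale the full-pel motion search result back to eighth-pel units.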
+ mv.as_mv.row <<= 3;
+ mv.as_mv.col <<= 3;
+ this_error = motion_error;
+ vp9_set_mbmode_and_mvs(x, NEWMV, &mv);
+ xd->mode_info_context->mbmi.txfm_size = TX_4X4;
+ xd->mode_info_context->mbmi.ref_frame[0] = LAST_FRAME;
+ xd->mode_info_context->mbmi.ref_frame[1] = NONE;
+ vp9_build_inter_predictors_sby(xd, mb_row << 1,
+ mb_col << 1,
+ xd->mode_info_context->mbmi.sb_type);
+ vp9_encode_sby(cm, x, xd->mode_info_context->mbmi.sb_type);
+ sum_mvr += mv.as_mv.row;
+ sum_mvr_abs += abs(mv.as_mv.row);
+ sum_mvc += mv.as_mv.col;
+ sum_mvc_abs += abs(mv.as_mv.col);
+ sum_mvrs += mv.as_mv.row * mv.as_mv.row;
+ sum_mvcs += mv.as_mv.col * mv.as_mv.col;
+ intercount++;
+
+ best_ref_mv.as_int = mv.as_int;
+
+ // Was the vector non-zero?
+ if (mv.as_int) {
+ mvcount++;
+
+ // Was it different from the last non-zero vector?
+ if (mv.as_int != lastmv_as_int)
+ new_mv_count++;
+ lastmv_as_int = mv.as_int;
+
+ // Does the row vector point inwards or outwards?
+ if (mb_row < cm->mb_rows / 2) {
+ if (mv.as_mv.row > 0)
+ sum_in_vectors--;
+ else if (mv.as_mv.row < 0)
+ sum_in_vectors++;
+ } else if (mb_row > cm->mb_rows / 2) {
+ if (mv.as_mv.row > 0)
+ sum_in_vectors++;
+ else if (mv.as_mv.row < 0)
+ sum_in_vectors--;
+ }
+
+ // Does the column vector point inwards or outwards?
+ if (mb_col < cm->mb_cols / 2) {
+ if (mv.as_mv.col > 0)
+ sum_in_vectors--;
+ else if (mv.as_mv.col < 0)
+ sum_in_vectors++;
+ } else if (mb_col > cm->mb_cols / 2) {
+ if (mv.as_mv.col > 0)
+ sum_in_vectors++;
+ else if (mv.as_mv.col < 0)
+ sum_in_vectors--;
+ }
+ }
+ }
+ } else
+ sr_coded_error += (int64_t)this_error;
+
+ coded_error += (int64_t)this_error;
+
+ // adjust to the next column of macroblocks
+ x->plane[0].src.buf += 16;
+ x->plane[1].src.buf += 8;
+ x->plane[2].src.buf += 8;
+
+ recon_yoffset += 16;
+ recon_uvoffset += 8;
+ }
+
+ // adjust to the next row of mbs
+ x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols;
+ x->plane[1].src.buf += 8 * x->plane[1].src.stride - 8 * cm->mb_cols;
+ x->plane[2].src.buf += 8 * x->plane[2].src.stride - 8 * cm->mb_cols;
+
+ vp9_clear_system_state(); // __asm emms;
+ }
+
+ vp9_clear_system_state(); // __asm emms;
+ {
+ double weight = 0.0;
+
+ FIRSTPASS_STATS fps;
+
+ fps.frame = cm->current_video_frame;
+ fps.intra_error = (double)(intra_error >> 8);
+ fps.coded_error = (double)(coded_error >> 8);
+ fps.sr_coded_error = (double)(sr_coded_error >> 8);
+ weight = simple_weight(cpi->Source);
+
+ if (weight < 0.1)
+ weight = 0.1;
+
+ fps.ssim_weighted_pred_err = fps.coded_error * weight;
+
+ fps.pcnt_inter = 0.0;
+ fps.pcnt_motion = 0.0;
+ fps.MVr = 0.0;
+ fps.mvr_abs = 0.0;
+ fps.MVc = 0.0;
+ fps.mvc_abs = 0.0;
+ fps.MVrv = 0.0;
+ fps.MVcv = 0.0;
+ fps.mv_in_out_count = 0.0;
+ fps.new_mv_count = 0.0;
+ fps.count = 1.0;
+
+ fps.pcnt_inter = (double)intercount / cm->MBs;
+ fps.pcnt_second_ref = (double)second_ref_count / cm->MBs;
+ fps.pcnt_neutral = (double)neutral_count / cm->MBs;
+
+ if (mvcount > 0) {
+ fps.MVr = (double)sum_mvr / (double)mvcount;
+ fps.mvr_abs = (double)sum_mvr_abs / (double)mvcount;
+ fps.MVc = (double)sum_mvc / (double)mvcount;
+ fps.mvc_abs = (double)sum_mvc_abs / (double)mvcount;
+ fps.MVrv = ((double)sum_mvrs -
+ (fps.MVr * fps.MVr / (double)mvcount)) / (double)mvcount;
+ fps.MVcv = ((double)sum_mvcs -
+ (fps.MVc * fps.MVc / (double)mvcount)) / (double)mvcount;
+ fps.mv_in_out_count = (double)sum_in_vectors / (double)(mvcount * 2);
+ fps.new_mv_count = new_mv_count;
+
+ fps.pcnt_motion = (double)mvcount / cm->MBs;
+ }
+
+ // TODO: handle the case when duration is set to 0, or something less
+ // than the full time between subsequent values of cpi->source_time_stamp.
+ fps.duration = (double)(cpi->source->ts_end
+ - cpi->source->ts_start);
+
+ // don't want to do output stats with a stack variable!
+ cpi->twopass.this_frame_stats = fps;
+ output_stats(cpi, cpi->output_pkt_list, &cpi->twopass.this_frame_stats);
+ accumulate_stats(&cpi->twopass.total_stats, &fps);
+ }
+
+ // Copy the previous Last Frame back into gf and arf buffers if the
+ // prediction is good enough... but also don't allow it to lag too far.
+ if ((cpi->twopass.sr_update_lag > 3) ||
+ ((cm->current_video_frame > 0) &&
+ (cpi->twopass.this_frame_stats.pcnt_inter > 0.20) &&
+ ((cpi->twopass.this_frame_stats.intra_error /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.this_frame_stats.coded_error)) >
+ 2.0))) {
+ vp8_yv12_copy_frame(lst_yv12, gld_yv12);
+ cpi->twopass.sr_update_lag = 1;
+ } else
+ cpi->twopass.sr_update_lag++;
+
+ // swap frame pointers so last frame refers to the frame we just compressed
+ swap_yv12(lst_yv12, new_yv12);
+
+ vp9_extend_frame_borders(lst_yv12, cm->subsampling_x, cm->subsampling_y);
+
+ // Special case for the first frame. Copy into the GF buffer as a
+ // second reference.
+ if (cm->current_video_frame == 0)
+ vp8_yv12_copy_frame(lst_yv12, gld_yv12);
+
+ // Use this to see what the first pass reconstruction looks like.
+ if (0) {
+ char filename[512];
+ FILE *recon_file;
+ snprintf(filename, sizeof(filename), "enc%04d.yuv",
+ (int)cm->current_video_frame);
+
+ if (cm->current_video_frame == 0)
+ recon_file = fopen(filename, "wb");
+ else
+ recon_file = fopen(filename, "ab");
+
+ (void)fwrite(lst_yv12->buffer_alloc, lst_yv12->frame_size, 1, recon_file);
+ fclose(recon_file);
+ }
+
+ cm->current_video_frame++;
+}
+
+// Estimate a cost per mb attributable to overheads such as the coding of
+// modes and motion vectors.
+// Currently simplistic in its assumptions for testing.
+
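+// Cost in bits of an event with probability "prob", i.e. -log2(prob).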
+static double bitcost(double prob) {
+ return -(log(prob) / log(2.0));
+}
+
+static int64_t estimate_modemvcost(VP9_COMP *cpi,
+ FIRSTPASS_STATS *fpstats) {
+#if 0
+ int mv_cost;
+ int mode_cost;
+
+ double av_pct_inter = fpstats->pcnt_inter / fpstats->count;
+ double av_pct_motion = fpstats->pcnt_motion / fpstats->count;
+ double av_intra = (1.0 - av_pct_inter);
+
+ double zz_cost;
+ double motion_cost;
+ double intra_cost;
+
+ zz_cost = bitcost(av_pct_inter - av_pct_motion);
+ motion_cost = bitcost(av_pct_motion);
+ intra_cost = bitcost(av_intra);
+
+ // Estimate of extra bits per mv overhead for mbs
+ // << 9 is the normalization to the (bits * 512) used in vp9_bits_per_mb
+ mv_cost = ((int)(fpstats->new_mv_count / fpstats->count) * 8) << 9;
+
+ // Crude estimate of overhead cost from modes
+ // << 9 is the normalization to (bits * 512) used in vp9_bits_per_mb
+ mode_cost =
+ (int)((((av_pct_inter - av_pct_motion) * zz_cost) +
+ (av_pct_motion * motion_cost) +
+ (av_intra * intra_cost)) * cpi->common.MBs) << 9;
+
+ // return mv_cost + mode_cost;
+ // TODO(PGW): Fix overhead costs for extended Q range.
+#endif
+ return 0;
+}
+
+static double calc_correction_factor(double err_per_mb,
+ double err_divisor,
+ double pt_low,
+ double pt_high,
+ int q) {
+ const double error_term = err_per_mb / err_divisor;
+
+ // Adjustment based on actual quantizer to power term.
+ const double power_term = MIN(vp9_convert_qindex_to_q(q) * 0.01 + pt_low,
+ pt_high);
+
+ // Calculate correction factor
+ if (power_term < 1.0)
+ assert(error_term >= 0.0);
+
+ return fclamp(pow(error_term, power_term), 0.05, 5.0);
+}
+
+// Given a current maxQ value sets a range for future values.
+// TODO(PGW): This code removes the direct dependency on QIndex to
+// determine the range (it now uses the actual quantizer) but has not
+// been tuned.
+static void adjust_maxq_qrange(VP9_COMP *cpi) {
+ int i;
+ // Set the max corresponding to cpi->avg_q * 2.0
+ double q = cpi->avg_q * 2.0;
+ cpi->twopass.maxq_max_limit = cpi->worst_quality;
+ for (i = cpi->best_quality; i <= cpi->worst_quality; i++) {
+ cpi->twopass.maxq_max_limit = i;
+ if (vp9_convert_qindex_to_q(i) >= q)
+ break;
+ }
+
+ // Set the min corresponding to cpi->avg_q * 0.5
+ q = cpi->avg_q * 0.5;
+ cpi->twopass.maxq_min_limit = cpi->best_quality;
+ for (i = cpi->worst_quality; i >= cpi->best_quality; i--) {
+ cpi->twopass.maxq_min_limit = i;
+ if (vp9_convert_qindex_to_q(i) <= q)
+ break;
+ }
+}
+
+static int estimate_max_q(VP9_COMP *cpi,
+ FIRSTPASS_STATS *fpstats,
+ int section_target_bandwidth) {
+ int q;
+ int num_mbs = cpi->common.MBs;
+ int target_norm_bits_per_mb;
+
+ double section_err = fpstats->coded_error / fpstats->count;
+ double sr_correction;
+ double err_per_mb = section_err / num_mbs;
+ double err_correction_factor;
+ double speed_correction = 1.0;
+
+ if (section_target_bandwidth <= 0)
+ return cpi->twopass.maxq_max_limit; // Highest value allowed
+
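+ // Per-MB bit target in the (bits * 512) units used by vp9_bits_per_mb().
+ // The two branches order the multiply and divide so that large section
+ // targets do not overflow the intermediate product.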
+ target_norm_bits_per_mb = section_target_bandwidth < (1 << 20)
+ ? (512 * section_target_bandwidth) / num_mbs
+ : 512 * (section_target_bandwidth / num_mbs);
+
+ // Look at the drop in prediction quality between the last frame
+ // and the GF buffer (which contained an older frame).
+ if (fpstats->sr_coded_error > fpstats->coded_error) {
+ double sr_err_diff = (fpstats->sr_coded_error - fpstats->coded_error) /
+ (fpstats->count * cpi->common.MBs);
+ sr_correction = fclamp(pow(sr_err_diff / 32.0, 0.25), 0.75, 1.25);
+ } else {
+ sr_correction = 0.75;
+ }
+
+ // Calculate a corrective factor based on a rolling ratio of bits spent
+ // vs target bits
+ if (cpi->rolling_target_bits > 0 &&
+ cpi->active_worst_quality < cpi->worst_quality) {
+ double rolling_ratio = (double)cpi->rolling_actual_bits /
+ (double)cpi->rolling_target_bits;
+
+ if (rolling_ratio < 0.95)
+ cpi->twopass.est_max_qcorrection_factor -= 0.005;
+ else if (rolling_ratio > 1.05)
+ cpi->twopass.est_max_qcorrection_factor += 0.005;
+
+ cpi->twopass.est_max_qcorrection_factor = fclamp(
+ cpi->twopass.est_max_qcorrection_factor, 0.1, 10.0);
+ }
+
+ // Corrections for higher compression speed settings
+ // (reduced compression expected)
+ if (cpi->compressor_speed == 1)
+ speed_correction = cpi->oxcf.cpu_used <= 5 ?
+ 1.04 + (cpi->oxcf.cpu_used * 0.04) :
+ 1.25;
+
+ // Try and pick a max Q that will be high enough to encode the
+ // content at the given rate.
+ for (q = cpi->twopass.maxq_min_limit; q < cpi->twopass.maxq_max_limit; q++) {
+ int bits_per_mb_at_this_q;
+
+ err_correction_factor = calc_correction_factor(err_per_mb,
+ ERR_DIVISOR, 0.4, 0.90, q) *
+ sr_correction * speed_correction *
+ cpi->twopass.est_max_qcorrection_factor;
+
+ bits_per_mb_at_this_q = vp9_bits_per_mb(INTER_FRAME, q,
+ err_correction_factor);
+
+ if (bits_per_mb_at_this_q <= target_norm_bits_per_mb)
+ break;
+ }
+
+ // Restriction on active max q for constrained quality mode.
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY &&
+ q < cpi->cq_target_quality)
+ q = cpi->cq_target_quality;
+
+ // Adjust maxq_min_limit and maxq_max_limit limits based on
+ // average q observed in clip for non kf/gf/arf frames
+ // Give average a chance to settle though.
+ // TODO(PGW): This code is broken for the extended Q range.
+ if (cpi->ni_frames > ((int)cpi->twopass.total_stats.count >> 8) &&
+ cpi->ni_frames > 25)
+ adjust_maxq_qrange(cpi);
+
+ return q;
+}
+
+// For cq mode estimate a cq level that matches the observed
+// complexity and data rate.
+static int estimate_cq(VP9_COMP *cpi,
+ FIRSTPASS_STATS *fpstats,
+ int section_target_bandwidth) {
+ int q;
+ int num_mbs = cpi->common.MBs;
+ int target_norm_bits_per_mb;
+
+ double section_err = (fpstats->coded_error / fpstats->count);
+ double err_per_mb = section_err / num_mbs;
+ double err_correction_factor;
+ double sr_err_diff;
+ double sr_correction;
+ double speed_correction = 1.0;
+ double clip_iiratio;
+ double clip_iifactor;
+
+ target_norm_bits_per_mb = (section_target_bandwidth < (1 << 20))
+ ? (512 * section_target_bandwidth) / num_mbs
+ : 512 * (section_target_bandwidth / num_mbs);
+
+ // Corrections for higher compression speed settings
+ // (reduced compression expected)
+ if (cpi->compressor_speed == 1) {
+ if (cpi->oxcf.cpu_used <= 5)
+ speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04);
+ else
+ speed_correction = 1.25;
+ }
+
+ // Look at the drop in prediction quality between the last frame
+ // and the GF buffer (which contained an older frame).
+ if (fpstats->sr_coded_error > fpstats->coded_error) {
+ sr_err_diff =
+ (fpstats->sr_coded_error - fpstats->coded_error) /
+ (fpstats->count * cpi->common.MBs);
+ sr_correction = (sr_err_diff / 32.0);
+ sr_correction = pow(sr_correction, 0.25);
+ if (sr_correction < 0.75)
+ sr_correction = 0.75;
+ else if (sr_correction > 1.25)
+ sr_correction = 1.25;
+ } else {
+ sr_correction = 0.75;
+ }
+
+ // II ratio correction factor for clip as a whole
+ clip_iiratio = cpi->twopass.total_stats.intra_error /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.total_stats.coded_error);
+ clip_iifactor = 1.0 - ((clip_iiratio - 10.0) * 0.025);
+ if (clip_iifactor < 0.80)
+ clip_iifactor = 0.80;
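+ // Worked example: a clip-wide intra/inter ratio of 14.0 gives
+ // 1.0 - (14.0 - 10.0) * 0.025 = 0.9; ratios of 18.0 or more hit the
+ // 0.80 floor.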
+
+ // Try and pick a Q that can encode the content at the given rate.
+ for (q = 0; q < MAXQ; q++) {
+ int bits_per_mb_at_this_q;
+
+ // Error per MB based correction factor
+ err_correction_factor =
+ calc_correction_factor(err_per_mb, 100.0, 0.4, 0.90, q) *
+ sr_correction * speed_correction * clip_iifactor;
+
+ bits_per_mb_at_this_q =
+ vp9_bits_per_mb(INTER_FRAME, q, err_correction_factor);
+
+ if (bits_per_mb_at_this_q <= target_norm_bits_per_mb)
+ break;
+ }
+
+ // Clip value to range "best allowed to (worst allowed - 1)"
+ q = select_cq_level(q);
+ if (q >= cpi->worst_quality)
+ q = cpi->worst_quality - 1;
+ if (q < cpi->best_quality)
+ q = cpi->best_quality;
+
+ return q;
+}
+
+extern void vp9_new_frame_rate(VP9_COMP *cpi, double framerate);
+
+void vp9_init_second_pass(VP9_COMP *cpi) {
+ FIRSTPASS_STATS this_frame;
+ FIRSTPASS_STATS *start_pos;
+
+ double lower_bounds_min_rate = FRAME_OVERHEAD_BITS * cpi->oxcf.frame_rate;
+ double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth
+ * cpi->oxcf.two_pass_vbrmin_section / 100);
+
+ if (two_pass_min_rate < lower_bounds_min_rate)
+ two_pass_min_rate = lower_bounds_min_rate;
+
+ zero_stats(&cpi->twopass.total_stats);
+ zero_stats(&cpi->twopass.total_left_stats);
+
+ if (!cpi->twopass.stats_in_end)
+ return;
+
+ cpi->twopass.total_stats = *cpi->twopass.stats_in_end;
+ cpi->twopass.total_left_stats = cpi->twopass.total_stats;
+
+ // Each frame can have a different duration, as the frame rate in the
+ // source isn't guaranteed to be constant. The frame rate prior to the
+ // first frame encoded in the second pass is a guess. However, the sum
+ // duration is not: it is calculated from the actual durations of all
+ // frames in the first pass.
+ vp9_new_frame_rate(cpi, 10000000.0 * cpi->twopass.total_stats.count /
+ cpi->twopass.total_stats.duration);
+
+ cpi->output_frame_rate = cpi->oxcf.frame_rate;
+ cpi->twopass.bits_left = (int64_t)(cpi->twopass.total_stats.duration *
+ cpi->oxcf.target_bandwidth / 10000000.0);
+ cpi->twopass.bits_left -= (int64_t)(cpi->twopass.total_stats.duration *
+ two_pass_min_rate / 10000000.0);
+
+ // Calculate a minimum intra value to be used in determining the IIratio
+ // scores used in the second pass. We have this minimum to make sure
+ // that clips that are static but "low complexity" in the intra domain
+ // are still boosted appropriately for KF/GF/ARF
+ cpi->twopass.kf_intra_err_min = KF_MB_INTRA_MIN * cpi->common.MBs;
+ cpi->twopass.gf_intra_err_min = GF_MB_INTRA_MIN * cpi->common.MBs;
+
+ // This variable monitors how far behind the second ref update is lagging
+ cpi->twopass.sr_update_lag = 1;
+
+ // Scan the first pass file and calculate an average Intra / Inter error
+ // score ratio for the sequence.
+ {
+ double sum_iiratio = 0.0;
+ double IIRatio;
+
+ start_pos = cpi->twopass.stats_in; // Note starting "file" position
+
+ while (input_stats(cpi, &this_frame) != EOF) {
+ IIRatio = this_frame.intra_error / DOUBLE_DIVIDE_CHECK(this_frame.coded_error);
+ IIRatio = (IIRatio < 1.0) ? 1.0 : (IIRatio > 20.0) ? 20.0 : IIRatio;
+ sum_iiratio += IIRatio;
+ }
+
+ cpi->twopass.avg_iiratio = sum_iiratio /
+ DOUBLE_DIVIDE_CHECK((double)cpi->twopass.total_stats.count);
+
+ // Reset file position
+ reset_fpf_position(cpi, start_pos);
+ }
+
+ // Scan the first pass file and calculate a modified total error based
+ // upon the bias/power function used to allocate bits.
+ {
+ start_pos = cpi->twopass.stats_in; // Note starting "file" position
+
+ cpi->twopass.modified_error_total = 0.0;
+ cpi->twopass.modified_error_used = 0.0;
+
+ while (input_stats(cpi, &this_frame) != EOF) {
+ cpi->twopass.modified_error_total += calculate_modified_err(cpi, &this_frame);
+ }
+ cpi->twopass.modified_error_left = cpi->twopass.modified_error_total;
+
+ reset_fpf_position(cpi, start_pos); // Reset file position
+
+ }
+}
+
+void vp9_end_second_pass(VP9_COMP *cpi) {
+}
+
+// This function gives an estimate of how badly we believe
+// the prediction quality is decaying from frame to frame.
+static double get_prediction_decay_rate(VP9_COMP *cpi,
+ FIRSTPASS_STATS *next_frame) {
+ double prediction_decay_rate;
+ double second_ref_decay;
+ double mb_sr_err_diff;
+
+ // Initial basis is the % mbs inter coded
+ prediction_decay_rate = next_frame->pcnt_inter;
+
+ // Look at the observed drop in prediction quality between the last frame
+ // and the GF buffer (which contains an older frame).
+ mb_sr_err_diff = (next_frame->sr_coded_error - next_frame->coded_error) /
+ cpi->common.MBs;
+ if (mb_sr_err_diff <= 512.0) {
+ second_ref_decay = 1.0 - (mb_sr_err_diff / 512.0);
+ second_ref_decay = pow(second_ref_decay, 0.5);
+ if (second_ref_decay < 0.85)
+ second_ref_decay = 0.85;
+ else if (second_ref_decay > 1.0)
+ second_ref_decay = 1.0;
+ } else {
+ second_ref_decay = 0.85;
+ }
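+ // Worked example: mb_sr_err_diff = 128.0 gives
+ // pow(1.0 - 128.0 / 512.0, 0.5), roughly 0.866, which is then combined
+ // with the inter-coded fraction by taking the minimum below.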
+
+ if (second_ref_decay < prediction_decay_rate)
+ prediction_decay_rate = second_ref_decay;
+
+ return prediction_decay_rate;
+}
+
+// Function to test for a condition where a complex transition is followed
+// by a static section. For example in slide shows where there is a fade
+// between slides. This is to help with more optimal kf and gf positioning.
+static int detect_transition_to_still(
+ VP9_COMP *cpi,
+ int frame_interval,
+ int still_interval,
+ double loop_decay_rate,
+ double last_decay_rate) {
+ int trans_to_still = 0;
+
+ // Break clause to detect very still sections after motion
+ // For example a static image after a fade or other transition
+ // instead of a clean scene cut.
+ if (frame_interval > MIN_GF_INTERVAL &&
+ loop_decay_rate >= 0.999 &&
+ last_decay_rate < 0.9) {
+ int j;
+ FIRSTPASS_STATS *position = cpi->twopass.stats_in;
+ FIRSTPASS_STATS tmp_next_frame;
+ double zz_inter;
+
+ // Look ahead a few frames to see if the static condition persists...
+ for (j = 0; j < still_interval; j++) {
+ if (EOF == input_stats(cpi, &tmp_next_frame))
+ break;
+
+ zz_inter =
+ (tmp_next_frame.pcnt_inter - tmp_next_frame.pcnt_motion);
+ if (zz_inter < 0.999)
+ break;
+ }
+ // Reset file position
+ reset_fpf_position(cpi, position);
+
+ // Only if the static condition persists do we signal a transition to still.
+ if (j == still_interval)
+ trans_to_still = 1;
+ }
+
+ return trans_to_still;
+}
+
+// This function detects a flash through the high relative pcnt_second_ref
+// score in the frame following a flash frame. The offset passed in should
+// reflect this.
+static int detect_flash(VP9_COMP *cpi, int offset) {
+ FIRSTPASS_STATS next_frame;
+
+ int flash_detected = 0;
+
+ // Read the frame data.
+ // The return is FALSE (no flash detected) if not a valid frame
+ if (read_frame_stats(cpi, &next_frame, offset) != EOF) {
+ // What we are looking for here is a situation where there is a
+ // brief break in prediction (such as a flash) but subsequent frames
+ // are reasonably well predicted by an earlier (pre flash) frame.
+ // The recovery after a flash is indicated by a high pcnt_second_ref
+ // compared to pcnt_inter.
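+ // Example: pcnt_second_ref = 0.6 with pcnt_inter = 0.4 satisfies both
+ // tests below and is treated as the recovery frame following a flash.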
+ if (next_frame.pcnt_second_ref > next_frame.pcnt_inter &&
+ next_frame.pcnt_second_ref >= 0.5)
+ flash_detected = 1;
+ }
+
+ return flash_detected;
+}
+
+// Update the motion-related elements of the GF/arf boost calculation.
+static void accumulate_frame_motion_stats(
+ VP9_COMP *cpi,
+ FIRSTPASS_STATS *this_frame,
+ double *this_frame_mv_in_out,
+ double *mv_in_out_accumulator,
+ double *abs_mv_in_out_accumulator,
+ double *mv_ratio_accumulator) {
+ // double this_frame_mv_in_out;
+ double this_frame_mvr_ratio;
+ double this_frame_mvc_ratio;
+ double motion_pct;
+
+ // Accumulate motion stats.
+ motion_pct = this_frame->pcnt_motion;
+
+ // Accumulate Motion In/Out of frame stats
+ *this_frame_mv_in_out = this_frame->mv_in_out_count * motion_pct;
+ *mv_in_out_accumulator += this_frame->mv_in_out_count * motion_pct;
+ *abs_mv_in_out_accumulator +=
+ fabs(this_frame->mv_in_out_count * motion_pct);
+
+ // Accumulate a measure of how uniform (or conversely how random)
+ // the motion field is. (A ratio of absmv / mv)
+ if (motion_pct > 0.05) {
+ this_frame_mvr_ratio = fabs(this_frame->mvr_abs) /
+ DOUBLE_DIVIDE_CHECK(fabs(this_frame->MVr));
+
+ this_frame_mvc_ratio = fabs(this_frame->mvc_abs) /
+ DOUBLE_DIVIDE_CHECK(fabs(this_frame->MVc));
+
+ *mv_ratio_accumulator +=
+ (this_frame_mvr_ratio < this_frame->mvr_abs)
+ ? (this_frame_mvr_ratio * motion_pct)
+ : this_frame->mvr_abs * motion_pct;
+
+ *mv_ratio_accumulator +=
+ (this_frame_mvc_ratio < this_frame->mvc_abs)
+ ? (this_frame_mvc_ratio * motion_pct)
+ : this_frame->mvc_abs * motion_pct;
+
+ }
+}
+
+// Calculate a baseline boost number for the current frame.
+static double calc_frame_boost(
+ VP9_COMP *cpi,
+ FIRSTPASS_STATS *this_frame,
+ double this_frame_mv_in_out) {
+ double frame_boost;
+
+ // Underlying boost factor is based on inter intra error ratio
+ if (this_frame->intra_error > cpi->twopass.gf_intra_err_min)
+ frame_boost = (IIFACTOR * this_frame->intra_error /
+ DOUBLE_DIVIDE_CHECK(this_frame->coded_error));
+ else
+ frame_boost = (IIFACTOR * cpi->twopass.gf_intra_err_min /
+ DOUBLE_DIVIDE_CHECK(this_frame->coded_error));
+
+ // Increase boost for frames where new data is coming into the frame
+ // (e.g. a zoom out). Slightly reduce boost if there is a net balance
+ // of motion out of the frame (a zoom in).
+ // The range for this_frame_mv_in_out is -1.0 to +1.0.
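+ // Example: at the +1.0 extreme (a strong zoom out) the boost is tripled
+ // (factor 1.0 + 1.0 * 2.0); at the -1.0 extreme it is halved
+ // (factor 1.0 + (-1.0) / 2.0 = 0.5).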
+ if (this_frame_mv_in_out > 0.0)
+ frame_boost += frame_boost * (this_frame_mv_in_out * 2.0);
+ // In the extreme case the boost is halved.
+ else
+ frame_boost += frame_boost * (this_frame_mv_in_out / 2.0);
+
+ // Clip to maximum
+ if (frame_boost > GF_RMAX)
+ frame_boost = GF_RMAX;
+
+ return frame_boost;
+}
+
+static int calc_arf_boost(VP9_COMP *cpi, int offset,
+ int f_frames, int b_frames,
+ int *f_boost, int *b_boost) {
+ FIRSTPASS_STATS this_frame;
+
+ int i;
+ double boost_score = 0.0;
+ double mv_ratio_accumulator = 0.0;
+ double decay_accumulator = 1.0;
+ double this_frame_mv_in_out = 0.0;
+ double mv_in_out_accumulator = 0.0;
+ double abs_mv_in_out_accumulator = 0.0;
+ int arf_boost;
+ int flash_detected = 0;
+
+ // Search forward from the proposed arf/next gf position
+ for (i = 0; i < f_frames; i++) {
+ if (read_frame_stats(cpi, &this_frame, (i + offset)) == EOF)
+ break;
+
+ // Update the motion related elements to the boost calculation
+ accumulate_frame_motion_stats(cpi, &this_frame,
+ &this_frame_mv_in_out, &mv_in_out_accumulator,
+ &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
+
+ // We want to discount the flash frame itself and the recovery
+ // frame that follows as both will have poor scores.
+ flash_detected = detect_flash(cpi, (i + offset)) ||
+ detect_flash(cpi, (i + offset + 1));
+
+ // Cumulative effect of prediction quality decay
+ if (!flash_detected) {
+ decay_accumulator *= get_prediction_decay_rate(cpi, &this_frame);
+ decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR
+ ? MIN_DECAY_FACTOR : decay_accumulator;
+ }
+
+ boost_score += (decay_accumulator *
+ calc_frame_boost(cpi, &this_frame, this_frame_mv_in_out));
+ }
+
+ *f_boost = (int)boost_score;
+
+ // Reset for backward looking loop
+ boost_score = 0.0;
+ mv_ratio_accumulator = 0.0;
+ decay_accumulator = 1.0;
+ this_frame_mv_in_out = 0.0;
+ mv_in_out_accumulator = 0.0;
+ abs_mv_in_out_accumulator = 0.0;
+
+ // Search backward towards last gf position
+ for (i = -1; i >= -b_frames; i--) {
+ if (read_frame_stats(cpi, &this_frame, (i + offset)) == EOF)
+ break;
+
+ // Update the motion related elements to the boost calculation
+ accumulate_frame_motion_stats(cpi, &this_frame,
+ &this_frame_mv_in_out, &mv_in_out_accumulator,
+ &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
+
+ // We want to discount the flash frame itself and the recovery
+ // frame that follows as both will have poor scores.
+ flash_detected = detect_flash(cpi, (i + offset)) ||
+ detect_flash(cpi, (i + offset + 1));
+
+ // Cumulative effect of prediction quality decay
+ if (!flash_detected) {
+ decay_accumulator *= get_prediction_decay_rate(cpi, &this_frame);
+ decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR
+ ? MIN_DECAY_FACTOR : decay_accumulator;
+ }
+
+ boost_score += (decay_accumulator *
+ calc_frame_boost(cpi, &this_frame, this_frame_mv_in_out));
+
+ }
+ *b_boost = (int)boost_score;
+
+ arf_boost = (*f_boost + *b_boost);
+ if (arf_boost < ((b_frames + f_frames) * 20))
+ arf_boost = ((b_frames + f_frames) * 20);
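+ // Example: with 15 forward and 15 backward frames the combined boost
+ // is floored at (15 + 15) * 20 = 600.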
+
+ return arf_boost;
+}
+
+#if CONFIG_MULTIPLE_ARF
+// Work out the frame coding order for a GF or an ARF group.
+// The current implementation codes frames in their natural order for a
+// GF group, and inserts additional ARFs into an ARF group using a
+// binary split approach.
+// NOTE: this function is currently implemented recursively.
+static void schedule_frames(VP9_COMP *cpi, const int start, const int end,
+ const int arf_idx, const int gf_or_arf_group,
+ const int level) {
+ int i, abs_end, half_range;
+ int *cfo = cpi->frame_coding_order;
+ int idx = cpi->new_frame_coding_order_period;
+
+ // If (end < 0) an ARF should be coded at position (-end).
+ assert(start >= 0);
+
+ // printf("start:%d end:%d\n", start, end);
+
+ // GF Group: code frames in logical order.
+ if (gf_or_arf_group == 0) {
+ assert(end >= start);
+ for (i = start; i <= end; ++i) {
+ cfo[idx] = i;
+ cpi->arf_buffer_idx[idx] = arf_idx;
+ cpi->arf_weight[idx] = -1;
+ ++idx;
+ }
+ cpi->new_frame_coding_order_period = idx;
+ return;
+ }
+
+ // ARF Group: work out the ARF schedule.
+ // Mark ARF frames as negative.
+ if (end < 0) {
+ // printf("start:%d end:%d\n", -end, -end);
+ // ARF frame is at the end of the range.
+ cfo[idx] = end;
+ // What ARF buffer does this ARF use as predictor.
+ cpi->arf_buffer_idx[idx] = (arf_idx > 2) ? (arf_idx - 1) : 2;
+ cpi->arf_weight[idx] = level;
+ ++idx;
+ abs_end = -end;
+ } else {
+ abs_end = end;
+ }
+
+ half_range = (abs_end - start) >> 1;
+
+ // ARFs may not be adjacent, they must be separated by at least
+ // MIN_GF_INTERVAL non-ARF frames.
+ if ((start + MIN_GF_INTERVAL) >= (abs_end - MIN_GF_INTERVAL)) {
+ // printf("start:%d end:%d\n", start, abs_end);
+ // Update the coding order and active ARF.
+ for (i = start; i <= abs_end; ++i) {
+ cfo[idx] = i;
+ cpi->arf_buffer_idx[idx] = arf_idx;
+ cpi->arf_weight[idx] = -1;
+ ++idx;
+ }
+ cpi->new_frame_coding_order_period = idx;
+ } else {
+ // Place a new ARF at the mid-point of the range.
+ cpi->new_frame_coding_order_period = idx;
+ schedule_frames(cpi, start, -(start + half_range), arf_idx + 1,
+ gf_or_arf_group, level + 1);
+ schedule_frames(cpi, start + half_range + 1, abs_end, arf_idx,
+ gf_or_arf_group, level + 1);
+ }
+}
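+
+// Illustrative order (not part of the change): for an ARF group covering
+// frames 0..15, the ARF at frame 15 is coded first, then the two half
+// ranges 0..7 and 8..15 are scheduled recursively, each potentially
+// receiving its own mid-point ARF at a deeper level.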
+
+#define FIXED_ARF_GROUP_SIZE 16
+
+void define_fixed_arf_period(VP9_COMP *cpi) {
+ int i;
+ int max_level = INT_MIN;
+
+ assert(cpi->multi_arf_enabled);
+ assert(cpi->oxcf.lag_in_frames >= FIXED_ARF_GROUP_SIZE);
+
+ // Save the weight of the last frame in the sequence before next
+ // sequence pattern overwrites it.
+ cpi->this_frame_weight = cpi->arf_weight[cpi->sequence_number];
+ assert(cpi->this_frame_weight >= 0);
+
+ // Initialize frame coding order variables.
+ cpi->new_frame_coding_order_period = 0;
+ cpi->next_frame_in_order = 0;
+ cpi->arf_buffered = 0;
+ vp9_zero(cpi->frame_coding_order);
+ vp9_zero(cpi->arf_buffer_idx);
+ vpx_memset(cpi->arf_weight, -1, sizeof(cpi->arf_weight));
+
+ if (cpi->twopass.frames_to_key <= (FIXED_ARF_GROUP_SIZE + 8)) {
+ // Set up a GF group close to the keyframe.
+ cpi->source_alt_ref_pending = 0;
+ cpi->baseline_gf_interval = cpi->twopass.frames_to_key;
+ schedule_frames(cpi, 0, (cpi->baseline_gf_interval - 1), 2, 0, 0);
+ } else {
+ // Set up a fixed-period ARF group.
+ cpi->source_alt_ref_pending = 1;
+ cpi->baseline_gf_interval = FIXED_ARF_GROUP_SIZE;
+ schedule_frames(cpi, 0, -(cpi->baseline_gf_interval - 1), 2, 1, 0);
+ }
+
+ // Replace level indicator of -1 with correct level.
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ if (cpi->arf_weight[i] > max_level) {
+ max_level = cpi->arf_weight[i];
+ }
+ }
+ ++max_level;
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ if (cpi->arf_weight[i] == -1) {
+ cpi->arf_weight[i] = max_level;
+ }
+ }
+ cpi->max_arf_level = max_level;
+#if 0
+ printf("\nSchedule: ");
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ printf("%4d ", cpi->frame_coding_order[i]);
+ }
+ printf("\n");
+ printf("ARFref: ");
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ printf("%4d ", cpi->arf_buffer_idx[i]);
+ }
+ printf("\n");
+ printf("Weight: ");
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ printf("%4d ", cpi->arf_weight[i]);
+ }
+ printf("\n");
+#endif
+}
+#endif
+
+// Analyse and define a gf/arf group.
+static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+ FIRSTPASS_STATS next_frame;
+ FIRSTPASS_STATS *start_pos;
+ int i;
+ double boost_score = 0.0;
+ double old_boost_score = 0.0;
+ double gf_group_err = 0.0;
+ double gf_first_frame_err = 0.0;
+ double mod_frame_err = 0.0;
+
+ double mv_ratio_accumulator = 0.0;
+ double decay_accumulator = 1.0;
+ double zero_motion_accumulator = 1.0;
+
+ double loop_decay_rate = 1.00; // Starting decay rate
+ double last_loop_decay_rate = 1.00;
+
+ double this_frame_mv_in_out = 0.0;
+ double mv_in_out_accumulator = 0.0;
+ double abs_mv_in_out_accumulator = 0.0;
+ double mv_ratio_accumulator_thresh;
+ int max_bits = frame_max_bits(cpi); // Max for a single frame
+
+ unsigned int allow_alt_ref =
+ cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames;
+
+ int f_boost = 0;
+ int b_boost = 0;
+ int flash_detected;
+ int active_max_gf_interval;
+
+ cpi->twopass.gf_group_bits = 0;
+
+ vp9_clear_system_state(); // __asm emms;
+
+ start_pos = cpi->twopass.stats_in;
+
+ vpx_memset(&next_frame, 0, sizeof(next_frame)); // assure clean
+
+ // Load stats for the current frame.
+ mod_frame_err = calculate_modified_err(cpi, this_frame);
+
+ // Note the error of the frame at the start of the group (this will be
+ // the GF frame error if we code a normal gf).
+ gf_first_frame_err = mod_frame_err;
+
+ // Special treatment if the current frame is a key frame (which is also
+ // a gf). If it is, then its error score (and hence bit allocation) needs
+ // to be subtracted out from the calculation for the GF group.
+ if (cpi->common.frame_type == KEY_FRAME)
+ gf_group_err -= gf_first_frame_err;
+
+ // Motion breakout threshold for loop below depends on image size.
+ mv_ratio_accumulator_thresh = (cpi->common.width + cpi->common.height) / 10.0;
+
+ // Work out a maximum interval for the GF.
+ // If the image appears completely static we can extend beyond this.
+ // The value chosen depends on the active Q range. At low Q we have
+ // bits to spare and are better with a smaller interval and smaller boost.
+ // At high Q when there are few bits to spare we are better with a longer
+ // interval to spread the cost of the GF.
+ active_max_gf_interval =
+ 12 + ((int)vp9_convert_qindex_to_q(cpi->active_worst_quality) >> 5);
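+ // Example: an active worst quality that maps to q = 96.0 yields
+ // 12 + (96 >> 5) = 15 frames before any static-scene extension.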
+
+ if (active_max_gf_interval > cpi->max_gf_interval)
+ active_max_gf_interval = cpi->max_gf_interval;
+
+ i = 0;
+ while (((i < cpi->twopass.static_scene_max_gf_interval) ||
+ ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL)) &&
+ (i < cpi->twopass.frames_to_key)) {
+ i++; // Increment the loop counter
+
+ // Accumulate error score of frames in this gf group
+ mod_frame_err = calculate_modified_err(cpi, this_frame);
+ gf_group_err += mod_frame_err;
+
+ if (EOF == input_stats(cpi, &next_frame))
+ break;
+
+ // Test for the case where there is a brief flash but the prediction
+ // quality back to an earlier frame is then restored.
+ flash_detected = detect_flash(cpi, 0);
+
+ // Update the motion related elements to the boost calculation
+ accumulate_frame_motion_stats(cpi, &next_frame,
+ &this_frame_mv_in_out, &mv_in_out_accumulator,
+ &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
+
+ // Cumulative effect of prediction quality decay
+ if (!flash_detected) {
+ last_loop_decay_rate = loop_decay_rate;
+ loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
+ decay_accumulator = decay_accumulator * loop_decay_rate;
+
+ // Monitor for static sections.
+ if ((next_frame.pcnt_inter - next_frame.pcnt_motion) <
+ zero_motion_accumulator) {
+ zero_motion_accumulator =
+ (next_frame.pcnt_inter - next_frame.pcnt_motion);
+ }
+
+ // Break clause to detect very still sections after motion
+ // (for example a static image after a fade or other transition).
+ if (detect_transition_to_still(cpi, i, 5, loop_decay_rate,
+ last_loop_decay_rate)) {
+ allow_alt_ref = 0;
+ break;
+ }
+ }
+
+ // Calculate a boost number for this frame
+ boost_score +=
+ (decay_accumulator *
+ calc_frame_boost(cpi, &next_frame, this_frame_mv_in_out));
+
+ // Break out conditions.
+ if (
+ // Break at cpi->max_gf_interval unless almost totally static
+ (i >= active_max_gf_interval && (zero_motion_accumulator < 0.995)) ||
+ (
+ // Don't break out with a very short interval
+ (i > MIN_GF_INTERVAL) &&
+ // Don't break out very close to a key frame
+ ((cpi->twopass.frames_to_key - i) >= MIN_GF_INTERVAL) &&
+ ((boost_score > 125.0) || (next_frame.pcnt_inter < 0.75)) &&
+ (!flash_detected) &&
+ ((mv_ratio_accumulator > mv_ratio_accumulator_thresh) ||
+ (abs_mv_in_out_accumulator > 3.0) ||
+ (mv_in_out_accumulator < -2.0) ||
+ ((boost_score - old_boost_score) < IIFACTOR))
+ )) {
+ boost_score = old_boost_score;
+ break;
+ }
+
+ *this_frame = next_frame;
+
+ old_boost_score = boost_score;
+ }
+
+ // Don't allow a gf too near the next kf
+ if ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL) {
+ while (i < cpi->twopass.frames_to_key) {
+ i++;
+
+ if (EOF == input_stats(cpi, this_frame))
+ break;
+
+ if (i < cpi->twopass.frames_to_key) {
+ mod_frame_err = calculate_modified_err(cpi, this_frame);
+ gf_group_err += mod_frame_err;
+ }
+ }
+ }
+
+ // Set the interval until the next gf or arf.
+ cpi->baseline_gf_interval = i;
+
+#if CONFIG_MULTIPLE_ARF
+ if (cpi->multi_arf_enabled) {
+ // Initialize frame coding order variables.
+ cpi->new_frame_coding_order_period = 0;
+ cpi->next_frame_in_order = 0;
+ cpi->arf_buffered = 0;
+ vp9_zero(cpi->frame_coding_order);
+ vp9_zero(cpi->arf_buffer_idx);
+ vpx_memset(cpi->arf_weight, -1, sizeof(cpi->arf_weight));
+ }
+#endif
+
+ // Should we use the alternate reference frame?
+ if (allow_alt_ref &&
+ (i < cpi->oxcf.lag_in_frames) &&
+ (i >= MIN_GF_INTERVAL) &&
+ // don't use an ARF very near the next kf
+ (i <= (cpi->twopass.frames_to_key - MIN_GF_INTERVAL)) &&
+ ((next_frame.pcnt_inter > 0.75) ||
+ (next_frame.pcnt_second_ref > 0.5)) &&
+ ((mv_in_out_accumulator / (double)i > -0.2) ||
+ (mv_in_out_accumulator > -2.0)) &&
+ (boost_score > 100)) {
+ // Alternative boost calculation for alt ref
+ cpi->gfu_boost = calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost, &b_boost);
+ cpi->source_alt_ref_pending = 1;
+
+#if CONFIG_MULTIPLE_ARF
+ // Set the ARF schedule.
+ if (cpi->multi_arf_enabled) {
+ schedule_frames(cpi, 0, -(cpi->baseline_gf_interval - 1), 2, 1, 0);
+ }
+#endif
+ } else {
+ cpi->gfu_boost = (int)boost_score;
+ cpi->source_alt_ref_pending = 0;
+#if CONFIG_MULTIPLE_ARF
+ // Set the GF schedule.
+ if (cpi->multi_arf_enabled) {
+ schedule_frames(cpi, 0, cpi->baseline_gf_interval - 1, 2, 0, 0);
+ assert(cpi->new_frame_coding_order_period == cpi->baseline_gf_interval);
+ }
+#endif
+ }
+
+#if CONFIG_MULTIPLE_ARF
+ if (cpi->multi_arf_enabled && (cpi->common.frame_type != KEY_FRAME)) {
+ int max_level = INT_MIN;
+ // Replace level indicator of -1 with correct level.
+ for (i = 0; i < cpi->frame_coding_order_period; ++i) {
+ if (cpi->arf_weight[i] > max_level) {
+ max_level = cpi->arf_weight[i];
+ }
+ }
+ ++max_level;
+ for (i = 0; i < cpi->frame_coding_order_period; ++i) {
+ if (cpi->arf_weight[i] == -1) {
+ cpi->arf_weight[i] = max_level;
+ }
+ }
+ cpi->max_arf_level = max_level;
+ }
+#if 0
+ if (cpi->multi_arf_enabled) {
+ printf("\nSchedule: ");
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ printf("%4d ", cpi->frame_coding_order[i]);
+ }
+ printf("\n");
+ printf("ARFref: ");
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ printf("%4d ", cpi->arf_buffer_idx[i]);
+ }
+ printf("\n");
+ printf("Weight: ");
+ for (i = 0; i < cpi->new_frame_coding_order_period; ++i) {
+ printf("%4d ", cpi->arf_weight[i]);
+ }
+ printf("\n");
+ }
+#endif
+#endif
+
+ // Now decide how many bits should be allocated to the GF group as a
+ // proportion of those remaining in the kf group.
+ // The final key frame group in the clip is treated as a special case
+ // where cpi->twopass.kf_group_bits is tied to cpi->twopass.bits_left.
+ // This is also important for short clips where there may only be one
+ // key frame.
+ if (cpi->twopass.frames_to_key >= (int)(cpi->twopass.total_stats.count -
+ cpi->common.current_video_frame)) {
+ cpi->twopass.kf_group_bits =
+ (cpi->twopass.bits_left > 0) ? cpi->twopass.bits_left : 0;
+ }
+
+ // Calculate the bits to be allocated to the group as a whole
+ if ((cpi->twopass.kf_group_bits > 0) &&
+ (cpi->twopass.kf_group_error_left > 0)) {
+ cpi->twopass.gf_group_bits =
+ (int64_t)(cpi->twopass.kf_group_bits *
+ (gf_group_err / cpi->twopass.kf_group_error_left));
+ } else
+ cpi->twopass.gf_group_bits = 0;
+
+ cpi->twopass.gf_group_bits =
+ (cpi->twopass.gf_group_bits < 0)
+ ? 0
+ : (cpi->twopass.gf_group_bits > cpi->twopass.kf_group_bits)
+ ? cpi->twopass.kf_group_bits : cpi->twopass.gf_group_bits;
+
+ // Clip cpi->twopass.gf_group_bits based on user supplied data rate
+ // variability limit (cpi->oxcf.two_pass_vbrmax_section)
+ if (cpi->twopass.gf_group_bits >
+ (int64_t)max_bits * cpi->baseline_gf_interval)
+ cpi->twopass.gf_group_bits = (int64_t)max_bits * cpi->baseline_gf_interval;
+
+ // Reset the file position
+ reset_fpf_position(cpi, start_pos);
+
+ // Update the record of error used so far (only done once per gf group)
+ cpi->twopass.modified_error_used += gf_group_err;
+
+ // Assign bits to the arf or gf.
+ for (i = 0;
+ i <= (cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME);
+ ++i) {
+ int allocation_chunks;
+ int q = cpi->oxcf.fixed_q < 0 ? cpi->last_q[INTER_FRAME]
+ : cpi->oxcf.fixed_q;
+ int gf_bits;
+
+ int boost = (cpi->gfu_boost * vp9_gfboost_qadjust(q)) / 100;
+
+ // Set max and minimum boost and hence minimum allocation
+ boost = clamp(boost, 125, (cpi->baseline_gf_interval + 1) * 200);
+
+ if (cpi->source_alt_ref_pending && i == 0)
+ allocation_chunks = ((cpi->baseline_gf_interval + 1) * 100) + boost;
+ else
+ allocation_chunks = (cpi->baseline_gf_interval * 100) + (boost - 100);
+
+ // Prevent overflow
+ if (boost > 1023) {
+ int divisor = boost >> 10;
+ boost /= divisor;
+ allocation_chunks /= divisor;
+ }
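+ // Worked example: boost = 2048 gives divisor = 2, halving both boost
+ // and allocation_chunks so their ratio (used below) is unchanged while
+ // the intermediate products stay small.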
+
+ // Calculate the number of bits to be spent on the gf or arf based on
+ // the boost number
+ gf_bits = (int)((double)boost * (cpi->twopass.gf_group_bits /
+ (double)allocation_chunks));
+
+ // If the frame that is to be boosted is simpler than the average for
+ // the gf/arf group then use an alternative calculation
+ // based on the error score of the frame itself
+ if (mod_frame_err < gf_group_err / (double)cpi->baseline_gf_interval) {
+ double alt_gf_grp_bits =
+ (double)cpi->twopass.kf_group_bits *
+ (mod_frame_err * (double)cpi->baseline_gf_interval) /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.kf_group_error_left);
+
+ int alt_gf_bits = (int)((double)boost * (alt_gf_grp_bits /
+ (double)allocation_chunks));
+
+ if (gf_bits > alt_gf_bits)
+ gf_bits = alt_gf_bits;
+ }
+ // Else if it is harder than other frames in the group, make sure it at
+ // least receives an allocation in keeping with its relative error
+ // score, otherwise it may be worse off than an "un-boosted" frame
+ else {
+ int alt_gf_bits = (int)((double)cpi->twopass.kf_group_bits *
+ mod_frame_err /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.kf_group_error_left));
+
+ if (alt_gf_bits > gf_bits)
+ gf_bits = alt_gf_bits;
+ }
+
+ // Don't allow a negative value for gf_bits.
+ if (gf_bits < 0)
+ gf_bits = 0;
+
+ // Add in minimum for a frame
+ gf_bits += cpi->min_frame_bandwidth;
+
+ if (i == 0) {
+ cpi->twopass.gf_bits = gf_bits;
+ }
+ if (i == 1 || (!cpi->source_alt_ref_pending
+ && (cpi->common.frame_type != KEY_FRAME))) {
+ // Per frame bit target for this frame
+ cpi->per_frame_bandwidth = gf_bits;
+ }
+ }
+
+ {
+ // Adjust KF group bits and error remaining
+ cpi->twopass.kf_group_error_left -= (int64_t)gf_group_err;
+ cpi->twopass.kf_group_bits -= cpi->twopass.gf_group_bits;
+
+ if (cpi->twopass.kf_group_bits < 0)
+ cpi->twopass.kf_group_bits = 0;
+
+ // Note the error score left in the remaining frames of the group.
+ // For normal GFs we want to remove the error score for the first frame
+ // of the group (except in Key frame case where this has already
+ // happened)
+ if (!cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME)
+ cpi->twopass.gf_group_error_left = (int64_t)(gf_group_err
+ - gf_first_frame_err);
+ else
+ cpi->twopass.gf_group_error_left = (int64_t)gf_group_err;
+
+ cpi->twopass.gf_group_bits -= cpi->twopass.gf_bits
+ - cpi->min_frame_bandwidth;
+
+ if (cpi->twopass.gf_group_bits < 0)
+ cpi->twopass.gf_group_bits = 0;
+
+ // This condition could fail if there are two kfs very close together
+ // despite MIN_GF_INTERVAL, which would cause a divide by 0 in the
+ // calculation of alt_extra_bits.
+ if (cpi->baseline_gf_interval >= 3) {
+ const int boost = cpi->source_alt_ref_pending ? b_boost : cpi->gfu_boost;
+
+ if (boost >= 150) {
+ int alt_extra_bits;
+ int pct_extra = (boost - 100) / 50;
+ pct_extra = (pct_extra > 20) ? 20 : pct_extra;
+
+ alt_extra_bits = (int)((cpi->twopass.gf_group_bits * pct_extra) / 100);
+ cpi->twopass.gf_group_bits -= alt_extra_bits;
+ }
+ }
+ }
+
+ if (cpi->common.frame_type != KEY_FRAME) {
+ FIRSTPASS_STATS sectionstats;
+
+ zero_stats(&sectionstats);
+ reset_fpf_position(cpi, start_pos);
+
+ for (i = 0; i < cpi->baseline_gf_interval; i++) {
+ input_stats(cpi, &next_frame);
+ accumulate_stats(&sectionstats, &next_frame);
+ }
+
+ avg_stats(&sectionstats);
+
+ cpi->twopass.section_intra_rating = (int)
+ (sectionstats.intra_error /
+ DOUBLE_DIVIDE_CHECK(sectionstats.coded_error));
+
+ reset_fpf_position(cpi, start_pos);
+ }
+}
+
+// Allocate bits to a normal frame that is neither a gf, an arf, nor a key frame.
+static void assign_std_frame_bits(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+ int target_frame_size;
+
+ double modified_err;
+ double err_fraction;
+
+ // Max for a single frame.
+ int max_bits = frame_max_bits(cpi);
+
+ // Calculate modified prediction error used in bit allocation.
+ modified_err = calculate_modified_err(cpi, this_frame);
+
+ if (cpi->twopass.gf_group_error_left > 0)
+ // What portion of the remaining GF group error is used by this frame.
+ err_fraction = modified_err / cpi->twopass.gf_group_error_left;
+ else
+ err_fraction = 0.0;
+
+ // How many of those bits available for allocation should we give it?
+ target_frame_size = (int)((double)cpi->twopass.gf_group_bits * err_fraction);
+
+ // Clip target size to 0 - max_bits (or cpi->twopass.gf_group_bits) at
+ // the top end.
+ if (target_frame_size < 0)
+ target_frame_size = 0;
+ else {
+ if (target_frame_size > max_bits)
+ target_frame_size = max_bits;
+
+ if (target_frame_size > cpi->twopass.gf_group_bits)
+ target_frame_size = (int)cpi->twopass.gf_group_bits;
+ }
+
+ // Adjust error and bits remaining.
+ cpi->twopass.gf_group_error_left -= (int64_t)modified_err;
+ cpi->twopass.gf_group_bits -= target_frame_size;
+
+ if (cpi->twopass.gf_group_bits < 0)
+ cpi->twopass.gf_group_bits = 0;
+
+ // Add in the minimum number of bits that is set aside for every frame.
+ target_frame_size += cpi->min_frame_bandwidth;
+
+ // Per frame bit target for this frame.
+ cpi->per_frame_bandwidth = target_frame_size;
+}
+
+// Make a damped adjustment to the active max q.
+static int adjust_active_maxq(int old_maxqi, int new_maxqi) {
+ int i;
+ const double old_q = vp9_convert_qindex_to_q(old_maxqi);
+ const double new_q = vp9_convert_qindex_to_q(new_maxqi);
+ const double target_q = ((old_q * 7.0) + new_q) / 8.0;
+
+ if (target_q > old_q) {
+ for (i = old_maxqi; i <= new_maxqi; i++)
+ if (vp9_convert_qindex_to_q(i) >= target_q)
+ return i;
+ } else {
+ for (i = old_maxqi; i >= new_maxqi; i--)
+ if (vp9_convert_qindex_to_q(i) <= target_q)
+ return i;
+ }
+
+ return new_maxqi;
+}
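+
+// Worked example: if the old active max Q maps to q = 40.0 and the new
+// estimate to q = 56.0, the damped target is (40.0 * 7.0 + 56.0) / 8.0 =
+// 42.0, i.e. only 1/8 of the requested move is applied per update.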
+
+void vp9_second_pass(VP9_COMP *cpi) {
+ int tmp_q;
+ int frames_left = (int)(cpi->twopass.total_stats.count -
+ cpi->common.current_video_frame);
+
+ FIRSTPASS_STATS this_frame;
+ FIRSTPASS_STATS this_frame_copy;
+
+ double this_frame_intra_error;
+ double this_frame_coded_error;
+
+ if (!cpi->twopass.stats_in)
+ return;
+
+ vp9_clear_system_state();
+
+ // Special case code for first frame.
+ if (cpi->common.current_video_frame == 0) {
+ cpi->twopass.est_max_qcorrection_factor = 1.0;
+
+ // Set a cq_level in constrained quality mode.
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
+ int est_cq = estimate_cq(cpi, &cpi->twopass.total_left_stats,
+ (int)(cpi->twopass.bits_left / frames_left));
+
+ cpi->cq_target_quality = cpi->oxcf.cq_level;
+ if (est_cq > cpi->cq_target_quality)
+ cpi->cq_target_quality = est_cq;
+ }
+
+ // guess at maxq needed in 2nd pass
+ cpi->twopass.maxq_max_limit = cpi->worst_quality;
+ cpi->twopass.maxq_min_limit = cpi->best_quality;
+
+ tmp_q = estimate_max_q(cpi, &cpi->twopass.total_left_stats,
+ (int)(cpi->twopass.bits_left / frames_left));
+
+ cpi->active_worst_quality = tmp_q;
+ cpi->ni_av_qi = tmp_q;
+ cpi->avg_q = vp9_convert_qindex_to_q(tmp_q);
+
+#ifndef ONE_SHOT_Q_ESTIMATE
+ // Limit the maxq value returned subsequently.
+ // This increases the risk of overspend or underspend if the initial
+ // estimate for the clip is bad, but helps prevent excessive
+ // variation in Q, especially near the end of a clip
+ // where for example a small overspend may cause Q to crash
+ adjust_maxq_qrange(cpi);
+#endif
+ }
+
+#ifndef ONE_SHOT_Q_ESTIMATE
+ // The last few frames of a clip almost always have too few or too many
+ // bits, and for the sake of overly exact rate control we don't want to
+ // make radical adjustments to the allowed quantizer range just to use
+ // up a few surplus bits or get beneath the target rate.
+ else if ((cpi->common.current_video_frame <
+ (((unsigned int)cpi->twopass.total_stats.count * 255) >> 8)) &&
+ ((cpi->common.current_video_frame + cpi->baseline_gf_interval) <
+ (unsigned int)cpi->twopass.total_stats.count)) {
+ if (frames_left < 1)
+ frames_left = 1;
+
+ tmp_q = estimate_max_q(
+ cpi,
+ &cpi->twopass.total_left_stats,
+ (int)(cpi->twopass.bits_left / frames_left));
+
+ // Make a damped adjustment to active max Q
+ cpi->active_worst_quality =
+ adjust_active_maxq(cpi->active_worst_quality, tmp_q);
+ }
+#endif
+
+ vpx_memset(&this_frame, 0, sizeof(FIRSTPASS_STATS));
+ if (EOF == input_stats(cpi, &this_frame))
+ return;
+
+ this_frame_intra_error = this_frame.intra_error;
+ this_frame_coded_error = this_frame.coded_error;
+
+ // Keyframe and section processing.
+ if (cpi->twopass.frames_to_key == 0) {
+ // Define next KF group and assign bits to it
+ this_frame_copy = this_frame;
+ find_next_key_frame(cpi, &this_frame_copy);
+ }
+
+ // Is this a GF / ARF? (Note that a KF is always also a GF.)
+ if (cpi->frames_till_gf_update_due == 0) {
+ // Define next gf group and assign bits to it
+ this_frame_copy = this_frame;
+
+#if CONFIG_MULTIPLE_ARF
+ if (cpi->multi_arf_enabled) {
+ define_fixed_arf_period(cpi);
+ } else {
+#endif
+ define_gf_group(cpi, &this_frame_copy);
+#if CONFIG_MULTIPLE_ARF
+ }
+#endif
+
+ // If we are going to code an altref frame at the end of the group
+ // and the current frame is not a key frame...
+ // If the previous group used an arf, this frame has already benefited
+ // from that arf boost and should not be given extra bits.
+ // If the previous group was NOT coded using an arf, we may want to
+ // apply some boost to this GF as well.
+ if (cpi->source_alt_ref_pending && (cpi->common.frame_type != KEY_FRAME)) {
+ // Assign a standard frame's worth of bits from those allocated
+ // to the GF group
+ int bak = cpi->per_frame_bandwidth;
+ this_frame_copy = this_frame;
+ assign_std_frame_bits(cpi, &this_frame_copy);
+ cpi->per_frame_bandwidth = bak;
+ }
+ } else {
+ // Otherwise this is an ordinary frame
+ // Assign bits from those allocated to the GF group
+ this_frame_copy = this_frame;
+ assign_std_frame_bits(cpi, &this_frame_copy);
+ }
+
+ // Keep a globally available copy of this and the next frame's iiratio.
+ cpi->twopass.this_iiratio = (int)(this_frame_intra_error /
+ DOUBLE_DIVIDE_CHECK(this_frame_coded_error));
+ {
+ FIRSTPASS_STATS next_frame;
+ if (lookup_next_frame_stats(cpi, &next_frame) != EOF) {
+ cpi->twopass.next_iiratio = (int)(next_frame.intra_error /
+ DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
+ }
+ }
+
+ // Set nominal per second bandwidth for this frame
+ cpi->target_bandwidth = (int)(cpi->per_frame_bandwidth
+ * cpi->output_frame_rate);
+ if (cpi->target_bandwidth < 0)
+ cpi->target_bandwidth = 0;
+
+ cpi->twopass.frames_to_key--;
+
+ // Update the total stats remaining structure
+ subtract_stats(&cpi->twopass.total_left_stats, &this_frame);
+}
+
+static int test_candidate_kf(VP9_COMP *cpi,
+ FIRSTPASS_STATS *last_frame,
+ FIRSTPASS_STATS *this_frame,
+ FIRSTPASS_STATS *next_frame) {
+ int is_viable_kf = 0;
+
+ // Does the frame satisfy the primary criteria of a key frame
+ // If so, then examine how well it predicts subsequent frames
+ if ((this_frame->pcnt_second_ref < 0.10) &&
+ (next_frame->pcnt_second_ref < 0.10) &&
+ ((this_frame->pcnt_inter < 0.05) ||
+ (
+ ((this_frame->pcnt_inter - this_frame->pcnt_neutral) < .35) &&
+ ((this_frame->intra_error / DOUBLE_DIVIDE_CHECK(this_frame->coded_error)) < 2.5) &&
+ ((fabs(last_frame->coded_error - this_frame->coded_error) / DOUBLE_DIVIDE_CHECK(this_frame->coded_error) > .40) ||
+ (fabs(last_frame->intra_error - this_frame->intra_error) / DOUBLE_DIVIDE_CHECK(this_frame->intra_error) > .40) ||
+ ((next_frame->intra_error / DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) > 3.5)
+ )
+ )
+ )
+ ) {
+ int i;
+ FIRSTPASS_STATS *start_pos;
+
+ FIRSTPASS_STATS local_next_frame;
+
+ double boost_score = 0.0;
+ double old_boost_score = 0.0;
+ double decay_accumulator = 1.0;
+ double next_iiratio;
+
+ local_next_frame = *next_frame;
+
+ // Note the starting file position so we can reset to it
+ start_pos = cpi->twopass.stats_in;
+
+ // Examine how well the key frame predicts subsequent frames
+ for (i = 0; i < 16; i++) {
+ next_iiratio = (IIKFACTOR1 * local_next_frame.intra_error / DOUBLE_DIVIDE_CHECK(local_next_frame.coded_error));
+
+ if (next_iiratio > RMAX)
+ next_iiratio = RMAX;
+
+ // Cumulative effect of decay in prediction quality
+ if (local_next_frame.pcnt_inter > 0.85)
+ decay_accumulator = decay_accumulator * local_next_frame.pcnt_inter;
+ else
+ decay_accumulator = decay_accumulator * ((0.85 + local_next_frame.pcnt_inter) / 2.0);
+
+ // decay_accumulator = decay_accumulator * local_next_frame.pcnt_inter;
+
+ // Keep a running total
+ boost_score += (decay_accumulator * next_iiratio);
+
+ // Test various breakout clauses
+ if ((local_next_frame.pcnt_inter < 0.05) ||
+ (next_iiratio < 1.5) ||
+ (((local_next_frame.pcnt_inter -
+ local_next_frame.pcnt_neutral) < 0.20) &&
+ (next_iiratio < 3.0)) ||
+ ((boost_score - old_boost_score) < 3.0) ||
+ (local_next_frame.intra_error < 200)
+ ) {
+ break;
+ }
+
+ old_boost_score = boost_score;
+
+ // Get the next frame details
+ if (EOF == input_stats(cpi, &local_next_frame))
+ break;
+ }
+
+ // If there is tolerable prediction for at least the next 3 frames then
+ // break out, else discard this potential key frame and move on.
+ if (boost_score > 30.0 && (i > 3))
+ is_viable_kf = 1;
+ else {
+ // Reset the file position
+ reset_fpf_position(cpi, start_pos);
+
+ is_viable_kf = 0;
+ }
+ }
+
+ return is_viable_kf;
+}
+
+static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+ int i, j;
+ FIRSTPASS_STATS last_frame;
+ FIRSTPASS_STATS first_frame;
+ FIRSTPASS_STATS next_frame;
+ FIRSTPASS_STATS *start_position;
+
+ double decay_accumulator = 1.0;
+ double zero_motion_accumulator = 1.0;
+ double boost_score = 0;
+ double loop_decay_rate;
+
+ double kf_mod_err = 0.0;
+ double kf_group_err = 0.0;
+ double kf_group_intra_err = 0.0;
+ double kf_group_coded_err = 0.0;
+ double recent_loop_decay[8] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
+
+ vpx_memset(&next_frame, 0, sizeof(next_frame)); // assure clean
+
+ vp9_clear_system_state(); // __asm emms;
+ start_position = cpi->twopass.stats_in;
+
+ cpi->common.frame_type = KEY_FRAME;
+
+ // Is this a key frame forced by the kf interval setting?
+ cpi->this_key_frame_forced = cpi->next_key_frame_forced;
+
+ // Clear the alt ref active flag as this can never be active on a key frame
+ cpi->source_alt_ref_active = 0;
+
+ // Kf is always a gf so clear frames till next gf counter
+ cpi->frames_till_gf_update_due = 0;
+
+ cpi->twopass.frames_to_key = 1;
+
+ // Take a copy of the initial frame details
+ first_frame = *this_frame;
+
+ cpi->twopass.kf_group_bits = 0; // Total bits available to kf group
+ cpi->twopass.kf_group_error_left = 0; // Group modified error score.
+
+ kf_mod_err = calculate_modified_err(cpi, this_frame);
+
+ // find the next keyframe
+ i = 0;
+ while (cpi->twopass.stats_in < cpi->twopass.stats_in_end) {
+ // Accumulate kf group error
+ kf_group_err += calculate_modified_err(cpi, this_frame);
+
+ // These figures keep intra and coded error counts for all frames,
+ // including key frames, in the group. The effect of the key frame itself
+ // can be subtracted out using the first_frame data collected above.
+ kf_group_intra_err += this_frame->intra_error;
+ kf_group_coded_err += this_frame->coded_error;
+
+ // Load the next frame's stats.
+ last_frame = *this_frame;
+ input_stats(cpi, this_frame);
+
+ // Provided that we are not at the end of the file...
+ if (cpi->oxcf.auto_key
+ && lookup_next_frame_stats(cpi, &next_frame) != EOF) {
+ // Normal scene cut check
+ if (test_candidate_kf(cpi, &last_frame, this_frame, &next_frame))
+ break;
+
+ // How fast is prediction quality decaying
+ loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
+
+ // We want to know something about the recent past... rather than
+ // as used elsewhere, where we are concerned with decay in prediction
+ // quality since the last GF or KF.
+ recent_loop_decay[i % 8] = loop_decay_rate;
+ decay_accumulator = 1.0;
+ for (j = 0; j < 8; j++)
+ decay_accumulator *= recent_loop_decay[j];
+
+ // Special check for a transition or high motion followed by a
+ // static scene.
+ if (detect_transition_to_still(cpi, i, cpi->key_frame_frequency - i,
+ loop_decay_rate, decay_accumulator))
+ break;
+
+ // Step on to the next frame
+ cpi->twopass.frames_to_key++;
+
+ // If we don't have a real key frame within the next two
+ // key-frame-frequency intervals then break out of the loop.
+ if (cpi->twopass.frames_to_key >= 2 * (int)cpi->key_frame_frequency)
+ break;
+ } else
+ cpi->twopass.frames_to_key++;
+
+ i++;
+ }
+
+ // If there is a max kf interval set by the user we must obey it.
+ // We already breakout of the loop above at 2x max.
+ // This code centers the extra kf if the actual natural
+ // interval is between 1x and 2x
+ if (cpi->oxcf.auto_key
+ && cpi->twopass.frames_to_key > (int)cpi->key_frame_frequency) {
+ FIRSTPASS_STATS *current_pos = cpi->twopass.stats_in;
+ FIRSTPASS_STATS tmp_frame;
+
+ cpi->twopass.frames_to_key /= 2;
+
+ // Copy first frame details
+ tmp_frame = first_frame;
+
+ // Reset to the start of the group
+ reset_fpf_position(cpi, start_position);
+
+ kf_group_err = 0;
+ kf_group_intra_err = 0;
+ kf_group_coded_err = 0;
+
+ // Rescan to get the correct error data for the forced kf group
+ for (i = 0; i < cpi->twopass.frames_to_key; i++) {
+ // Accumulate kf group errors
+ kf_group_err += calculate_modified_err(cpi, &tmp_frame);
+ kf_group_intra_err += tmp_frame.intra_error;
+ kf_group_coded_err += tmp_frame.coded_error;
+
+ // Load the next frame's stats.
+ input_stats(cpi, &tmp_frame);
+ }
+
+ // Reset to the start of the group
+ reset_fpf_position(cpi, current_pos);
+
+ cpi->next_key_frame_forced = 1;
+ } else
+ cpi->next_key_frame_forced = 0;
+
+ // Special case for the last frame of the file
+ if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end) {
+ // Accumulate kf group error
+ kf_group_err += calculate_modified_err(cpi, this_frame);
+
+ // These figures keep intra and coded error counts for all frames,
+ // including key frames, in the group. The effect of the key frame itself
+ // can be subtracted out using the first_frame data collected above.
+ kf_group_intra_err += this_frame->intra_error;
+ kf_group_coded_err += this_frame->coded_error;
+ }
+
+ // Calculate the number of bits that should be assigned to the kf group.
+ if ((cpi->twopass.bits_left > 0) && (cpi->twopass.modified_error_left > 0.0)) {
+ // Max for a single normal frame (not key frame)
+ int max_bits = frame_max_bits(cpi);
+
+ // Maximum bits for the kf group
+ int64_t max_grp_bits;
+
+ // Default allocation based on bits left and relative
+ // complexity of the section
+ cpi->twopass.kf_group_bits = (int64_t)(cpi->twopass.bits_left *
+ (kf_group_err /
+ cpi->twopass.modified_error_left));
+
+ // Clip based on maximum per frame rate defined by the user.
+ max_grp_bits = (int64_t)max_bits * (int64_t)cpi->twopass.frames_to_key;
+ if (cpi->twopass.kf_group_bits > max_grp_bits)
+ cpi->twopass.kf_group_bits = max_grp_bits;
+ } else
+ cpi->twopass.kf_group_bits = 0;
+
+ // Reset the first pass file position
+ reset_fpf_position(cpi, start_position);
+
+ // Determine how big to make this keyframe based on how well the
+ // subsequent frames use inter blocks.
+ decay_accumulator = 1.0;
+ boost_score = 0.0;
+ loop_decay_rate = 1.00; // Starting decay rate
+
+ // Scan through the kf group collating various stats.
+ for (i = 0; i < cpi->twopass.frames_to_key; i++) {
+ double r;
+
+ if (EOF == input_stats(cpi, &next_frame))
+ break;
+
+ // Monitor for static sections.
+ if ((next_frame.pcnt_inter - next_frame.pcnt_motion) <
+ zero_motion_accumulator) {
+ zero_motion_accumulator =
+ (next_frame.pcnt_inter - next_frame.pcnt_motion);
+ }
+
+ // For the first few frames collect data to decide kf boost.
+ if (i <= (cpi->max_gf_interval * 2)) {
+ if (next_frame.intra_error > cpi->twopass.kf_intra_err_min)
+ r = (IIKFACTOR2 * next_frame.intra_error /
+ DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
+ else
+ r = (IIKFACTOR2 * cpi->twopass.kf_intra_err_min /
+ DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
+
+ if (r > RMAX)
+ r = RMAX;
+
+ // How fast is prediction quality decaying
+ if (!detect_flash(cpi, 0)) {
+ loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
+ decay_accumulator = decay_accumulator * loop_decay_rate;
+ decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR
+ ? MIN_DECAY_FACTOR : decay_accumulator;
+ }
+
+ boost_score += (decay_accumulator * r);
+ }
+ }
+
+ {
+ FIRSTPASS_STATS sectionstats;
+
+ zero_stats(&sectionstats);
+ reset_fpf_position(cpi, start_position);
+
+ for (i = 0; i < cpi->twopass.frames_to_key; i++) {
+ input_stats(cpi, &next_frame);
+ accumulate_stats(&sectionstats, &next_frame);
+ }
+
+ avg_stats(&sectionstats);
+
+ cpi->twopass.section_intra_rating = (int)
+ (sectionstats.intra_error
+ / DOUBLE_DIVIDE_CHECK(sectionstats.coded_error));
+ }
+
+ // Reset the first pass file position
+ reset_fpf_position(cpi, start_position);
+
+ // Work out how many bits to allocate for the key frame itself
+ if (1) {
+ int kf_boost = (int)boost_score;
+ int allocation_chunks;
+ int alt_kf_bits;
+
+ if (kf_boost < (cpi->twopass.frames_to_key * 3))
+ kf_boost = (cpi->twopass.frames_to_key * 3);
+
+ if (kf_boost < 300) // Min KF boost
+ kf_boost = 300;
+
+ // Make a note of baseline boost and the zero motion
+ // accumulator value for use elsewhere.
+ cpi->kf_boost = kf_boost;
+ cpi->kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0);
+
+ // We do three calculations for kf size.
+ // The first is based on the error score for the whole kf group.
+ // The second (optionally) on the key frame's own error if this is
+ // smaller than the average for the group.
+ // The final one ensures that the frame receives at least the
+ // allocation it would have received based on its own error score vs
+ // the error score remaining.
+ // Special case if the sequence appears almost totally static: in this
+ // case we want to spend almost all of the bits on the key frame.
+ // cpi->twopass.frames_to_key-1 because key frame itself is taken
+ // care of by kf_boost.
+ if (zero_motion_accumulator >= 0.99) {
+ allocation_chunks =
+ ((cpi->twopass.frames_to_key - 1) * 10) + kf_boost;
+ } else {
+ allocation_chunks =
+ ((cpi->twopass.frames_to_key - 1) * 100) + kf_boost;
+ }
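+ // Worked example: a 60-frame kf group with kf_boost = 600 gives
+ // 59 * 100 + 600 = 6500 chunks normally but 59 * 10 + 600 = 1190 in the
+ // near-static case, raising the key frame's share from about 9% to 50%.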
+
+ // Prevent overflow
+ if (kf_boost > 1028) {
+ int divisor = kf_boost >> 10;
+ kf_boost /= divisor;
+ allocation_chunks /= divisor;
+ }
+
+ cpi->twopass.kf_group_bits =
+ (cpi->twopass.kf_group_bits < 0) ? 0 : cpi->twopass.kf_group_bits;
+
+ // Calculate the number of bits to be spent on the key frame
+ cpi->twopass.kf_bits = (int)((double)kf_boost *
+ ((double)cpi->twopass.kf_group_bits / (double)allocation_chunks));
+
+ // If the key frame is actually easier than the average for the
+ // kf group (which does sometimes happen, e.g. a blank intro frame)
+ // then use an alternate calculation based on the kf error score,
+ // which should give a smaller key frame.
+ if (kf_mod_err < kf_group_err / cpi->twopass.frames_to_key) {
+ double alt_kf_grp_bits =
+ ((double)cpi->twopass.bits_left *
+ (kf_mod_err * (double)cpi->twopass.frames_to_key) /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.modified_error_left));
+
+ alt_kf_bits = (int)((double)kf_boost *
+ (alt_kf_grp_bits / (double)allocation_chunks));
+
+ if (cpi->twopass.kf_bits > alt_kf_bits) {
+ cpi->twopass.kf_bits = alt_kf_bits;
+ }
+ }
+ // Else if it is much harder than other frames in the group, make sure
+ // it at least receives an allocation in keeping with its relative
+ // error score
+ else {
+ alt_kf_bits =
+ (int)((double)cpi->twopass.bits_left *
+ (kf_mod_err /
+ DOUBLE_DIVIDE_CHECK(cpi->twopass.modified_error_left)));
+
+ if (alt_kf_bits > cpi->twopass.kf_bits) {
+ cpi->twopass.kf_bits = alt_kf_bits;
+ }
+ }
+
+ cpi->twopass.kf_group_bits -= cpi->twopass.kf_bits;
+ // Add in the minimum frame allowance
+ cpi->twopass.kf_bits += cpi->min_frame_bandwidth;
+
+ // Per frame bit target for this frame
+ cpi->per_frame_bandwidth = cpi->twopass.kf_bits;
+ // Convert to a per second bitrate
+ cpi->target_bandwidth = (int)(cpi->twopass.kf_bits *
+ cpi->output_frame_rate);
+ }
+
+ // Note the total error score of the kf group minus the key frame itself
+ cpi->twopass.kf_group_error_left = (int)(kf_group_err - kf_mod_err);
+
+ // Adjust the count of total modified error left.
+ // The count of bits left is adjusted elsewhere based on real coded
+ // frame sizes.
+ cpi->twopass.modified_error_left -= kf_group_err;
+}
diff --git a/libvpx/vp9/encoder/vp9_firstpass.h b/libvpx/vp9/encoder/vp9_firstpass.h
new file mode 100644
index 0000000..2296a66
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_firstpass.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_FIRSTPASS_H_
+#define VP9_ENCODER_VP9_FIRSTPASS_H_
+
+void vp9_init_first_pass(VP9_COMP *cpi);
+void vp9_first_pass(VP9_COMP *cpi);
+void vp9_end_first_pass(VP9_COMP *cpi);
+
+void vp9_init_second_pass(VP9_COMP *cpi);
+void vp9_second_pass(VP9_COMP *cpi);
+void vp9_end_second_pass(VP9_COMP *cpi);
+
+#endif // VP9_ENCODER_VP9_FIRSTPASS_H_
diff --git a/libvpx/vp9/encoder/vp9_lookahead.c b/libvpx/vp9/encoder/vp9_lookahead.c
new file mode 100644
index 0000000..b07d92a
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_lookahead.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <assert.h>
+#include <stdlib.h>
+
+#include "vpx_config.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/encoder/vp9_lookahead.h"
+#include "vp9/common/vp9_extend.h"
+
+#define MAX_LAG_BUFFERS 25
+
+struct lookahead_ctx {
+ unsigned int max_sz; /* Absolute size of the queue */
+ unsigned int sz; /* Number of buffers currently in the queue */
+ unsigned int read_idx; /* Read index */
+ unsigned int write_idx; /* Write index */
+ struct lookahead_entry *buf; /* Buffer list */
+};
+
+
+/* Return the buffer at the given absolute index and increment the index */
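+/* Worked example of the wrap-around: with max_sz = 5 and *idx = 4 the call
+ * returns buf + 4 and resets *idx to 0, so the buffer array behaves as a
+ * fixed-size circular queue. */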
+static struct lookahead_entry * pop(struct lookahead_ctx *ctx,
+ unsigned int *idx) {
+ unsigned int index = *idx;
+ struct lookahead_entry *buf = ctx->buf + index;
+
+ assert(index < ctx->max_sz);
+ if (++index >= ctx->max_sz)
+ index -= ctx->max_sz;
+ *idx = index;
+ return buf;
+}
+
+
+void vp9_lookahead_destroy(struct lookahead_ctx *ctx) {
+ if (ctx) {
+ if (ctx->buf) {
+ unsigned int i;
+
+ for (i = 0; i < ctx->max_sz; i++)
+ vp9_free_frame_buffer(&ctx->buf[i].img);
+ free(ctx->buf);
+ }
+ free(ctx);
+ }
+}
+
+
+struct lookahead_ctx * vp9_lookahead_init(unsigned int width,
+ unsigned int height,
+ unsigned int subsampling_x,
+ unsigned int subsampling_y,
+ unsigned int depth) {
+ struct lookahead_ctx *ctx = NULL;
+
+ // Clamp the lookahead queue depth
+ depth = clamp(depth, 1, MAX_LAG_BUFFERS);
+
+ // Allocate the lookahead structures
+ ctx = calloc(1, sizeof(*ctx));
+ if (ctx) {
+ unsigned int i;
+ ctx->max_sz = depth;
+ ctx->buf = calloc(depth, sizeof(*ctx->buf));
+ if (!ctx->buf)
+ goto bail;
+ for (i = 0; i < depth; i++)
+ if (vp9_alloc_frame_buffer(&ctx->buf[i].img,
+ width, height, subsampling_x, subsampling_y,
+ VP9BORDERINPIXELS))
+ goto bail;
+ }
+ return ctx;
+bail:
+ vp9_lookahead_destroy(ctx);
+ return NULL;
+}
+
+#define USE_PARTIAL_COPY 0
+
+int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+ int64_t ts_start, int64_t ts_end, unsigned int flags,
+ unsigned char *active_map) {
+ struct lookahead_entry *buf;
+#if USE_PARTIAL_COPY
+ int row, col, active_end;
+ int mb_rows = (src->y_height + 15) >> 4;
+ int mb_cols = (src->y_width + 15) >> 4;
+#endif
+
+ if (ctx->sz + 1 > ctx->max_sz)
+ return 1;
+ ctx->sz++;
+ buf = pop(ctx, &ctx->write_idx);
+
+#if USE_PARTIAL_COPY
+ // TODO(jkoleszar): This is disabled for now, as
+ // vp9_copy_and_extend_frame_with_rect is not subsampling/alpha aware.
+
+ // Only do this partial copy if the following conditions are all met:
+ // 1. The lookahead queue has a size of 1.
+ // 2. Active map is provided.
+ // 3. This is not a key frame, golden nor altref frame.
+ if (ctx->max_sz == 1 && active_map && !flags) {
+ for (row = 0; row < mb_rows; ++row) {
+ col = 0;
+
+ while (1) {
+ // Find the first active macroblock in this row.
+ for (; col < mb_cols; ++col) {
+ if (active_map[col])
+ break;
+ }
+
+ // No more active macroblocks in this row.
+ if (col == mb_cols)
+ break;
+
+ // Find the end of active region in this row.
+ active_end = col;
+
+ for (; active_end < mb_cols; ++active_end) {
+ if (!active_map[active_end])
+ break;
+ }
+
+ // Only copy this active region.
+ vp9_copy_and_extend_frame_with_rect(src, &buf->img,
+ row << 4,
+ col << 4, 16,
+ (active_end - col) << 4);
+
+ // Start again from the end of this active region.
+ col = active_end;
+ }
+
+ active_map += mb_cols;
+ }
+ } else {
+ vp9_copy_and_extend_frame(src, &buf->img);
+ }
+#else
+ // Partial copy not implemented yet
+ vp9_copy_and_extend_frame(src, &buf->img);
+#endif
+
+ buf->ts_start = ts_start;
+ buf->ts_end = ts_end;
+ buf->flags = flags;
+ return 0;
+}
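+
+/* A nonzero return above means the queue was full. The expectation (an
+ * assumption about callers, not enforced here) is that the caller drains a
+ * frame with vp9_lookahead_pop() before pushing again. */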
+
+
+struct lookahead_entry *vp9_lookahead_pop(struct lookahead_ctx *ctx,
+                                          int drain) {
+ struct lookahead_entry *buf = NULL;
+
+ if (ctx->sz && (drain || ctx->sz == ctx->max_sz)) {
+ buf = pop(ctx, &ctx->read_idx);
+ ctx->sz--;
+ }
+ return buf;
+}
+
+
+struct lookahead_entry *vp9_lookahead_peek(struct lookahead_ctx *ctx,
+                                           int index) {
+ struct lookahead_entry *buf = NULL;
+
+ assert(index < (int)ctx->max_sz);
+ if (index < (int)ctx->sz) {
+ index += ctx->read_idx;
+ if (index >= (int)ctx->max_sz)
+ index -= ctx->max_sz;
+ buf = ctx->buf + index;
+ }
+ return buf;
+}
+
+unsigned int vp9_lookahead_depth(struct lookahead_ctx *ctx) {
+ return ctx->sz;
+}
diff --git a/libvpx/vp9/encoder/vp9_lookahead.h b/libvpx/vp9/encoder/vp9_lookahead.h
new file mode 100644
index 0000000..81baa2c
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_lookahead.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_LOOKAHEAD_H_
+#define VP9_ENCODER_VP9_LOOKAHEAD_H_
+
+#include "vpx_scale/yv12config.h"
+#include "vpx/vpx_integer.h"
+
+struct lookahead_entry {
+ YV12_BUFFER_CONFIG img;
+ int64_t ts_start;
+ int64_t ts_end;
+ unsigned int flags;
+};
+
+
+struct lookahead_ctx;
+
+/**\brief Initializes the lookahead stage
+ *
+ * The lookahead stage is a queue of frame buffers on which some analysis
+ * may be done when buffers are enqueued.
+ */
+struct lookahead_ctx *vp9_lookahead_init(unsigned int width,
+ unsigned int height,
+ unsigned int subsampling_x,
+ unsigned int subsampling_y,
+ unsigned int depth);
+
+
+/**\brief Destroys the lookahead stage
+ */
+void vp9_lookahead_destroy(struct lookahead_ctx *ctx);
+
+
+/**\brief Enqueue a source buffer
+ *
+ * This function will copy the source image into a new framebuffer with
+ * the expected stride/border.
+ *
+ * If active_map is non-NULL and the queue was configured with a depth of
+ * one, then copy only active macroblocks.
+ *
+ * \param[in] ctx Pointer to the lookahead context
+ * \param[in] src Pointer to the image to enqueue
+ * \param[in] ts_start Timestamp for the start of this frame
+ * \param[in] ts_end Timestamp for the end of this frame
+ * \param[in] flags Flags set on this frame
+ * \param[in] active_map Map that specifies which macroblock is active
+ */
+int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+ int64_t ts_start, int64_t ts_end, unsigned int flags,
+ unsigned char *active_map);
+
+
+/**\brief Get the next source buffer to encode
+ *
+ * \param[in] ctx Pointer to the lookahead context
+ * \param[in] drain Flag indicating the buffer should be drained
+ * (return a buffer regardless of the current queue depth)
+ *
+ * \retval NULL, if drain is set and the queue is empty
+ * \retval NULL, if drain is not set and the queue has not reached the
+ *               configured depth
+ */
+struct lookahead_entry *vp9_lookahead_pop(struct lookahead_ctx *ctx,
+ int drain);
+
+
+/**\brief Get a future source buffer to encode
+ *
+ * \param[in] ctx Pointer to the lookahead context
+ * \param[in] index Index of the frame to be returned, 0 == next frame
+ *
+ * \retval NULL, if no buffer exists at the specified index
+ */
+struct lookahead_entry *vp9_lookahead_peek(struct lookahead_ctx *ctx,
+ int index);
+
+
+/**\brief Get the number of frames currently in the lookahead queue
+ *
+ * \param[in] ctx Pointer to the lookahead context
+ */
+unsigned int vp9_lookahead_depth(struct lookahead_ctx *ctx);
+
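+/* A minimal sketch of the intended push/pop flow, assuming hypothetical
+ * helpers read_frame() and encode_frame() and a lag of 10 frames; error
+ * handling is elided:
+ *
+ *   struct lookahead_ctx *la = vp9_lookahead_init(w, h, ss_x, ss_y, 10);
+ *   struct lookahead_entry *e;
+ *   while (read_frame(&img, &t0, &t1))
+ *     if (vp9_lookahead_push(la, &img, t0, t1, 0, NULL))
+ *       break;  // queue full; pop a frame before pushing more
+ *   while ((e = vp9_lookahead_pop(la, 1)) != NULL)
+ *     encode_frame(&e->img);
+ *   vp9_lookahead_destroy(la);
+ */
+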
+#endif // VP9_ENCODER_VP9_LOOKAHEAD_H_
diff --git a/libvpx/vp9/encoder/vp9_mbgraph.c b/libvpx/vp9/encoder/vp9_mbgraph.c
new file mode 100644
index 0000000..65fdcbe
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_mbgraph.c
@@ -0,0 +1,440 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+
+#include "vpx_mem/vpx_mem.h"
+#include "vp9/encoder/vp9_encodeintra.h"
+#include "vp9/encoder/vp9_rdopt.h"
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vp9/encoder/vp9_segmentation.h"
+
+static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi,
+ int_mv *ref_mv,
+ int_mv *dst_mv,
+ int mb_row,
+ int mb_col) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
+ unsigned int best_err;
+
+ const int tmp_col_min = x->mv_col_min;
+ const int tmp_col_max = x->mv_col_max;
+ const int tmp_row_min = x->mv_row_min;
+ const int tmp_row_max = x->mv_row_max;
+ int_mv ref_full;
+
+ // Further step/diamond searches as necessary
+ int step_param = cpi->sf.first_step +
+ (cpi->speed < 8 ? (cpi->speed > 5 ? 1 : 0) : 2);
+
+ vp9_clamp_mv_min_max(x, ref_mv);
+
+ ref_full.as_mv.col = ref_mv->as_mv.col >> 3;
+ ref_full.as_mv.row = ref_mv->as_mv.row >> 3;
+
+ /*cpi->sf.search_method == HEX*/
+ best_err = vp9_hex_search(x, &ref_full, dst_mv, step_param, x->errorperbit,
+ &v_fn_ptr, NULL, NULL, NULL, NULL, ref_mv);
+
+ // Try sub-pixel MC
+ // if (bestsme > error_thresh && bestsme < INT_MAX)
+ {
+ int distortion;
+ unsigned int sse;
+ best_err = cpi->find_fractional_mv_step(
+ x,
+ dst_mv, ref_mv,
+ x->errorperbit, &v_fn_ptr,
+ NULL, NULL,
+        &distortion, &sse);
+ }
+
+ vp9_set_mbmode_and_mvs(x, NEWMV, dst_mv);
+ vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_SIZE_MB16X16);
+ best_err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride,
+ INT_MAX);
+
+ /* restore UMV window */
+ x->mv_col_min = tmp_col_min;
+ x->mv_col_max = tmp_col_max;
+ x->mv_row_min = tmp_row_min;
+ x->mv_row_max = tmp_row_max;
+
+ return best_err;
+}
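+
+/* Summary of the iteration above: clamp the search window around ref_mv,
+ * run a full-pel hex search, refine to sub-pel, then score the resulting
+ * 16x16 prediction with SAD against the source block. */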
+
+static int do_16x16_motion_search(VP9_COMP *cpi,
+ int_mv *ref_mv, int_mv *dst_mv,
+ int buf_mb_y_offset, int mb_y_offset,
+ int mb_row, int mb_col) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ unsigned int err, tmp_err;
+ int_mv tmp_mv;
+
+ // Try zero MV first
+ // FIXME should really use something like near/nearest MV and/or MV prediction
+ err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
+ INT_MAX);
+ dst_mv->as_int = 0;
+
+ // Test last reference frame using the previous best mv as the
+ // starting point (best reference) for the search
+ tmp_err = do_16x16_motion_iteration(cpi, ref_mv, &tmp_mv, mb_row, mb_col);
+ if (tmp_err < err) {
+ err = tmp_err;
+ dst_mv->as_int = tmp_mv.as_int;
+ }
+
+  // If the current best reference mv is not centred on 0,0 then do a
+  // 0,0-based search as well.
+ if (ref_mv->as_int) {
+ unsigned int tmp_err;
+ int_mv zero_ref_mv, tmp_mv;
+
+ zero_ref_mv.as_int = 0;
+ tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv,
+ mb_row, mb_col);
+ if (tmp_err < err) {
+ dst_mv->as_int = tmp_mv.as_int;
+ err = tmp_err;
+ }
+ }
+
+ return err;
+}
+
+static int do_16x16_zerozero_search(VP9_COMP *cpi,
+ int_mv *dst_mv,
+ int buf_mb_y_offset, int mb_y_offset) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ unsigned int err;
+
+ // Try zero MV first
+ // FIXME should really use something like near/nearest MV and/or MV prediction
+ err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
+ INT_MAX);
+
+ dst_mv->as_int = 0;
+
+ return err;
+}
+
+static int find_best_16x16_intra(VP9_COMP *cpi,
+ int mb_y_offset,
+ MB_PREDICTION_MODE *pbest_mode) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_PREDICTION_MODE best_mode = -1, mode;
+ unsigned int best_err = INT_MAX;
+
+ // calculate SATD for each intra prediction mode;
+  // we're intentionally not doing 4x4; we just want a rough estimate
+ for (mode = DC_PRED; mode <= TM_PRED; mode++) {
+ unsigned int err;
+ const int bwl = b_width_log2(BLOCK_SIZE_MB16X16), bw = 4 << bwl;
+ const int bhl = b_height_log2(BLOCK_SIZE_MB16X16), bh = 4 << bhl;
+
+ xd->mode_info_context->mbmi.mode = mode;
+ vp9_build_intra_predictors(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride,
+ xd->mode_info_context->mbmi.mode,
+ bw, bh,
+ xd->up_available, xd->left_available,
+ xd->right_available);
+ err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride, best_err);
+
+ // find best
+ if (err < best_err) {
+ best_err = err;
+ best_mode = mode;
+ }
+ }
+
+ if (pbest_mode)
+ *pbest_mode = best_mode;
+
+ return best_err;
+}
+
+static void update_mbgraph_mb_stats(VP9_COMP *cpi,
+                                    MBGRAPH_MB_STATS *stats,
+                                    YV12_BUFFER_CONFIG *buf,
+                                    int mb_y_offset,
+                                    YV12_BUFFER_CONFIG *golden_ref,
+                                    int_mv *prev_golden_ref_mv,
+                                    int gld_y_offset,
+                                    YV12_BUFFER_CONFIG *alt_ref,
+                                    int_mv *prev_alt_ref_mv,
+                                    int arf_y_offset,
+                                    int mb_row,
+                                    int mb_col) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int intra_error;
+ VP9_COMMON *cm = &cpi->common;
+
+ // FIXME in practice we're completely ignoring chroma here
+ x->plane[0].src.buf = buf->y_buffer + mb_y_offset;
+ x->plane[0].src.stride = buf->y_stride;
+
+ xd->plane[0].dst.buf = cm->yv12_fb[cm->new_fb_idx].y_buffer + mb_y_offset;
+ xd->plane[0].dst.stride = cm->yv12_fb[cm->new_fb_idx].y_stride;
+
+ // do intra 16x16 prediction
+ intra_error = find_best_16x16_intra(cpi, mb_y_offset,
+ &stats->ref[INTRA_FRAME].m.mode);
+ if (intra_error <= 0)
+ intra_error = 1;
+ stats->ref[INTRA_FRAME].err = intra_error;
+
+  // Golden frame MV search, if it exists and is different from the last frame
+ if (golden_ref) {
+ int g_motion_error;
+ xd->plane[0].pre[0].buf = golden_ref->y_buffer + mb_y_offset;
+ xd->plane[0].pre[0].stride = golden_ref->y_stride;
+ g_motion_error = do_16x16_motion_search(cpi,
+ prev_golden_ref_mv,
+ &stats->ref[GOLDEN_FRAME].m.mv,
+ mb_y_offset, gld_y_offset,
+ mb_row, mb_col);
+ stats->ref[GOLDEN_FRAME].err = g_motion_error;
+ } else {
+ stats->ref[GOLDEN_FRAME].err = INT_MAX;
+ stats->ref[GOLDEN_FRAME].m.mv.as_int = 0;
+ }
+
+  // Alt-ref frame MV search, if it exists and is different from the
+  // last/golden frame
+ if (alt_ref) {
+ int a_motion_error;
+ xd->plane[0].pre[0].buf = alt_ref->y_buffer + mb_y_offset;
+ xd->plane[0].pre[0].stride = alt_ref->y_stride;
+ a_motion_error = do_16x16_zerozero_search(cpi,
+ &stats->ref[ALTREF_FRAME].m.mv,
+ mb_y_offset, arf_y_offset);
+
+ stats->ref[ALTREF_FRAME].err = a_motion_error;
+ } else {
+ stats->ref[ALTREF_FRAME].err = INT_MAX;
+ stats->ref[ALTREF_FRAME].m.mv.as_int = 0;
+ }
+}
+
+static void update_mbgraph_frame_stats(VP9_COMP *cpi,
+ MBGRAPH_FRAME_STATS *stats,
+ YV12_BUFFER_CONFIG *buf,
+ YV12_BUFFER_CONFIG *golden_ref,
+ YV12_BUFFER_CONFIG *alt_ref) {
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ VP9_COMMON *const cm = &cpi->common;
+
+ int mb_col, mb_row, offset = 0;
+ int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
+ int_mv arf_top_mv, gld_top_mv;
+ MODE_INFO mi_local;
+
+ // Make sure the mi context starts in a consistent state.
+ memset(&mi_local, 0, sizeof(mi_local));
+
+  // Set up limit values for motion vectors to prevent them from extending
+  // outside the UMV borders.
+ arf_top_mv.as_int = 0;
+ gld_top_mv.as_int = 0;
+ x->mv_row_min = -(VP9BORDERINPIXELS - 8 - VP9_INTERP_EXTEND);
+ x->mv_row_max = (cm->mb_rows - 1) * 8 + VP9BORDERINPIXELS
+ - 8 - VP9_INTERP_EXTEND;
+ xd->up_available = 0;
+ xd->plane[0].dst.stride = buf->y_stride;
+ xd->plane[0].pre[0].stride = buf->y_stride;
+ xd->plane[1].dst.stride = buf->uv_stride;
+ xd->mode_info_context = &mi_local;
+ mi_local.mbmi.sb_type = BLOCK_SIZE_MB16X16;
+ mi_local.mbmi.ref_frame[0] = LAST_FRAME;
+ mi_local.mbmi.ref_frame[1] = NONE;
+
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
+ int_mv arf_left_mv, gld_left_mv;
+ int mb_y_in_offset = mb_y_offset;
+ int arf_y_in_offset = arf_y_offset;
+ int gld_y_in_offset = gld_y_offset;
+
+    // Set up limit values for motion vectors to prevent them from extending
+    // outside the UMV borders.
+ arf_left_mv.as_int = arf_top_mv.as_int;
+ gld_left_mv.as_int = gld_top_mv.as_int;
+ x->mv_col_min = -(VP9BORDERINPIXELS - 8 - VP9_INTERP_EXTEND);
+ x->mv_col_max = (cm->mb_cols - 1) * 8 + VP9BORDERINPIXELS
+ - 8 - VP9_INTERP_EXTEND;
+ xd->left_available = 0;
+
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+ MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col];
+
+ update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset,
+ golden_ref, &gld_left_mv, gld_y_in_offset,
+ alt_ref, &arf_left_mv, arf_y_in_offset,
+ mb_row, mb_col);
+ arf_left_mv.as_int = mb_stats->ref[ALTREF_FRAME].m.mv.as_int;
+ gld_left_mv.as_int = mb_stats->ref[GOLDEN_FRAME].m.mv.as_int;
+ if (mb_col == 0) {
+ arf_top_mv.as_int = arf_left_mv.as_int;
+ gld_top_mv.as_int = gld_left_mv.as_int;
+ }
+ xd->left_available = 1;
+ mb_y_in_offset += 16;
+ gld_y_in_offset += 16;
+ arf_y_in_offset += 16;
+ x->mv_col_min -= 16;
+ x->mv_col_max -= 16;
+ }
+ xd->up_available = 1;
+ mb_y_offset += buf->y_stride * 16;
+ gld_y_offset += golden_ref->y_stride * 16;
+ if (alt_ref)
+ arf_y_offset += alt_ref->y_stride * 16;
+ x->mv_row_min -= 16;
+ x->mv_row_max -= 16;
+ offset += cm->mb_cols;
+ }
+}
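+
+/* The scan above is in raster order: each MB's golden/altref MV result
+ * seeds the search for its right-hand neighbour (gld_left_mv/arf_left_mv),
+ * and the first column's result seeds the next row (gld_top_mv/arf_top_mv),
+ * so MV predictions propagate cheaply across the frame. */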
+
+static void separate_arf_mbs(VP9_COMP *cpi) {
+ VP9_COMMON *const cm = &cpi->common;
+ int mb_col, mb_row, offset, i;
+ int ncnt[4];
+ int n_frames = cpi->mbgraph_n_frames;
+
+ int *arf_not_zz;
+
+ CHECK_MEM_ERROR(arf_not_zz,
+ vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), 1));
+
+ // We are not interested in results beyond the alt ref itself.
+ if (n_frames > cpi->frames_till_gf_update_due)
+ n_frames = cpi->frames_till_gf_update_due;
+
+ // defer cost to reference frames
+ for (i = n_frames - 1; i >= 0; i--) {
+ MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
+
+ for (offset = 0, mb_row = 0; mb_row < cm->mb_rows;
+ offset += cm->mb_cols, mb_row++) {
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+ MBGRAPH_MB_STATS *mb_stats = &frame_stats->mb_stats[offset + mb_col];
+
+ int altref_err = mb_stats->ref[ALTREF_FRAME].err;
+ int intra_err = mb_stats->ref[INTRA_FRAME ].err;
+ int golden_err = mb_stats->ref[GOLDEN_FRAME].err;
+
+        // Test whether the (0,0) altref prediction is usable: mark the MB
+        // if its error is large, or worse than intra or golden prediction.
+ if (altref_err > 1000 ||
+ altref_err > intra_err ||
+ altref_err > golden_err) {
+ arf_not_zz[offset + mb_col]++;
+ }
+ }
+ }
+ }
+
+ vpx_memset(ncnt, 0, sizeof(ncnt));
+ for (offset = 0, mb_row = 0; mb_row < cm->mb_rows;
+ offset += cm->mb_cols, mb_row++) {
+ for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+ // If any of the blocks in the sequence failed then the MB
+ // goes in segment 0
+ if (arf_not_zz[offset + mb_col]) {
+ ncnt[0]++;
+ cpi->segmentation_map[offset * 4 + 2 * mb_col] = 0;
+ cpi->segmentation_map[offset * 4 + 2 * mb_col + 1] = 0;
+ cpi->segmentation_map[offset * 4 + 2 * mb_col + cm->mi_cols] = 0;
+ cpi->segmentation_map[offset * 4 + 2 * mb_col + cm->mi_cols + 1] = 0;
+ } else {
+ cpi->segmentation_map[offset * 4 + 2 * mb_col] = 1;
+ cpi->segmentation_map[offset * 4 + 2 * mb_col + 1] = 1;
+ cpi->segmentation_map[offset * 4 + 2 * mb_col + cm->mi_cols] = 1;
+ cpi->segmentation_map[offset * 4 + 2 * mb_col + cm->mi_cols + 1] = 1;
+ ncnt[1]++;
+ }
+ }
+ }
+
+  // Only bother with segmentation if over 10% of the MBs are in the static
+  // segment.
+ // if ( ncnt[1] && (ncnt[0] / ncnt[1] < 10) )
+ if (1) {
+ // Note % of blocks that are marked as static
+    if (cm->MBs) {
+      cpi->static_mb_pct = (ncnt[1] * 100) / cm->MBs;
+    } else {
+      // This error case should not be reachable as this function should
+      // never be called with the common data structure uninitialized.
+      cpi->static_mb_pct = 0;
+    }
+
+ cpi->seg0_cnt = ncnt[0];
+ vp9_enable_segmentation((VP9_PTR)cpi);
+ } else {
+ cpi->static_mb_pct = 0;
+ vp9_disable_segmentation((VP9_PTR)cpi);
+ }
+
+  // Free locally allocated storage.
+ vpx_free(arf_not_zz);
+}
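+
+/* Indexing note for the map writes above: the segmentation map is in 8x8
+ * mode-info units and a 16x16 MB covers a 2x2 block of them, so
+ * (offset * 4 + 2 * mb_col) addresses the top-left unit and (+ cm->mi_cols)
+ * steps to the second row (this assumes mi_cols == 2 * mb_cols). */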
+
+void vp9_update_mbgraph_stats(VP9_COMP *cpi) {
+ VP9_COMMON *const cm = &cpi->common;
+ int i, n_frames = vp9_lookahead_depth(cpi->lookahead);
+ YV12_BUFFER_CONFIG *golden_ref =
+ &cm->yv12_fb[cm->ref_frame_map[cpi->gld_fb_idx]];
+
+  // We need to look ahead beyond where the ARF transitions into being a GF,
+  // so exit if we don't look ahead beyond that point.
+ if (n_frames <= cpi->frames_till_gf_update_due)
+ return;
+  if (n_frames > (int)cm->frames_till_alt_ref_frame)
+    n_frames = cm->frames_till_alt_ref_frame;
+ if (n_frames > MAX_LAG_BUFFERS)
+ n_frames = MAX_LAG_BUFFERS;
+
+ cpi->mbgraph_n_frames = n_frames;
+ for (i = 0; i < n_frames; i++) {
+ MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
+ vpx_memset(frame_stats->mb_stats, 0,
+ cm->mb_rows * cm->mb_cols * sizeof(*cpi->mbgraph_stats[i].mb_stats));
+ }
+
+ // do motion search to find contribution of each reference to data
+ // later on in this GF group
+ // FIXME really, the GF/last MC search should be done forward, and
+ // the ARF MC search backwards, to get optimal results for MV caching
+ for (i = 0; i < n_frames; i++) {
+ MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
+ struct lookahead_entry *q_cur = vp9_lookahead_peek(cpi->lookahead, i);
+
+ assert(q_cur != NULL);
+
+ update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img,
+ golden_ref, cpi->Source);
+ }
+
+ vp9_clear_system_state(); // __asm emms;
+
+ separate_arf_mbs(cpi);
+}
diff --git a/libvpx/vp9/encoder/vp9_mbgraph.h b/libvpx/vp9/encoder/vp9_mbgraph.h
new file mode 100644
index 0000000..c5bca4d
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_mbgraph.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_MBGRAPH_H_
+#define VP9_ENCODER_VP9_MBGRAPH_H_
+
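+/* Note: VP9_COMP is not declared here; this header assumes (an assumption
+ * about include order, not enforced) that the including file has already
+ * pulled in vp9_onyx_int.h. */
+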
+void vp9_update_mbgraph_stats(VP9_COMP *cpi);
+
+#endif // VP9_ENCODER_VP9_MBGRAPH_H_
diff --git a/libvpx/vp9/encoder/vp9_mcomp.c b/libvpx/vp9/encoder/vp9_mcomp.c
new file mode 100644
index 0000000..2e99736
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_mcomp.c
@@ -0,0 +1,2429 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <limits.h>
+#include <math.h>
+
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/encoder/vp9_mcomp.h"
+#include "vpx_mem/vpx_mem.h"
+#include "./vpx_config.h"
+#include "vp9/common/vp9_findnearmv.h"
+#include "vp9/common/vp9_common.h"
+
+void vp9_clamp_mv_min_max(MACROBLOCK *x, int_mv *ref_mv) {
+ int col_min = (ref_mv->as_mv.col >> 3) - MAX_FULL_PEL_VAL +
+ ((ref_mv->as_mv.col & 7) ? 1 : 0);
+ int row_min = (ref_mv->as_mv.row >> 3) - MAX_FULL_PEL_VAL +
+ ((ref_mv->as_mv.row & 7) ? 1 : 0);
+ int col_max = (ref_mv->as_mv.col >> 3) + MAX_FULL_PEL_VAL;
+ int row_max = (ref_mv->as_mv.row >> 3) + MAX_FULL_PEL_VAL;
+
+ /* Get intersection of UMV window and valid MV window to reduce # of checks in diamond search. */
+ if (x->mv_col_min < col_min)
+ x->mv_col_min = col_min;
+ if (x->mv_col_max > col_max)
+ x->mv_col_max = col_max;
+ if (x->mv_row_min < row_min)
+ x->mv_row_min = row_min;
+ if (x->mv_row_max > row_max)
+ x->mv_row_max = row_max;
+}
+
+int vp9_init_search_range(int width, int height) {
+ int sr = 0;
+ int frm = MIN(width, height);
+
+ while ((frm << sr) < MAX_FULL_PEL_VAL)
+ sr++;
+
+ if (sr)
+ sr--;
+
+ return sr;
+}
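+
+/* Worked example, assuming MAX_FULL_PEL_VAL were 1023: for a 64-pixel
+ * minimum dimension the loop stops at sr = 4 (64 << 4 = 1024 >= 1023), and
+ * the final decrement yields a search range of 3. */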
+
+int vp9_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvjcost, int *mvcost[2],
+ int weight, int ishp) {
+ MV v;
+ v.row = mv->as_mv.row - ref->as_mv.row;
+ v.col = mv->as_mv.col - ref->as_mv.col;
+ return ROUND_POWER_OF_TWO((mvjcost[vp9_get_mv_joint(&v)] +
+ mvcost[0][v.row] +
+ mvcost[1][v.col]) * weight, 7);
+}
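+
+/* ROUND_POWER_OF_TWO(v, 7) is (v + 64) >> 7, i.e. rounding division by
+ * 128. For example, a summed table cost of 512 with weight 96 gives
+ * (512 * 96 + 64) >> 7 = 384. */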
+
+static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvjcost, int *mvcost[2],
+ int error_per_bit, int ishp) {
+ if (mvcost) {
+ MV v;
+ v.row = mv->as_mv.row - ref->as_mv.row;
+ v.col = mv->as_mv.col - ref->as_mv.col;
+ return ROUND_POWER_OF_TWO((mvjcost[vp9_get_mv_joint(&v)] +
+ mvcost[0][v.row] +
+ mvcost[1][v.col]) * error_per_bit, 13);
+ }
+ return 0;
+}
+
+static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvjsadcost,
+ int *mvsadcost[2], int error_per_bit) {
+ if (mvsadcost) {
+ MV v;
+ v.row = mv->as_mv.row - ref->as_mv.row;
+ v.col = mv->as_mv.col - ref->as_mv.col;
+ return ROUND_POWER_OF_TWO((mvjsadcost[vp9_get_mv_joint(&v)] +
+ mvsadcost[0][v.row] +
+ mvsadcost[1][v.col]) * error_per_bit, 8);
+ }
+ return 0;
+}
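+
+/* The different shifts here and in mv_err_cost above (8 vs. 13) reflect the
+ * different fixed-point scales of sad_per_bit and error_per_bit; the exact
+ * scale factors are set by the rate-control code. */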
+
+void vp9_init_dsmotion_compensation(MACROBLOCK *x, int stride) {
+ int len;
+ int search_site_count = 0;
+
+ // Generate offsets for 4 search sites per step.
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = 0;
+ search_site_count++;
+
+ for (len = MAX_FIRST_STEP; len > 0; len /= 2) {
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = -len;
+ x->ss[search_site_count].offset = -len * stride;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = len;
+ x->ss[search_site_count].offset = len * stride;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = -len;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = -len;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = len;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = len;
+ search_site_count++;
+ }
+
+ x->ss_count = search_site_count;
+ x->searches_per_step = 4;
+}
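+
+/* The table built above is the centre point followed by one +-shaped ring
+ * of four sites (up/down/left/right) per step size, halving from
+ * MAX_FIRST_STEP down to 1; vp9_init3smotion_compensation below builds the
+ * eight-point variant that also covers the diagonals. */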
+
+void vp9_init3smotion_compensation(MACROBLOCK *x, int stride) {
+ int len;
+ int search_site_count = 0;
+
+ // Generate offsets for 8 search sites per step.
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = 0;
+ search_site_count++;
+
+ for (len = MAX_FIRST_STEP; len > 0; len /= 2) {
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = -len;
+ x->ss[search_site_count].offset = -len * stride;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = 0;
+ x->ss[search_site_count].mv.row = len;
+ x->ss[search_site_count].offset = len * stride;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = -len;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = -len;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = len;
+ x->ss[search_site_count].mv.row = 0;
+ x->ss[search_site_count].offset = len;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = -len;
+ x->ss[search_site_count].mv.row = -len;
+ x->ss[search_site_count].offset = -len * stride - len;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = len;
+ x->ss[search_site_count].mv.row = -len;
+ x->ss[search_site_count].offset = -len * stride + len;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = -len;
+ x->ss[search_site_count].mv.row = len;
+ x->ss[search_site_count].offset = len * stride - len;
+ search_site_count++;
+
+ // Compute offsets for search sites.
+ x->ss[search_site_count].mv.col = len;
+ x->ss[search_site_count].mv.row = len;
+ x->ss[search_site_count].offset = len * stride + len;
+ search_site_count++;
+ }
+
+ x->ss_count = search_site_count;
+ x->searches_per_step = 8;
+}
+
+/*
+ * To avoid the penalty of a crossing cache-line read, preload the reference
+ * area into a small buffer, which is aligned to make sure there won't be a
+ * crossing cache-line read while reading from this buffer. This reduces the
+ * CPU cycles spent on reading ref data in sub-pixel filter functions.
+ * TODO: Currently, since the sub-pixel search range here is -3 ~ 3, copying
+ * a 22-row x 32-col area is enough for a 16x16 macroblock. Later, for
+ * SPLITMV, we could reduce the area.
+ */
+
+/* estimated cost of a motion vector (r,c) */
+#define MVC(r, c) \
+ (mvcost ? \
+ ((mvjcost[((r) != rr) * 2 + ((c) != rc)] + \
+ mvcost[0][((r) - rr)] + mvcost[1][((c) - rc)]) * \
+ error_per_bit + 4096) >> 13 : 0)
+
+
+#define SP(x) (((x) & 7) << 1) // convert motion vector component to offset
+ // for svf calc
+
+#define IFMVCV(r, c, s, e) \
+ if (c >= minc && c <= maxc && r >= minr && r <= maxr) \
+ s \
+ else \
+ e;
+
+/* pointer to predictor base of a motionvector */
+#define PRE(r, c) (y + (((r) >> 3) * y_stride + ((c) >> 3) -(offset)))
+
+/* returns subpixel variance error function */
+#define DIST(r, c) \
+ vfp->svf(PRE(r, c), y_stride, SP(c), SP(r), z, src_stride, &sse)
+
+/* checks if (r, c) has better score than previous best */
+#define CHECK_BETTER(v, r, c) \
+ IFMVCV(r, c, { \
+ thismse = (DIST(r, c)); \
+ if ((v = MVC(r, c) + thismse) < besterr) { \
+ besterr = v; \
+ br = r; \
+ bc = c; \
+ *distortion = thismse; \
+ *sse1 = sse; \
+ } \
+ }, \
+ v = INT_MAX;)
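+
+/* How the macros above compose: CHECK_BETTER bounds-checks (r, c) via
+ * IFMVCV, evaluates the sub-pel variance via DIST (PRE computes the
+ * predictor address, SP converts the sub-pel fraction to a filter offset),
+ * adds the MVC rate cost, and keeps the point if the total beats besterr. */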
+
+int vp9_find_best_sub_pixel_step_iteratively(MACROBLOCK *x,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp9_variance_fn_ptr_t *vfp,
+ int *mvjcost, int *mvcost[2],
+ int *distortion,
+ unsigned int *sse1) {
+ uint8_t *z = x->plane[0].src.buf;
+ int src_stride = x->plane[0].src.stride;
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ int rr, rc, br, bc, hstep;
+ int tr, tc;
+ unsigned int besterr = INT_MAX;
+ unsigned int left, right, up, down, diag;
+ unsigned int sse;
+ unsigned int whichdir;
+ unsigned int halfiters = 4;
+ unsigned int quarteriters = 4;
+ unsigned int eighthiters = 4;
+ int thismse;
+ int maxc, minc, maxr, minr;
+ int y_stride;
+ int offset;
+ int usehp = xd->allow_high_precision_mv;
+
+ uint8_t *y = xd->plane[0].pre[0].buf +
+ (bestmv->as_mv.row) * xd->plane[0].pre[0].stride +
+ bestmv->as_mv.col;
+
+ y_stride = xd->plane[0].pre[0].stride;
+
+ rr = ref_mv->as_mv.row;
+ rc = ref_mv->as_mv.col;
+ br = bestmv->as_mv.row << 3;
+ bc = bestmv->as_mv.col << 3;
+ hstep = 4;
+  minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) -
+             ((1 << MV_MAX_BITS) - 1));
+  maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) +
+             ((1 << MV_MAX_BITS) - 1));
+  minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) -
+             ((1 << MV_MAX_BITS) - 1));
+  maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) +
+             ((1 << MV_MAX_BITS) - 1));
+
+ tr = br;
+ tc = bc;
+
+ offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
+
+ // central mv
+ bestmv->as_mv.row <<= 3;
+ bestmv->as_mv.col <<= 3;
+
+ // calculate central point error
+ besterr = vfp->vf(y, y_stride, z, src_stride, sse1);
+ *distortion = besterr;
+ besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost,
+ error_per_bit, xd->allow_high_precision_mv);
+
+  // TODO: Each subsequent iteration checks at least one point in common
+  // with the last iteration; it could be two (if the diagonal was selected).
+ while (--halfiters) {
+ // 1/2 pel
+ CHECK_BETTER(left, tr, tc - hstep);
+ CHECK_BETTER(right, tr, tc + hstep);
+ CHECK_BETTER(up, tr - hstep, tc);
+ CHECK_BETTER(down, tr + hstep, tc);
+
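+    // whichdir packs the two 1-D winners into two bits: bit 0 is the
+    // horizontal winner (0 = left, 1 = right) and bit 1 the vertical winner
+    // (0 = up, 1 = down); together they select the diagonal checked below.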
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+
+ switch (whichdir) {
+ case 0:
+ CHECK_BETTER(diag, tr - hstep, tc - hstep);
+ break;
+ case 1:
+ CHECK_BETTER(diag, tr - hstep, tc + hstep);
+ break;
+ case 2:
+ CHECK_BETTER(diag, tr + hstep, tc - hstep);
+ break;
+ case 3:
+ CHECK_BETTER(diag, tr + hstep, tc + hstep);
+ break;
+ }
+
+ // no reason to check the same one again.
+ if (tr == br && tc == bc)
+ break;
+
+ tr = br;
+ tc = bc;
+ }
+
+  // TODO: Each subsequent iteration checks at least one point in common
+  // with the last iteration; it could be two (if the diagonal was selected).
+  // 1/4 pel
+ hstep >>= 1;
+ while (--quarteriters) {
+ CHECK_BETTER(left, tr, tc - hstep);
+ CHECK_BETTER(right, tr, tc + hstep);
+ CHECK_BETTER(up, tr - hstep, tc);
+ CHECK_BETTER(down, tr + hstep, tc);
+
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+
+ switch (whichdir) {
+ case 0:
+ CHECK_BETTER(diag, tr - hstep, tc - hstep);
+ break;
+ case 1:
+ CHECK_BETTER(diag, tr - hstep, tc + hstep);
+ break;
+ case 2:
+ CHECK_BETTER(diag, tr + hstep, tc - hstep);
+ break;
+ case 3:
+ CHECK_BETTER(diag, tr + hstep, tc + hstep);
+ break;
+ }
+
+ // no reason to check the same one again.
+ if (tr == br && tc == bc)
+ break;
+
+ tr = br;
+ tc = bc;
+ }
+
+ if (xd->allow_high_precision_mv) {
+ usehp = vp9_use_nmv_hp(&ref_mv->as_mv);
+ } else {
+ usehp = 0;
+ }
+
+ if (usehp) {
+ hstep >>= 1;
+ while (--eighthiters) {
+ CHECK_BETTER(left, tr, tc - hstep);
+ CHECK_BETTER(right, tr, tc + hstep);
+ CHECK_BETTER(up, tr - hstep, tc);
+ CHECK_BETTER(down, tr + hstep, tc);
+
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+
+ switch (whichdir) {
+ case 0:
+ CHECK_BETTER(diag, tr - hstep, tc - hstep);
+ break;
+ case 1:
+ CHECK_BETTER(diag, tr - hstep, tc + hstep);
+ break;
+ case 2:
+ CHECK_BETTER(diag, tr + hstep, tc - hstep);
+ break;
+ case 3:
+ CHECK_BETTER(diag, tr + hstep, tc + hstep);
+ break;
+ }
+
+ // no reason to check the same one again.
+ if (tr == br && tc == bc)
+ break;
+
+ tr = br;
+ tc = bc;
+ }
+ }
+ bestmv->as_mv.row = br;
+ bestmv->as_mv.col = bc;
+
+ if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL << 3)) ||
+ (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL << 3)))
+ return INT_MAX;
+
+ return besterr;
+}
+
+#undef DIST
+/* returns subpixel variance error function */
+#define DIST(r, c) \
+ vfp->svaf(PRE(r, c), y_stride, SP(c), SP(r), \
+ z, src_stride, &sse, second_pred)
+
+int vp9_find_best_sub_pixel_comp(MACROBLOCK *x,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp9_variance_fn_ptr_t *vfp,
+ int *mvjcost, int *mvcost[2],
+ int *distortion,
+ unsigned int *sse1,
+ const uint8_t *second_pred, int w, int h) {
+ uint8_t *z = x->plane[0].src.buf;
+ int src_stride = x->plane[0].src.stride;
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ int rr, rc, br, bc, hstep;
+ int tr, tc;
+ unsigned int besterr = INT_MAX;
+ unsigned int left, right, up, down, diag;
+ unsigned int sse;
+ unsigned int whichdir;
+ unsigned int halfiters = 4;
+ unsigned int quarteriters = 4;
+ unsigned int eighthiters = 4;
+ int thismse;
+ int maxc, minc, maxr, minr;
+ int y_stride;
+ int offset;
+ int usehp = xd->allow_high_precision_mv;
+
+ uint8_t *comp_pred = vpx_memalign(16, w * h * sizeof(uint8_t));
+ uint8_t *y = xd->plane[0].pre[0].buf +
+ (bestmv->as_mv.row) * xd->plane[0].pre[0].stride +
+ bestmv->as_mv.col;
+
+ y_stride = xd->plane[0].pre[0].stride;
+
+ rr = ref_mv->as_mv.row;
+ rc = ref_mv->as_mv.col;
+ br = bestmv->as_mv.row << 3;
+ bc = bestmv->as_mv.col << 3;
+ hstep = 4;
+ minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) -
+ ((1 << MV_MAX_BITS) - 1));
+ maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) +
+ ((1 << MV_MAX_BITS) - 1));
+ minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) -
+ ((1 << MV_MAX_BITS) - 1));
+ maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) +
+ ((1 << MV_MAX_BITS) - 1));
+
+ tr = br;
+ tc = bc;
+
+ offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
+
+ // central mv
+ bestmv->as_mv.row <<= 3;
+ bestmv->as_mv.col <<= 3;
+
+ // calculate central point error
+ // TODO(yunqingwang): central pointer error was already calculated in full-
+ // pixel search, and can be passed in this function.
+ comp_avg_pred(comp_pred, second_pred, w, h, y, y_stride);
+ besterr = vfp->vf(comp_pred, w, z, src_stride, sse1);
+ *distortion = besterr;
+ besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost,
+ error_per_bit, xd->allow_high_precision_mv);
+
+  // Each subsequent iteration checks at least one point in common with the
+  // last iteration; it could be two (if the diagonal was selected).
+ while (--halfiters) {
+ // 1/2 pel
+ CHECK_BETTER(left, tr, tc - hstep);
+ CHECK_BETTER(right, tr, tc + hstep);
+ CHECK_BETTER(up, tr - hstep, tc);
+ CHECK_BETTER(down, tr + hstep, tc);
+
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+
+ switch (whichdir) {
+ case 0:
+ CHECK_BETTER(diag, tr - hstep, tc - hstep);
+ break;
+ case 1:
+ CHECK_BETTER(diag, tr - hstep, tc + hstep);
+ break;
+ case 2:
+ CHECK_BETTER(diag, tr + hstep, tc - hstep);
+ break;
+ case 3:
+ CHECK_BETTER(diag, tr + hstep, tc + hstep);
+ break;
+ }
+
+ // no reason to check the same one again.
+ if (tr == br && tc == bc)
+ break;
+
+ tr = br;
+ tc = bc;
+ }
+
+  // Each subsequent iteration checks at least one point in common with the
+  // last iteration; it could be two (if the diagonal was selected).
+  // 1/4 pel
+ hstep >>= 1;
+ while (--quarteriters) {
+ CHECK_BETTER(left, tr, tc - hstep);
+ CHECK_BETTER(right, tr, tc + hstep);
+ CHECK_BETTER(up, tr - hstep, tc);
+ CHECK_BETTER(down, tr + hstep, tc);
+
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+
+ switch (whichdir) {
+ case 0:
+ CHECK_BETTER(diag, tr - hstep, tc - hstep);
+ break;
+ case 1:
+ CHECK_BETTER(diag, tr - hstep, tc + hstep);
+ break;
+ case 2:
+ CHECK_BETTER(diag, tr + hstep, tc - hstep);
+ break;
+ case 3:
+ CHECK_BETTER(diag, tr + hstep, tc + hstep);
+ break;
+ }
+
+ // no reason to check the same one again.
+ if (tr == br && tc == bc)
+ break;
+
+ tr = br;
+ tc = bc;
+ }
+
+ if (xd->allow_high_precision_mv) {
+ usehp = vp9_use_nmv_hp(&ref_mv->as_mv);
+ } else {
+ usehp = 0;
+ }
+
+ if (usehp) {
+ hstep >>= 1;
+ while (--eighthiters) {
+ CHECK_BETTER(left, tr, tc - hstep);
+ CHECK_BETTER(right, tr, tc + hstep);
+ CHECK_BETTER(up, tr - hstep, tc);
+ CHECK_BETTER(down, tr + hstep, tc);
+
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+
+ switch (whichdir) {
+ case 0:
+ CHECK_BETTER(diag, tr - hstep, tc - hstep);
+ break;
+ case 1:
+ CHECK_BETTER(diag, tr - hstep, tc + hstep);
+ break;
+ case 2:
+ CHECK_BETTER(diag, tr + hstep, tc - hstep);
+ break;
+ case 3:
+ CHECK_BETTER(diag, tr + hstep, tc + hstep);
+ break;
+ }
+
+ // no reason to check the same one again.
+ if (tr == br && tc == bc)
+ break;
+
+ tr = br;
+ tc = bc;
+ }
+ }
+ bestmv->as_mv.row = br;
+ bestmv->as_mv.col = bc;
+
+ vpx_free(comp_pred);
+
+ if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL << 3)) ||
+ (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL << 3)))
+ return INT_MAX;
+
+ return besterr;
+}
+
+
+#undef MVC
+#undef PRE
+#undef DIST
+#undef IFMVCV
+#undef CHECK_BETTER
+#undef MIN
+#undef MAX
+
+int vp9_find_best_sub_pixel_step(MACROBLOCK *x,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp9_variance_fn_ptr_t *vfp,
+ int *mvjcost, int *mvcost[2], int *distortion,
+ unsigned int *sse1) {
+ int bestmse = INT_MAX;
+ int_mv startmv;
+ int_mv this_mv;
+ int_mv orig_mv;
+ int yrow_movedback = 0, ycol_movedback = 0;
+ uint8_t *z = x->plane[0].src.buf;
+ int src_stride = x->plane[0].src.stride;
+ int left, right, up, down, diag;
+ unsigned int sse;
+ int whichdir;
+ int thismse;
+ int y_stride;
+ MACROBLOCKD *xd = &x->e_mbd;
+ int usehp = xd->allow_high_precision_mv;
+
+ uint8_t *y = xd->plane[0].pre[0].buf +
+ (bestmv->as_mv.row) * xd->plane[0].pre[0].stride +
+ bestmv->as_mv.col;
+ y_stride = xd->plane[0].pre[0].stride;
+
+ // central mv
+ bestmv->as_mv.row <<= 3;
+ bestmv->as_mv.col <<= 3;
+ startmv = *bestmv;
+ orig_mv = *bestmv;
+
+ // calculate central point error
+ bestmse = vfp->vf(y, y_stride, z, src_stride, sse1);
+ *distortion = bestmse;
+ bestmse += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
+
+ // go left then right and check error
+ this_mv.as_mv.row = startmv.as_mv.row;
+ this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
+ thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, src_stride, &sse);
+ left = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
+
+ if (left < bestmse) {
+ *bestmv = this_mv;
+ bestmse = left;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ this_mv.as_mv.col += 8;
+ thismse = vfp->svf_halfpix_h(y, y_stride, z, src_stride, &sse);
+ right = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
+ error_per_bit, xd->allow_high_precision_mv);
+
+ if (right < bestmse) {
+ *bestmv = this_mv;
+ bestmse = right;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ // go up then down and check error
+ this_mv.as_mv.col = startmv.as_mv.col;
+ this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
+ thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, src_stride, &sse);
+ up = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
+
+ if (up < bestmse) {
+ *bestmv = this_mv;
+ bestmse = up;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ this_mv.as_mv.row += 8;
+ thismse = vfp->svf_halfpix_v(y, y_stride, z, src_stride, &sse);
+ down = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
+
+ if (down < bestmse) {
+ *bestmv = this_mv;
+ bestmse = down;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ // now check 1 more diagonal
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+ this_mv = startmv;
+
+ switch (whichdir) {
+ case 0:
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
+ thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, src_stride,
+ &sse);
+ break;
+ case 1:
+ this_mv.as_mv.col += 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
+ thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, src_stride,
+ &sse);
+ break;
+ case 2:
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row += 4;
+ thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, src_stride, &sse);
+ break;
+ case 3:
+ default:
+ this_mv.as_mv.col += 4;
+ this_mv.as_mv.row += 4;
+ thismse = vfp->svf_halfpix_hv(y, y_stride, z, src_stride, &sse);
+ break;
+ }
+
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
+
+ if (diag < bestmse) {
+ *bestmv = this_mv;
+ bestmse = diag;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ // time to check quarter pels.
+ if (bestmv->as_mv.row < startmv.as_mv.row) {
+ y -= y_stride;
+ yrow_movedback = 1;
+ }
+
+ if (bestmv->as_mv.col < startmv.as_mv.col) {
+ y--;
+ ycol_movedback = 1;
+ }
+
+ startmv = *bestmv;
+
+ // go left then right and check error
+ this_mv.as_mv.row = startmv.as_mv.row;
+
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col = startmv.as_mv.col - 2;
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
+ thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z,
+ src_stride, &sse);
+ }
+
+ left = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
+
+ if (left < bestmse) {
+ *bestmv = this_mv;
+ bestmse = left;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ this_mv.as_mv.col += 4;
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
+ right = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
+ error_per_bit, xd->allow_high_precision_mv);
+
+ if (right < bestmse) {
+ *bestmv = this_mv;
+ bestmse = right;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ // go up then down and check error
+ this_mv.as_mv.col = startmv.as_mv.col;
+
+ if (startmv.as_mv.row & 7) {
+ this_mv.as_mv.row = startmv.as_mv.row - 2;
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
+ } else {
+ this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
+ thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(6),
+ z, src_stride, &sse);
+ }
+
+ up = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
+
+ if (up < bestmse) {
+ *bestmv = this_mv;
+ bestmse = up;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ this_mv.as_mv.row += 4;
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
+ down = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
+
+ if (down < bestmse) {
+ *bestmv = this_mv;
+ bestmse = down;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ // now check 1 more diagonal
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+
+ this_mv = startmv;
+
+ switch (whichdir) {
+ case 0:
+
+ if (startmv.as_mv.row & 7) {
+ this_mv.as_mv.row -= 2;
+
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col -= 2;
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
+ thismse = vfp->svf(y - 1, y_stride,
+ SP(6), SP(this_mv.as_mv.row), z, src_stride, &sse);
+ }
+ } else {
+ this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
+
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col -= 2;
+ thismse = vfp->svf(y - y_stride, y_stride,
+ SP(this_mv.as_mv.col), SP(6), z, src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
+ thismse = vfp->svf(y - y_stride - 1, y_stride,
+ SP(6), SP(6), z, src_stride, &sse);
+ }
+ }
+
+ break;
+ case 1:
+ this_mv.as_mv.col += 2;
+
+ if (startmv.as_mv.row & 7) {
+ this_mv.as_mv.row -= 2;
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
+ } else {
+ this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
+ thismse = vfp->svf(y - y_stride, y_stride,
+ SP(this_mv.as_mv.col), SP(6), z, src_stride, &sse);
+ }
+
+ break;
+ case 2:
+ this_mv.as_mv.row += 2;
+
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col -= 2;
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
+ thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z,
+ src_stride, &sse);
+ }
+
+ break;
+ case 3:
+ this_mv.as_mv.col += 2;
+ this_mv.as_mv.row += 2;
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
+ break;
+ }
+
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
+
+ if (diag < bestmse) {
+ *bestmv = this_mv;
+ bestmse = diag;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+  if (xd->allow_high_precision_mv) {
+ usehp = vp9_use_nmv_hp(&ref_mv->as_mv);
+ } else {
+ usehp = 0;
+ }
+ if (!usehp)
+ return bestmse;
+
+ /* Now do 1/8th pixel */
+ if (bestmv->as_mv.row < orig_mv.as_mv.row && !yrow_movedback) {
+ y -= y_stride;
+ yrow_movedback = 1;
+ }
+
+ if (bestmv->as_mv.col < orig_mv.as_mv.col && !ycol_movedback) {
+ y--;
+ ycol_movedback = 1;
+ }
+
+ startmv = *bestmv;
+
+ // go left then right and check error
+ this_mv.as_mv.row = startmv.as_mv.row;
+
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col = startmv.as_mv.col - 1;
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 7;
+ thismse = vfp->svf(y - 1, y_stride, SP(7), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
+ }
+
+ left = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
+
+ if (left < bestmse) {
+ *bestmv = this_mv;
+ bestmse = left;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ this_mv.as_mv.col += 2;
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
+ right = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
+ error_per_bit, xd->allow_high_precision_mv);
+
+ if (right < bestmse) {
+ *bestmv = this_mv;
+ bestmse = right;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ // go up then down and check error
+ this_mv.as_mv.col = startmv.as_mv.col;
+
+ if (startmv.as_mv.row & 7) {
+ this_mv.as_mv.row = startmv.as_mv.row - 1;
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
+ } else {
+ this_mv.as_mv.row = (startmv.as_mv.row - 8) | 7;
+ thismse = vfp->svf(y - y_stride, y_stride,
+ SP(this_mv.as_mv.col), SP(7), z, src_stride, &sse);
+ }
+
+ up = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
+
+ if (up < bestmse) {
+ *bestmv = this_mv;
+ bestmse = up;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ this_mv.as_mv.row += 2;
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
+ down = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
+
+ if (down < bestmse) {
+ *bestmv = this_mv;
+ bestmse = down;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ // now check 1 more diagonal
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+
+ this_mv = startmv;
+
+ switch (whichdir) {
+ case 0:
+
+ if (startmv.as_mv.row & 7) {
+ this_mv.as_mv.row -= 1;
+
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col -= 1;
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 7;
+ thismse = vfp->svf(y - 1, y_stride,
+ SP(7), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
+ }
+ } else {
+ this_mv.as_mv.row = (startmv.as_mv.row - 8) | 7;
+
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col -= 1;
+ thismse = vfp->svf(y - y_stride, y_stride,
+ SP(this_mv.as_mv.col), SP(7), z, src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 7;
+ thismse = vfp->svf(y - y_stride - 1, y_stride,
+ SP(7), SP(7), z, src_stride, &sse);
+ }
+ }
+
+ break;
+ case 1:
+ this_mv.as_mv.col += 1;
+
+ if (startmv.as_mv.row & 7) {
+ this_mv.as_mv.row -= 1;
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
+ } else {
+ this_mv.as_mv.row = (startmv.as_mv.row - 8) | 7;
+ thismse = vfp->svf(y - y_stride, y_stride,
+ SP(this_mv.as_mv.col), SP(7), z, src_stride, &sse);
+ }
+
+ break;
+ case 2:
+ this_mv.as_mv.row += 1;
+
+ if (startmv.as_mv.col & 7) {
+ this_mv.as_mv.col -= 1;
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
+ } else {
+ this_mv.as_mv.col = (startmv.as_mv.col - 8) | 7;
+ thismse = vfp->svf(y - 1, y_stride,
+ SP(7), SP(this_mv.as_mv.row), z, src_stride, &sse);
+ }
+
+ break;
+ case 3:
+ this_mv.as_mv.col += 1;
+ this_mv.as_mv.row += 1;
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
+ break;
+ }
+
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
+
+ if (diag < bestmse) {
+ *bestmv = this_mv;
+ bestmse = diag;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ return bestmse;
+}
+
+#undef SP
+
+int vp9_find_best_half_pixel_step(MACROBLOCK *x,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp9_variance_fn_ptr_t *vfp,
+ int *mvjcost, int *mvcost[2],
+ int *distortion,
+ unsigned int *sse1) {
+ int bestmse = INT_MAX;
+ int_mv startmv;
+ int_mv this_mv;
+ uint8_t *z = x->plane[0].src.buf;
+ int src_stride = x->plane[0].src.stride;
+ int left, right, up, down, diag;
+ unsigned int sse;
+ int whichdir;
+ int thismse;
+ int y_stride;
+ MACROBLOCKD *xd = &x->e_mbd;
+
+ uint8_t *y = xd->plane[0].pre[0].buf +
+ (bestmv->as_mv.row) * xd->plane[0].pre[0].stride + bestmv->as_mv.col;
+ y_stride = xd->plane[0].pre[0].stride;
+
+ // central mv
+ bestmv->as_mv.row <<= 3;
+ bestmv->as_mv.col <<= 3;
+ startmv = *bestmv;
+
+ // calculate central point error
+ bestmse = vfp->vf(y, y_stride, z, src_stride, sse1);
+ *distortion = bestmse;
+ bestmse += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
+
+ // go left then right and check error
+ this_mv.as_mv.row = startmv.as_mv.row;
+ this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
+ thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, src_stride, &sse);
+ left = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
+
+ if (left < bestmse) {
+ *bestmv = this_mv;
+ bestmse = left;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ this_mv.as_mv.col += 8;
+ thismse = vfp->svf_halfpix_h(y, y_stride, z, src_stride, &sse);
+ right = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
+ error_per_bit, xd->allow_high_precision_mv);
+
+ if (right < bestmse) {
+ *bestmv = this_mv;
+ bestmse = right;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ // go up then down and check error
+ this_mv.as_mv.col = startmv.as_mv.col;
+ this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
+ thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, src_stride, &sse);
+ up = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
+
+ if (up < bestmse) {
+ *bestmv = this_mv;
+ bestmse = up;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ this_mv.as_mv.row += 8;
+ thismse = vfp->svf_halfpix_v(y, y_stride, z, src_stride, &sse);
+ down = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
+
+ if (down < bestmse) {
+ *bestmv = this_mv;
+ bestmse = down;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ // now check 1 more diagonal -
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
+ this_mv = startmv;
+
+ switch (whichdir) {
+ case 0:
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
+ thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride,
+ z, src_stride, &sse);
+ break;
+ case 1:
+ this_mv.as_mv.col += 4;
+ this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
+ thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride,
+ z, src_stride, &sse);
+ break;
+ case 2:
+ this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
+ this_mv.as_mv.row += 4;
+ thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, src_stride, &sse);
+ break;
+ case 3:
+ default:
+ this_mv.as_mv.col += 4;
+ this_mv.as_mv.row += 4;
+ thismse = vfp->svf_halfpix_hv(y, y_stride, z, src_stride, &sse);
+ break;
+ }
+
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
+ xd->allow_high_precision_mv);
+
+ if (diag < bestmse) {
+ *bestmv = this_mv;
+ bestmse = diag;
+ *distortion = thismse;
+ *sse1 = sse;
+ }
+
+ return bestmse;
+}
+
+#define CHECK_BOUNDS(range) \
+ {\
+ all_in = 1;\
+ all_in &= ((br-range) >= x->mv_row_min);\
+ all_in &= ((br+range) <= x->mv_row_max);\
+ all_in &= ((bc-range) >= x->mv_col_min);\
+ all_in &= ((bc+range) <= x->mv_col_max);\
+ }
+
+#define CHECK_POINT \
+ {\
+ if (this_mv.as_mv.col < x->mv_col_min) continue;\
+ if (this_mv.as_mv.col > x->mv_col_max) continue;\
+ if (this_mv.as_mv.row < x->mv_row_min) continue;\
+ if (this_mv.as_mv.row > x->mv_row_max) continue;\
+ }
+
+#define CHECK_BETTER \
+ {\
+ if (thissad < bestsad)\
+ {\
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost, \
+ sad_per_bit);\
+ if (thissad < bestsad)\
+ {\
+ bestsad = thissad;\
+ best_site = i;\
+ }\
+ }\
+ }
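+
+/* Note the two-stage compare in CHECK_BETTER: the raw SAD is tested first,
+ * so the MV rate cost is only added (and the comparison redone) for
+ * candidates that are already competitive on distortion alone. */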
+
+static const MV next_chkpts[6][3] = {
+ {{ -2, 0}, { -1, -2}, {1, -2}},
+ {{ -1, -2}, {1, -2}, {2, 0}},
+ {{1, -2}, {2, 0}, {1, 2}},
+ {{2, 0}, {1, 2}, { -1, 2}},
+ {{1, 2}, { -1, 2}, { -2, 0}},
+ {{ -1, 2}, { -2, 0}, { -1, -2}}
+};
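+
+/* After the initial six-point hexagon probe in vp9_hex_search moves to
+ * vertex k, only three candidate points are new; next_chkpts[k] lists them,
+ * and the k += 5 + best_site update in the search loop is arithmetic
+ * modulo 6. */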
+
+int vp9_hex_search(MACROBLOCK *x,
+                   int_mv *ref_mv,
+                   int_mv *best_mv,
+                   int search_param,
+                   int sad_per_bit,
+                   const vp9_variance_fn_ptr_t *vfp,
+                   int *mvjsadcost, int *mvsadcost[2],
+                   int *mvjcost, int *mvcost[2],
+                   int_mv *center_mv) {
+ const MACROBLOCKD* const xd = &x->e_mbd;
+ MV hex[6] = { { -1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0} };
+ MV neighbors[4] = {{0, -1}, { -1, 0}, {1, 0}, {0, 1}};
+ int i, j;
+
+ uint8_t *what = x->plane[0].src.buf;
+ int what_stride = x->plane[0].src.stride;
+ int in_what_stride = xd->plane[0].pre[0].stride;
+ int br, bc;
+ int_mv this_mv;
+ unsigned int bestsad = 0x7fffffff;
+ unsigned int thissad;
+ uint8_t *base_offset;
+ uint8_t *this_offset;
+ int k = -1;
+ int all_in;
+ int best_site = -1;
+
+ int_mv fcenter_mv;
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ // adjust ref_mv to make sure it is within MV range
+ clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+ br = ref_mv->as_mv.row;
+ bc = ref_mv->as_mv.col;
+
+ // Work out the start point for the search
+ base_offset = (uint8_t *)(xd->plane[0].pre[0].buf);
+ this_offset = base_offset + (br * (xd->plane[0].pre[0].stride)) + bc;
+ this_mv.as_mv.row = br;
+ this_mv.as_mv.col = bc;
+ bestsad = vfp->sdf(what, what_stride, this_offset,
+ in_what_stride, 0x7fffffff)
+ + mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost,
+ sad_per_bit);
+
+ // hex search
+ // j=0
+ CHECK_BOUNDS(2)
+
+ if (all_in) {
+ for (i = 0; i < 6; i++) {
+ this_mv.as_mv.row = br + hex[i].row;
+ this_mv.as_mv.col = bc + hex[i].col;
+      this_offset = base_offset + this_mv.as_mv.row * in_what_stride +
+                    this_mv.as_mv.col;
+      thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride,
+                         bestsad);
+ CHECK_BETTER
+ }
+ } else {
+ for (i = 0; i < 6; i++) {
+ this_mv.as_mv.row = br + hex[i].row;
+ this_mv.as_mv.col = bc + hex[i].col;
+ CHECK_POINT
+      this_offset = base_offset + this_mv.as_mv.row * in_what_stride +
+                    this_mv.as_mv.col;
+      thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride,
+                         bestsad);
+ CHECK_BETTER
+ }
+ }
+
+ if (best_site == -1)
+ goto cal_neighbors;
+ else {
+ br += hex[best_site].row;
+ bc += hex[best_site].col;
+ k = best_site;
+ }
+
+ for (j = 1; j < 127; j++) {
+ best_site = -1;
+ CHECK_BOUNDS(2)
+
+ if (all_in) {
+ for (i = 0; i < 3; i++) {
+ this_mv.as_mv.row = br + next_chkpts[k][i].row;
+ this_mv.as_mv.col = bc + next_chkpts[k][i].col;
+        this_offset = base_offset + this_mv.as_mv.row * in_what_stride +
+                      this_mv.as_mv.col;
+        thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride,
+                           bestsad);
+ CHECK_BETTER
+ }
+ } else {
+ for (i = 0; i < 3; i++) {
+ this_mv.as_mv.row = br + next_chkpts[k][i].row;
+ this_mv.as_mv.col = bc + next_chkpts[k][i].col;
+ CHECK_POINT
+        this_offset = base_offset + this_mv.as_mv.row * in_what_stride +
+                      this_mv.as_mv.col;
+        thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride,
+                           bestsad);
+ CHECK_BETTER
+ }
+ }
+
+ if (best_site == -1)
+ break;
+ else {
+ br += next_chkpts[k][best_site].row;
+ bc += next_chkpts[k][best_site].col;
+ k += 5 + best_site;
+ if (k >= 12) k -= 12;
+ else if (k >= 6) k -= 6;
+ }
+ }
+
+ // check 4 1-away neighbors
+cal_neighbors:
+ for (j = 0; j < 32; j++) {
+ best_site = -1;
+ CHECK_BOUNDS(1)
+
+ if (all_in) {
+ for (i = 0; i < 4; i++) {
+ this_mv.as_mv.row = br + neighbors[i].row;
+ this_mv.as_mv.col = bc + neighbors[i].col;
+        this_offset = base_offset + this_mv.as_mv.row * in_what_stride +
+                      this_mv.as_mv.col;
+        thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride,
+                           bestsad);
+ CHECK_BETTER
+ }
+ } else {
+ for (i = 0; i < 4; i++) {
+ this_mv.as_mv.row = br + neighbors[i].row;
+ this_mv.as_mv.col = bc + neighbors[i].col;
+ CHECK_POINT
+ this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
+ thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride, bestsad);
+ CHECK_BETTER
+ }
+ }
+
+ if (best_site == -1)
+ break;
+ else {
+ br += neighbors[best_site].row;
+ bc += neighbors[best_site].col;
+ }
+ }
+
+ best_mv->as_mv.row = br;
+ best_mv->as_mv.col = bc;
+
+ return bestsad;
+}
+#undef CHECK_BOUNDS
+#undef CHECK_POINT
+#undef CHECK_BETTER
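+
+/* Editor's sketch (illustrative, not part of the patch): the index update
+ * in the hex loop above, "k += 5 + best_site; if (k >= 12) k -= 12; else
+ * if (k >= 6) k -= 6;", is a branch-cheap form of k = (k + best_site + 5)
+ * % 6: it rotates the hexagon orientation so that next_chkpts[k] holds only
+ * the three points not already evaluated. A standalone equivalence check:
+ */
+#if 0 /* example only; plain ISO C, not compiled into libvpx */
+#include <assert.h>
+static void check_hex_rotation(void) {
+  int k, s;
+  for (k = 0; k < 6; k++) {
+    for (s = 0; s < 3; s++) {
+      int r = k + 5 + s;              /* the update used in the loop above */
+      if (r >= 12) r -= 12;
+      else if (r >= 6) r -= 6;
+      assert(r == (k + s + 5) % 6);   /* matches the modular form */
+    }
+  }
+}
+#endif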
+
+int vp9_diamond_search_sad_c(MACROBLOCK *x,
+ int_mv *ref_mv, int_mv *best_mv,
+ int search_param, int sad_per_bit, int *num00,
+ vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost,
+ int *mvcost[2], int_mv *center_mv) {
+ int i, j, step;
+
+ const MACROBLOCKD* const xd = &x->e_mbd;
+ uint8_t *what = x->plane[0].src.buf;
+ int what_stride = x->plane[0].src.stride;
+ uint8_t *in_what;
+ int in_what_stride = xd->plane[0].pre[0].stride;
+ uint8_t *best_address;
+
+ int tot_steps;
+ int_mv this_mv;
+
+ int bestsad = INT_MAX;
+ int best_site = 0;
+ int last_site = 0;
+
+ int ref_row, ref_col;
+ int this_row_offset, this_col_offset;
+ search_site *ss;
+
+ uint8_t *check_here;
+ int thissad;
+ int_mv fcenter_mv;
+
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+ ref_row = ref_mv->as_mv.row;
+ ref_col = ref_mv->as_mv.col;
+ *num00 = 0;
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
+
+ // Work out the start point for the search
+ in_what = (uint8_t *)(xd->plane[0].pre[0].buf +
+ (ref_row * (xd->plane[0].pre[0].stride)) + ref_col);
+ best_address = in_what;
+
+ // Check the starting position
+ bestsad = fn_ptr->sdf(what, what_stride, in_what,
+ in_what_stride, 0x7fffffff)
+ + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost,
+ sad_per_bit);
+
+ // search_param determines the length of the initial step and hence the
+ // number of iterations: 0 = initial step (MAX_FIRST_STEP) pel,
+ // 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel, etc.
+ ss = &x->ss[search_param * x->searches_per_step];
+ tot_steps = (x->ss_count / x->searches_per_step) - search_param;
+
+ i = 1;
+
+ for (step = 0; step < tot_steps; step++) {
+ for (j = 0; j < x->searches_per_step; j++) {
+ // Trap illegal vectors
+ this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
+ this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
+
+ if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
+ (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
+
+ {
+ check_here = ss[i].offset + best_address;
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_site = i;
+ }
+ }
+ }
+
+ i++;
+ }
+
+ if (best_site != last_site) {
+ best_mv->as_mv.row += ss[best_site].mv.row;
+ best_mv->as_mv.col += ss[best_site].mv.col;
+ best_address += ss[best_site].offset;
+ last_site = best_site;
+ } else if (best_address == in_what)
+ (*num00)++;
+ }
+
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
+
+ if (bestsad == INT_MAX)
+ return INT_MAX;
+
+ return
+ fn_ptr->vf(what, what_stride, best_address, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit,
+ xd->allow_high_precision_mv);
+}
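+
+/* Editor's sketch (illustrative, not part of the patch): every search loop
+ * in this file uses the same two-stage compare. The raw SAD is tested
+ * against bestsad before the motion-vector rate cost is added and the test
+ * repeated; since mvsad_err_cost() is non-negative, a point whose raw SAD
+ * already exceeds bestsad can never win, so the cost lookup is skipped for
+ * it. The pattern, reduced to its essentials (names hypothetical):
+ */
+#if 0 /* example only */
+static void consider_point(unsigned int raw_sad, unsigned int mv_cost,
+                           unsigned int *bestsad) {
+  if (raw_sad < *bestsad) {                      /* cheap pre-filter */
+    const unsigned int total = raw_sad + mv_cost; /* add rate term only now */
+    if (total < *bestsad)
+      *bestsad = total;
+  }
+}
+#endif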
+
+int vp9_diamond_search_sadx4(MACROBLOCK *x,
+ int_mv *ref_mv, int_mv *best_mv, int search_param,
+ int sad_per_bit, int *num00,
+ vp9_variance_fn_ptr_t *fn_ptr,
+ int *mvjcost, int *mvcost[2], int_mv *center_mv) {
+ int i, j, step;
+
+ const MACROBLOCKD* const xd = &x->e_mbd;
+ uint8_t *what = x->plane[0].src.buf;
+ int what_stride = x->plane[0].src.stride;
+ uint8_t *in_what;
+ int in_what_stride = xd->plane[0].pre[0].stride;
+ uint8_t *best_address;
+
+ int tot_steps;
+ int_mv this_mv;
+
+ unsigned int bestsad = INT_MAX;
+ int best_site = 0;
+ int last_site = 0;
+
+ int ref_row;
+ int ref_col;
+ int this_row_offset;
+ int this_col_offset;
+ search_site *ss;
+
+ uint8_t *check_here;
+ unsigned int thissad;
+ int_mv fcenter_mv;
+
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+ ref_row = ref_mv->as_mv.row;
+ ref_col = ref_mv->as_mv.col;
+ *num00 = 0;
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
+
+ // Work out the start point for the search
+ in_what = (uint8_t *)(xd->plane[0].pre[0].buf +
+ (ref_row * (xd->plane[0].pre[0].stride)) + ref_col);
+ best_address = in_what;
+
+ // Check the starting position
+ bestsad = fn_ptr->sdf(what, what_stride,
+ in_what, in_what_stride, 0x7fffffff)
+ + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost,
+ sad_per_bit);
+
+ // search_param determines the length of the initial step and hence the
+ // number of iterations: 0 = initial step (MAX_FIRST_STEP) pel,
+ // 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel, etc.
+ ss = &x->ss[search_param * x->searches_per_step];
+ tot_steps = (x->ss_count / x->searches_per_step) - search_param;
+
+ i = 1;
+
+ for (step = 0; step < tot_steps; step++) {
+ int all_in = 1, t;
+
+ // To know if all neighbor points are within the bounds, checking these
+ // 4 bounds is enough instead of checking 4 bounds for each point.
+ all_in &= ((best_mv->as_mv.row + ss[i].mv.row) > x->mv_row_min);
+ all_in &= ((best_mv->as_mv.row + ss[i + 1].mv.row) < x->mv_row_max);
+ all_in &= ((best_mv->as_mv.col + ss[i + 2].mv.col) > x->mv_col_min);
+ all_in &= ((best_mv->as_mv.col + ss[i + 3].mv.col) < x->mv_col_max);
+
+ if (all_in) {
+ unsigned int sad_array[4];
+
+ for (j = 0; j < x->searches_per_step; j += 4) {
+ unsigned char const *block_offset[4];
+
+ for (t = 0; t < 4; t++)
+ block_offset[t] = ss[i + t].offset + best_address;
+
+ fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride,
+ sad_array);
+
+ for (t = 0; t < 4; t++, i++) {
+ if (sad_array[t] < bestsad) {
+ this_mv.as_mv.row = best_mv->as_mv.row + ss[i].mv.row;
+ this_mv.as_mv.col = best_mv->as_mv.col + ss[i].mv.col;
+ sad_array[t] += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+
+ if (sad_array[t] < bestsad) {
+ bestsad = sad_array[t];
+ best_site = i;
+ }
+ }
+ }
+ }
+ } else {
+ for (j = 0; j < x->searches_per_step; j++) {
+ // Trap illegal vectors
+ this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
+ this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
+
+ if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
+ (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max)) {
+ check_here = ss[i].offset + best_address;
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_site = i;
+ }
+ }
+ }
+ i++;
+ }
+ }
+
+ if (best_site != last_site) {
+ best_mv->as_mv.row += ss[best_site].mv.row;
+ best_mv->as_mv.col += ss[best_site].mv.col;
+ best_address += ss[best_site].offset;
+ last_site = best_site;
+ } else if (best_address == in_what)
+ (*num00)++;
+ }
+
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
+
+ if (bestsad == INT_MAX)
+ return INT_MAX;
+
+ return
+ fn_ptr->vf(what, what_stride, best_address, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit,
+ xd->allow_high_precision_mv);
+}
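+
+/* Editor's note (illustrative): the all_in test above performs four bounds
+ * checks per step rather than four per candidate. It relies on the site
+ * ordering built by vp9_init3smotion_compensation(), where the first four
+ * sites of each step carry the extreme row/col offsets for that step, so
+ * if those extremes fit inside the clamp window, every remaining site of
+ * the step does too. For an 8-point step of radius r (offsets shown for
+ * illustration):
+ *
+ *   sites: (-r,0) (r,0) (0,-r) (0,r) (-r,-r) (r,-r) (-r,r) (r,r)
+ *   row - r > row_min, row + r < row_max,
+ *   col - r > col_min, col + r < col_max  =>  all eight points are legal.
+ */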
+
+/* do_refine: If last step (1-away) of n-step search doesn't pick the center
+ point as the best match, we will do a final 1-away diamond
+ refining search */
+int vp9_full_pixel_diamond(VP9_COMP *cpi, MACROBLOCK *x,
+ int_mv *mvp_full, int step_param,
+ int sadpb, int further_steps,
+ int do_refine, vp9_variance_fn_ptr_t *fn_ptr,
+ int_mv *ref_mv, int_mv *dst_mv) {
+ int_mv temp_mv;
+ int thissme, n, num00;
+ int bestsme = cpi->diamond_search_sad(x, mvp_full, &temp_mv,
+ step_param, sadpb, &num00,
+ fn_ptr, x->nmvjointcost,
+ x->mvcost, ref_mv);
+ dst_mv->as_int = temp_mv.as_int;
+
+ n = num00;
+ num00 = 0;
+
+ /* If there will be no more n-step searches, check whether a refining search is needed. */
+ if (n > further_steps)
+ do_refine = 0;
+
+ while (n < further_steps) {
+ n++;
+
+ if (num00)
+ num00--;
+ else {
+ thissme = cpi->diamond_search_sad(x, mvp_full, &temp_mv,
+ step_param + n, sadpb, &num00,
+ fn_ptr, x->nmvjointcost, x->mvcost,
+ ref_mv);
+
+ /* check to see if refining search is needed. */
+ if (num00 > (further_steps - n))
+ do_refine = 0;
+
+ if (thissme < bestsme) {
+ bestsme = thissme;
+ dst_mv->as_int = temp_mv.as_int;
+ }
+ }
+ }
+
+ /* final 1-away diamond refining search */
+ if (do_refine == 1) {
+ int search_range = 8;
+ int_mv best_mv;
+ best_mv.as_int = dst_mv->as_int;
+ thissme = cpi->refining_search_sad(x, &best_mv, sadpb, search_range,
+ fn_ptr, x->nmvjointcost, x->mvcost,
+ ref_mv);
+
+ if (thissme < bestsme) {
+ bestsme = thissme;
+ dst_mv->as_int = best_mv.as_int;
+ }
+ }
+ return bestsme;
+}
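+
+/* Editor's note (illustrative): num00, as returned by diamond_search_sad,
+ * counts how many of the following smaller-step searches would keep the
+ * centre point as the best match, so the loop above decrements num00
+ * instead of re-running searches whose outcome is already known.
+ * Hypothetical flow with further_steps = 3:
+ *
+ *   n = 0 : diamond(step_param)     returns num00 = 2
+ *   n = 1 : skipped (num00 2 -> 1)
+ *   n = 2 : skipped (num00 1 -> 0)
+ *   n = 3 : diamond(step_param + 3) re-run; result compared to bestsme
+ */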
+
+int vp9_full_search_sad_c(MACROBLOCK *x, int_mv *ref_mv,
+ int sad_per_bit, int distance,
+ vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost,
+ int *mvcost[2],
+ int_mv *center_mv, int n) {
+ const MACROBLOCKD* const xd = &x->e_mbd;
+ uint8_t *what = x->plane[0].src.buf;
+ int what_stride = x->plane[0].src.stride;
+ uint8_t *in_what;
+ int in_what_stride = xd->plane[0].pre[0].stride;
+ int mv_stride = xd->plane[0].pre[0].stride;
+ uint8_t *bestaddress;
+ int_mv *best_mv = &x->e_mbd.mode_info_context->bmi[n].as_mv[0];
+ int_mv this_mv;
+ int bestsad = INT_MAX;
+ int r, c;
+
+ uint8_t *check_here;
+ int thissad;
+
+ int ref_row = ref_mv->as_mv.row;
+ int ref_col = ref_mv->as_mv.col;
+
+ int row_min = ref_row - distance;
+ int row_max = ref_row + distance;
+ int col_min = ref_col - distance;
+ int col_max = ref_col + distance;
+ int_mv fcenter_mv;
+
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ // Work out the mid point for the search
+ in_what = xd->plane[0].pre[0].buf;
+ bestaddress = in_what + (ref_row * xd->plane[0].pre[0].stride) + ref_col;
+
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
+
+ // Baseline value at the centre
+ bestsad = fn_ptr->sdf(what, what_stride, bestaddress,
+ in_what_stride, 0x7fffffff)
+ + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost,
+ sad_per_bit);
+
+ // Apply further limits to prevent us from using vectors that stretch beyond the UMV border
+ if (col_min < x->mv_col_min)
+ col_min = x->mv_col_min;
+
+ if (col_max > x->mv_col_max)
+ col_max = x->mv_col_max;
+
+ if (row_min < x->mv_row_min)
+ row_min = x->mv_row_min;
+
+ if (row_max > x->mv_row_max)
+ row_max = x->mv_row_max;
+
+ for (r = row_min; r < row_max; r++) {
+ this_mv.as_mv.row = r;
+ check_here = r * mv_stride + in_what + col_min;
+
+ for (c = col_min; c < col_max; c++) {
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
+
+ this_mv.as_mv.col = c;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
+ }
+
+ check_here++;
+ }
+ }
+
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
+
+ if (bestsad < INT_MAX)
+ return
+ fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit,
+ xd->allow_high_precision_mv);
+ else
+ return INT_MAX;
+}
+
+int vp9_full_search_sadx3(MACROBLOCK *x, int_mv *ref_mv,
+ int sad_per_bit, int distance,
+ vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost,
+ int *mvcost[2], int_mv *center_mv, int n) {
+ const MACROBLOCKD* const xd = &x->e_mbd;
+ uint8_t *what = x->plane[0].src.buf;
+ int what_stride = x->plane[0].src.stride;
+ uint8_t *in_what;
+ int in_what_stride = xd->plane[0].pre[0].stride;
+ int mv_stride = xd->plane[0].pre[0].stride;
+ uint8_t *bestaddress;
+ int_mv *best_mv = &x->e_mbd.mode_info_context->bmi[n].as_mv[0];
+ int_mv this_mv;
+ unsigned int bestsad = INT_MAX;
+ int r, c;
+
+ uint8_t *check_here;
+ unsigned int thissad;
+
+ int ref_row = ref_mv->as_mv.row;
+ int ref_col = ref_mv->as_mv.col;
+
+ int row_min = ref_row - distance;
+ int row_max = ref_row + distance;
+ int col_min = ref_col - distance;
+ int col_max = ref_col + distance;
+
+ unsigned int sad_array[3];
+ int_mv fcenter_mv;
+
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ // Work out the mid point for the search
+ in_what = xd->plane[0].pre[0].buf;
+ bestaddress = in_what + (ref_row * xd->plane[0].pre[0].stride) + ref_col;
+
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
+
+ // Baseline value at the centre
+ bestsad = fn_ptr->sdf(what, what_stride,
+ bestaddress, in_what_stride, 0x7fffffff)
+ + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost,
+ sad_per_bit);
+
+ // Apply further limits to prevent us from using vectors that stretch beyond the UMV border
+ if (col_min < x->mv_col_min)
+ col_min = x->mv_col_min;
+
+ if (col_max > x->mv_col_max)
+ col_max = x->mv_col_max;
+
+ if (row_min < x->mv_row_min)
+ row_min = x->mv_row_min;
+
+ if (row_max > x->mv_row_max)
+ row_max = x->mv_row_max;
+
+ for (r = row_min; r < row_max; r++) {
+ this_mv.as_mv.row = r;
+ check_here = r * mv_stride + in_what + col_min;
+ c = col_min;
+
+ while ((c + 2) < col_max) {
+ int i;
+
+ fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array);
+
+ for (i = 0; i < 3; i++) {
+ thissad = sad_array[i];
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.col = c;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
+ }
+ }
+
+ check_here++;
+ c++;
+ }
+ }
+
+ while (c < col_max) {
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.col = c;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
+ }
+ }
+
+ check_here++;
+ c++;
+ }
+
+ }
+
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
+
+ if (bestsad < INT_MAX)
+ return
+ fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit,
+ xd->allow_high_precision_mv);
+ else
+ return INT_MAX;
+}
+
+int vp9_full_search_sadx8(MACROBLOCK *x, int_mv *ref_mv,
+ int sad_per_bit, int distance,
+ vp9_variance_fn_ptr_t *fn_ptr,
+ int *mvjcost, int *mvcost[2],
+ int_mv *center_mv, int n) {
+ const MACROBLOCKD* const xd = &x->e_mbd;
+ uint8_t *what = x->plane[0].src.buf;
+ int what_stride = x->plane[0].src.stride;
+ uint8_t *in_what;
+ int in_what_stride = xd->plane[0].pre[0].stride;
+ int mv_stride = xd->plane[0].pre[0].stride;
+ uint8_t *bestaddress;
+ int_mv *best_mv = &x->e_mbd.mode_info_context->bmi[n].as_mv[0];
+ int_mv this_mv;
+ unsigned int bestsad = INT_MAX;
+ int r, c;
+
+ uint8_t *check_here;
+ unsigned int thissad;
+
+ int ref_row = ref_mv->as_mv.row;
+ int ref_col = ref_mv->as_mv.col;
+
+ int row_min = ref_row - distance;
+ int row_max = ref_row + distance;
+ int col_min = ref_col - distance;
+ int col_max = ref_col + distance;
+
+ DECLARE_ALIGNED_ARRAY(16, uint32_t, sad_array8, 8);
+ unsigned int sad_array[3];
+ int_mv fcenter_mv;
+
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ // Work out the mid point for the search
+ in_what = xd->plane[0].pre[0].buf;
+ bestaddress = in_what + (ref_row * xd->plane[0].pre[0].stride) + ref_col;
+
+ best_mv->as_mv.row = ref_row;
+ best_mv->as_mv.col = ref_col;
+
+ // Baseline value at the centre
+ bestsad = fn_ptr->sdf(what, what_stride,
+ bestaddress, in_what_stride, 0x7fffffff)
+ + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost,
+ sad_per_bit);
+
+ // Apply further limits to prevent us from using vectors that stretch beyond the UMV border
+ if (col_min < x->mv_col_min)
+ col_min = x->mv_col_min;
+
+ if (col_max > x->mv_col_max)
+ col_max = x->mv_col_max;
+
+ if (row_min < x->mv_row_min)
+ row_min = x->mv_row_min;
+
+ if (row_max > x->mv_row_max)
+ row_max = x->mv_row_max;
+
+ for (r = row_min; r < row_max; r++) {
+ this_mv.as_mv.row = r;
+ check_here = r * mv_stride + in_what + col_min;
+ c = col_min;
+
+ while ((c + 7) < col_max) {
+ int i;
+
+ fn_ptr->sdx8f(what, what_stride, check_here, in_what_stride, sad_array8);
+
+ for (i = 0; i < 8; i++) {
+ thissad = (unsigned int)sad_array8[i];
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.col = c;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
+ }
+ }
+
+ check_here++;
+ c++;
+ }
+ }
+
+ while ((c + 2) < col_max) {
+ int i;
+
+ fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array);
+
+ for (i = 0; i < 3; i++) {
+ thissad = sad_array[i];
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.col = c;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
+ }
+ }
+
+ check_here++;
+ c++;
+ }
+ }
+
+ while (c < col_max) {
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.col = c;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+ mvjsadcost, mvsadcost, sad_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_mv->as_mv.row = r;
+ best_mv->as_mv.col = c;
+ bestaddress = check_here;
+ }
+ }
+
+ check_here++;
+ c++;
+ }
+ }
+
+ this_mv.as_mv.row = best_mv->as_mv.row << 3;
+ this_mv.as_mv.col = best_mv->as_mv.col << 3;
+
+ if (bestsad < INT_MAX)
+ return
+ fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit,
+ xd->allow_high_precision_mv);
+ else
+ return INT_MAX;
+}
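+
+/* Editor's note (illustrative): the three while-loops above tile each row
+ * of the exhaustive search into runs of 8 columns (sdx8f), then 3 (sdx3f),
+ * then singles (sdf), so that the multi-SAD kernels cover as many points
+ * as possible. For example, a row of 21 candidate columns splits as
+ *
+ *   21 = 2 x 8 (sdx8f)  +  1 x 3 (sdx3f)  +  2 x 1 (sdf).
+ */
+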
+int vp9_refining_search_sad_c(MACROBLOCK *x,
+ int_mv *ref_mv, int error_per_bit,
+ int search_range, vp9_variance_fn_ptr_t *fn_ptr,
+ int *mvjcost, int *mvcost[2], int_mv *center_mv) {
+ const MACROBLOCKD* const xd = &x->e_mbd;
+ MV neighbors[4] = {{ -1, 0}, {0, -1}, {0, 1}, {1, 0}};
+ int i, j;
+ int this_row_offset, this_col_offset;
+
+ int what_stride = x->plane[0].src.stride;
+ int in_what_stride = xd->plane[0].pre[0].stride;
+ uint8_t *what = x->plane[0].src.buf;
+ uint8_t *best_address = xd->plane[0].pre[0].buf +
+ (ref_mv->as_mv.row * xd->plane[0].pre[0].stride) +
+ ref_mv->as_mv.col;
+ uint8_t *check_here;
+ unsigned int thissad;
+ int_mv this_mv;
+ unsigned int bestsad = INT_MAX;
+ int_mv fcenter_mv;
+
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) +
+ mvsad_err_cost(ref_mv, &fcenter_mv, mvjsadcost, mvsadcost, error_per_bit);
+
+ for (i = 0; i < search_range; i++) {
+ int best_site = -1;
+
+ for (j = 0; j < 4; j++) {
+ this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
+ this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
+
+ if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
+ (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max)) {
+ check_here = (neighbors[j].row) * in_what_stride + neighbors[j].col + best_address;
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost,
+ mvsadcost, error_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_site = j;
+ }
+ }
+ }
+ }
+
+ if (best_site == -1)
+ break;
+ else {
+ ref_mv->as_mv.row += neighbors[best_site].row;
+ ref_mv->as_mv.col += neighbors[best_site].col;
+ best_address += (neighbors[best_site].row) * in_what_stride + neighbors[best_site].col;
+ }
+ }
+
+ this_mv.as_mv.row = ref_mv->as_mv.row << 3;
+ this_mv.as_mv.col = ref_mv->as_mv.col << 3;
+
+ if (bestsad < INT_MAX)
+ return
+ fn_ptr->vf(what, what_stride, best_address, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit,
+ xd->allow_high_precision_mv);
+ else
+ return INT_MAX;
+}
+
+int vp9_refining_search_sadx4(MACROBLOCK *x,
+ int_mv *ref_mv, int error_per_bit,
+ int search_range, vp9_variance_fn_ptr_t *fn_ptr,
+ int *mvjcost, int *mvcost[2], int_mv *center_mv) {
+ const MACROBLOCKD* const xd = &x->e_mbd;
+ MV neighbors[4] = {{ -1, 0}, {0, -1}, {0, 1}, {1, 0}};
+ int i, j;
+ int this_row_offset, this_col_offset;
+
+ int what_stride = x->plane[0].src.stride;
+ int in_what_stride = xd->plane[0].pre[0].stride;
+ uint8_t *what = x->plane[0].src.buf;
+ uint8_t *best_address = xd->plane[0].pre[0].buf +
+ (ref_mv->as_mv.row * xd->plane[0].pre[0].stride) +
+ ref_mv->as_mv.col;
+ uint8_t *check_here;
+ unsigned int thissad;
+ int_mv this_mv;
+ unsigned int bestsad = INT_MAX;
+ int_mv fcenter_mv;
+
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) +
+ mvsad_err_cost(ref_mv, &fcenter_mv, mvjsadcost, mvsadcost, error_per_bit);
+
+ for (i = 0; i < search_range; i++) {
+ int best_site = -1;
+ int all_in = ((ref_mv->as_mv.row - 1) > x->mv_row_min) &
+ ((ref_mv->as_mv.row + 1) < x->mv_row_max) &
+ ((ref_mv->as_mv.col - 1) > x->mv_col_min) &
+ ((ref_mv->as_mv.col + 1) < x->mv_col_max);
+
+ if (all_in) {
+ unsigned int sad_array[4];
+ unsigned char const *block_offset[4];
+ block_offset[0] = best_address - in_what_stride;
+ block_offset[1] = best_address - 1;
+ block_offset[2] = best_address + 1;
+ block_offset[3] = best_address + in_what_stride;
+
+ fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);
+
+ for (j = 0; j < 4; j++) {
+ if (sad_array[j] < bestsad) {
+ this_mv.as_mv.row = ref_mv->as_mv.row + neighbors[j].row;
+ this_mv.as_mv.col = ref_mv->as_mv.col + neighbors[j].col;
+ sad_array[j] += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost,
+ mvsadcost, error_per_bit);
+
+ if (sad_array[j] < bestsad) {
+ bestsad = sad_array[j];
+ best_site = j;
+ }
+ }
+ }
+ } else {
+ for (j = 0; j < 4; j++) {
+ this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
+ this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
+
+ if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
+ (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max)) {
+ check_here = (neighbors[j].row) * in_what_stride + neighbors[j].col + best_address;
+ thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost,
+ mvsadcost, error_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_site = j;
+ }
+ }
+ }
+ }
+ }
+
+ if (best_site == -1)
+ break;
+ else {
+ ref_mv->as_mv.row += neighbors[best_site].row;
+ ref_mv->as_mv.col += neighbors[best_site].col;
+ best_address += (neighbors[best_site].row) * in_what_stride + neighbors[best_site].col;
+ }
+ }
+
+ this_mv.as_mv.row = ref_mv->as_mv.row << 3;
+ this_mv.as_mv.col = ref_mv->as_mv.col << 3;
+
+ if (bestsad < INT_MAX)
+ return
+ fn_ptr->vf(what, what_stride, best_address, in_what_stride,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit,
+ xd->allow_high_precision_mv);
+ else
+ return INT_MAX;
+}
+
+/* This function is called when we do joint motion search in comp_inter_inter
+ * mode.
+ */
+int vp9_refining_search_8p_c(MACROBLOCK *x,
+ int_mv *ref_mv, int error_per_bit,
+ int search_range, vp9_variance_fn_ptr_t *fn_ptr,
+ int *mvjcost, int *mvcost[2], int_mv *center_mv,
+ const uint8_t *second_pred, int w, int h) {
+ const MACROBLOCKD* const xd = &x->e_mbd;
+ MV neighbors[8] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0},
+ {-1, -1}, {1, -1}, {-1, 1}, {1, 1}};
+ int i, j;
+ int this_row_offset, this_col_offset;
+
+ int what_stride = x->plane[0].src.stride;
+ int in_what_stride = xd->plane[0].pre[0].stride;
+ uint8_t *what = x->plane[0].src.buf;
+ uint8_t *best_address = xd->plane[0].pre[0].buf +
+ (ref_mv->as_mv.row * xd->plane[0].pre[0].stride) +
+ ref_mv->as_mv.col;
+ uint8_t *check_here;
+ unsigned int thissad;
+ int_mv this_mv;
+ unsigned int bestsad = INT_MAX;
+ int_mv fcenter_mv;
+
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+
+ /* Compound pred buffer */
+ uint8_t *comp_pred = vpx_memalign(16, w * h * sizeof(uint8_t));
+
+ fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
+ fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+
+ /* Get compound pred by averaging two pred blocks. */
+ comp_avg_pred(comp_pred, second_pred, w, h, best_address, in_what_stride);
+
+ bestsad = fn_ptr->sdf(what, what_stride, comp_pred, w, 0x7fffffff) +
+ mvsad_err_cost(ref_mv, &fcenter_mv, mvjsadcost, mvsadcost, error_per_bit);
+
+ for (i = 0; i < search_range; i++) {
+ int best_site = -1;
+
+ for (j = 0; j < 8; j++) {
+ this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
+ this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
+
+ if ((this_col_offset > x->mv_col_min) &&
+ (this_col_offset < x->mv_col_max) &&
+ (this_row_offset > x->mv_row_min) &&
+ (this_row_offset < x->mv_row_max)) {
+ check_here = (neighbors[j].row) * in_what_stride + neighbors[j].col +
+ best_address;
+
+ /* Get compound block and use it to calculate SAD. */
+ comp_avg_pred(comp_pred, second_pred, w, h, check_here,
+ in_what_stride);
+ thissad = fn_ptr->sdf(what, what_stride, comp_pred, w, bestsad);
+
+ if (thissad < bestsad) {
+ this_mv.as_mv.row = this_row_offset;
+ this_mv.as_mv.col = this_col_offset;
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost,
+ mvsadcost, error_per_bit);
+
+ if (thissad < bestsad) {
+ bestsad = thissad;
+ best_site = j;
+ }
+ }
+ }
+ }
+
+ if (best_site == -1) {
+ break;
+ } else {
+ ref_mv->as_mv.row += neighbors[best_site].row;
+ ref_mv->as_mv.col += neighbors[best_site].col;
+ best_address += (neighbors[best_site].row) * in_what_stride +
+ neighbors[best_site].col;
+ }
+ }
+
+ this_mv.as_mv.row = ref_mv->as_mv.row << 3;
+ this_mv.as_mv.col = ref_mv->as_mv.col << 3;
+
+ if (bestsad < INT_MAX) {
+ int besterr;
+ comp_avg_pred(comp_pred, second_pred, w, h, best_address, in_what_stride);
+ besterr = fn_ptr->vf(what, what_stride, comp_pred, w,
+ (unsigned int *)(&thissad)) +
+ mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit,
+ xd->allow_high_precision_mv);
+ vpx_free(comp_pred);
+ return besterr;
+ } else {
+ vpx_free(comp_pred);
+ return INT_MAX;
+ }
+}
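+
+/* Editor's sketch (illustrative, names hypothetical): comp_avg_pred() used
+ * above forms the compound predictor as the rounded average of the two
+ * single predictions, roughly:
+ */
+#if 0 /* example only */
+static void avg_pred_sketch(uint8_t *comp, const uint8_t *second_pred,
+                            int w, int h, const uint8_t *ref, int ref_stride) {
+  int r, c;
+  for (r = 0; r < h; r++) {
+    for (c = 0; c < w; c++)
+      comp[c] = (uint8_t)((ref[c] + second_pred[c] + 1) >> 1); /* round */
+    comp += w;
+    second_pred += w;  /* second predictor is assumed packed at width w */
+    ref += ref_stride;
+  }
+}
+#endif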
diff --git a/libvpx/vp9/encoder/vp9_mcomp.h b/libvpx/vp9/encoder/vp9_mcomp.h
new file mode 100644
index 0000000..28b2efd
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_mcomp.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_MCOMP_H_
+#define VP9_ENCODER_VP9_MCOMP_H_
+
+#include "vp9/encoder/vp9_block.h"
+#include "vp9/encoder/vp9_variance.h"
+
+// The maximum number of steps in a step search given the largest
+// allowed initial step
+#define MAX_MVSEARCH_STEPS 11
+// Max full pel mv specified in 1 pel units
+#define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS)) - 1)
+// Maximum size of the first step in full pel units
+#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1))
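+// Worked values (illustrative): with MAX_MVSEARCH_STEPS = 11,
+// MAX_FULL_PEL_VAL = (1 << 11) - 1 = 2047 full pels and
+// MAX_FIRST_STEP = 1 << 10 = 1024; each later step halves the step size.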
+
+void vp9_clamp_mv_min_max(MACROBLOCK *x, int_mv *ref_mv);
+int vp9_init_search_range(int width, int height);
+
+int vp9_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvjcost,
+ int *mvcost[2], int weight, int ishp);
+void vp9_init_dsmotion_compensation(MACROBLOCK *x, int stride);
+void vp9_init3smotion_compensation(MACROBLOCK *x, int stride);
+
+// Runs sequence of diamond searches in smaller steps for RD
+struct VP9_COMP;
+int vp9_full_pixel_diamond(struct VP9_COMP *cpi, MACROBLOCK *x,
+ int_mv *mvp_full, int step_param,
+ int sadpb, int further_steps, int do_refine,
+ vp9_variance_fn_ptr_t *fn_ptr,
+ int_mv *ref_mv, int_mv *dst_mv);
+
+int vp9_hex_search(MACROBLOCK *x,
+ int_mv *ref_mv, int_mv *best_mv,
+ int search_param, int error_per_bit,
+ const vp9_variance_fn_ptr_t *vf,
+ int *mvjsadcost, int *mvsadcost[2],
+ int *mvjcost, int *mvcost[2],
+ int_mv *center_mv);
+
+typedef int (fractional_mv_step_fp) (MACROBLOCK *x, int_mv
+ *bestmv, int_mv *ref_mv, int error_per_bit, const vp9_variance_fn_ptr_t *vfp,
+ int *mvjcost, int *mvcost[2], int *distortion, unsigned int *sse);
+extern fractional_mv_step_fp vp9_find_best_sub_pixel_step_iteratively;
+extern fractional_mv_step_fp vp9_find_best_sub_pixel_step;
+extern fractional_mv_step_fp vp9_find_best_half_pixel_step;
+
+typedef int (*vp9_full_search_fn_t)(MACROBLOCK *x,
+ int_mv *ref_mv, int sad_per_bit,
+ int distance, vp9_variance_fn_ptr_t *fn_ptr,
+ int *mvjcost, int *mvcost[2],
+ int_mv *center_mv, int n);
+
+typedef int (*vp9_refining_search_fn_t)(MACROBLOCK *x,
+ int_mv *ref_mv, int sad_per_bit,
+ int distance,
+ vp9_variance_fn_ptr_t *fn_ptr,
+ int *mvjcost, int *mvcost[2],
+ int_mv *center_mv);
+
+typedef int (*vp9_diamond_search_fn_t)(MACROBLOCK *x,
+ int_mv *ref_mv, int_mv *best_mv,
+ int search_param, int sad_per_bit,
+ int *num00,
+ vp9_variance_fn_ptr_t *fn_ptr,
+ int *mvjcost, int *mvcost[2],
+ int_mv *center_mv);
+
+int vp9_find_best_sub_pixel_comp(MACROBLOCK *x,
+ int_mv *bestmv, int_mv *ref_mv,
+ int error_per_bit,
+ const vp9_variance_fn_ptr_t *vfp,
+ int *mvjcost, int *mvcost[2],
+ int *distortion, unsigned int *sse1,
+ const uint8_t *second_pred,
+ int w, int h);
+
+int vp9_refining_search_8p_c(MACROBLOCK *x,
+ int_mv *ref_mv, int error_per_bit,
+ int search_range, vp9_variance_fn_ptr_t *fn_ptr,
+ int *mvjcost, int *mvcost[2],
+ int_mv *center_mv, const uint8_t *second_pred,
+ int w, int h);
+#endif // VP9_ENCODER_VP9_MCOMP_H_
diff --git a/libvpx/vp9/encoder/vp9_modecosts.c b/libvpx/vp9/encoder/vp9_modecosts.c
new file mode 100644
index 0000000..f2e4ce4
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_modecosts.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/encoder/vp9_treewriter.h"
+#include "vp9/common/vp9_entropymode.h"
+
+
+void vp9_init_mode_costs(VP9_COMP *c) {
+ VP9_COMMON *x = &c->common;
+ const vp9_tree_p KT = vp9_intra_mode_tree;
+ int i, j;
+
+ for (i = 0; i < VP9_INTRA_MODES; i++) {
+ for (j = 0; j < VP9_INTRA_MODES; j++) {
+ vp9_cost_tokens((int *)c->mb.y_mode_costs[i][j],
+ x->kf_y_mode_prob[i][j], KT);
+ }
+ }
+
+ // TODO(rbultje) separate tables for superblock costing?
+ vp9_cost_tokens(c->mb.mbmode_cost, x->fc.y_mode_prob[1],
+ vp9_intra_mode_tree);
+ vp9_cost_tokens(c->mb.intra_uv_mode_cost[1],
+ x->fc.uv_mode_prob[VP9_INTRA_MODES - 1], vp9_intra_mode_tree);
+ vp9_cost_tokens(c->mb.intra_uv_mode_cost[0],
+ x->kf_uv_mode_prob[VP9_INTRA_MODES - 1], vp9_intra_mode_tree);
+
+ for (i = 0; i <= VP9_SWITCHABLE_FILTERS; ++i)
+ vp9_cost_tokens((int *)c->mb.switchable_interp_costs[i],
+ x->fc.switchable_interp_prob[i],
+ vp9_switchable_interp_tree);
+}
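+
+/* Editor's note (illustrative): vp9_cost_tokens() walks the probability
+ * tree and fills each mode's entry with its entropy cost. libvpx keeps
+ * these costs in units of 1/256 of a bit, so a symbol of probability p
+ * costs roughly -256 * log2(p): about 256 for p = 1/2 and about 768 for
+ * p = 1/8.
+ */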
diff --git a/libvpx/vp9/encoder/vp9_modecosts.h b/libvpx/vp9/encoder/vp9_modecosts.h
new file mode 100644
index 0000000..f43033e
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_modecosts.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_MODECOSTS_H_
+#define VP9_ENCODER_VP9_MODECOSTS_H_
+
+void vp9_init_mode_costs(VP9_COMP *x);
+
+#endif // VP9_ENCODER_VP9_MODECOSTS_H_
diff --git a/libvpx/vp9/encoder/vp9_onyx_if.c b/libvpx/vp9/encoder/vp9_onyx_if.c
new file mode 100644
index 0000000..6a14df4
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_onyx_if.c
@@ -0,0 +1,3932 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vpx_config.h"
+#include "vp9/common/vp9_filter.h"
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vp9/encoder/vp9_quantize.h"
+#include "vp9/common/vp9_alloccommon.h"
+#include "vp9/encoder/vp9_mcomp.h"
+#include "vp9/encoder/vp9_firstpass.h"
+#include "vp9/encoder/vp9_psnr.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vp9/common/vp9_extend.h"
+#include "vp9/encoder/vp9_ratectrl.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/common/vp9_tile_common.h"
+#include "vp9/encoder/vp9_segmentation.h"
+#include "./vp9_rtcd.h"
+#include "./vpx_scale_rtcd.h"
+#if CONFIG_POSTPROC
+#include "vp9/common/vp9_postproc.h"
+#endif
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/vpx_timer.h"
+
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/encoder/vp9_mbgraph.h"
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/encoder/vp9_rdopt.h"
+#include "vp9/encoder/vp9_bitstream.h"
+#include "vp9/encoder/vp9_picklpf.h"
+#include "vp9/common/vp9_mvref_common.h"
+#include "vp9/encoder/vp9_temporal_filter.h"
+
+#include <math.h>
+#include <stdio.h>
+#include <limits.h>
+
+extern void print_tree_update_probs();
+
+static void set_default_lf_deltas(VP9_COMP *cpi);
+
+#define DEFAULT_INTERP_FILTER SWITCHABLE
+
+#define SEARCH_BEST_FILTER 0 /* to search exhaustively for
+ best filter */
+#define RESET_FOREACH_FILTER 0 /* whether to reset the encoder state
+ before trying each new filter */
+#define SHARP_FILTER_QTHRESH 0 /* Q threshold for 8-tap sharp filter */
+
+#define ALTREF_HIGH_PRECISION_MV 1 /* whether to use high precision mv
+ for altref computation */
+#define HIGH_PRECISION_MV_QTHRESH 200 /* Q threshold for use of high precision
+ mv. Choose a very high value for
+ now so that HIGH_PRECISION is always
+ chosen */
+
+#if CONFIG_INTERNAL_STATS
+#include "math.h"
+
+extern double vp9_calc_ssim(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest, int lumamask,
+ double *weight);
+
+
+extern double vp9_calc_ssimg(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest, double *ssim_y,
+ double *ssim_u, double *ssim_v);
+
+
+#endif
+
+// #define OUTPUT_YUV_REC
+
+#ifdef OUTPUT_YUV_SRC
+FILE *yuv_file;
+#endif
+#ifdef OUTPUT_YUV_REC
+FILE *yuv_rec_file;
+#endif
+
+#if 0
+FILE *framepsnr;
+FILE *kf_list;
+FILE *keyfile;
+#endif
+
+
+#ifdef ENTROPY_STATS
+extern int intra_mode_stats[VP9_INTRA_MODES]
+ [VP9_INTRA_MODES]
+ [VP9_INTRA_MODES];
+#endif
+
+#ifdef NMV_STATS
+extern void init_nmvstats();
+extern void print_nmvstats();
+#endif
+#ifdef MODE_STATS
+extern void init_tx_count_stats();
+extern void write_tx_count_stats();
+extern void init_switchable_interp_stats();
+extern void write_switchable_interp_stats();
+#endif
+
+#ifdef SPEEDSTATS
+unsigned int frames_at_speed[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+#endif
+
+#if defined(SECTIONBITS_OUTPUT)
+extern unsigned __int64 Sectionbits[500];
+#endif
+
+extern void vp9_init_quantizer(VP9_COMP *cpi);
+
+// Tables relating active max Q to active min Q
+static int kf_low_motion_minq[QINDEX_RANGE];
+static int kf_high_motion_minq[QINDEX_RANGE];
+static int gf_low_motion_minq[QINDEX_RANGE];
+static int gf_high_motion_minq[QINDEX_RANGE];
+static int inter_minq[QINDEX_RANGE];
+
+// Functions to compute the active minq lookup table entries based on a
+// formulaic approach to facilitate easier adjustment of the Q tables.
+// The formulae were derived from computing a 3rd order polynomial best
+// fit to the original data (after plotting real maxq vs minq (not q index))
+static int calculate_minq_index(double maxq,
+ double x3, double x2, double x1, double c) {
+ int i;
+ const double minqtarget = MIN(((x3 * maxq + x2) * maxq + x1) * maxq + c,
+ maxq);
+
+ // Special case handling to deal with the step from q2.0
+ // down to lossless mode represented by q 1.0.
+ if (minqtarget <= 2.0)
+ return 0;
+
+ for (i = 0; i < QINDEX_RANGE; i++) {
+ if (minqtarget <= vp9_convert_qindex_to_q(i))
+ return i;
+ }
+
+ return QINDEX_RANGE - 1;
+}
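+
+/* Editor's note (illustrative): the Horner expression above evaluates the
+ * cubic fit minq(q) = x3*q^3 + x2*q^2 + x1*q + c, clamped so the derived
+ * minimum never exceeds maxq. E.g. for the inter_minq coefficients below
+ * at maxq = 100:
+ *   0.00000271*100^3 - 0.00113*100^2 + 0.697*100 + 0.0
+ *     = 2.71 - 11.3 + 69.7 = 61.11, a min q target of about 61.
+ */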
+
+static void init_minq_luts(void) {
+ int i;
+
+ for (i = 0; i < QINDEX_RANGE; i++) {
+ const double maxq = vp9_convert_qindex_to_q(i);
+
+
+ kf_low_motion_minq[i] = calculate_minq_index(maxq,
+ 0.000001,
+ -0.0004,
+ 0.15,
+ 0.0);
+ kf_high_motion_minq[i] = calculate_minq_index(maxq,
+ 0.000002,
+ -0.0012,
+ 0.5,
+ 0.0);
+
+ gf_low_motion_minq[i] = calculate_minq_index(maxq,
+ 0.0000015,
+ -0.0009,
+ 0.33,
+ 0.0);
+ gf_high_motion_minq[i] = calculate_minq_index(maxq,
+ 0.0000021,
+ -0.00125,
+ 0.45,
+ 0.0);
+ inter_minq[i] = calculate_minq_index(maxq,
+ 0.00000271,
+ -0.00113,
+ 0.697,
+ 0.0);
+
+ }
+}
+
+static void set_mvcost(MACROBLOCK *mb) {
+ if (mb->e_mbd.allow_high_precision_mv) {
+ mb->mvcost = mb->nmvcost_hp;
+ mb->mvsadcost = mb->nmvsadcost_hp;
+ } else {
+ mb->mvcost = mb->nmvcost;
+ mb->mvsadcost = mb->nmvsadcost;
+ }
+}
+
+void vp9_initialize_enc() {
+ static int init_done = 0;
+
+ if (!init_done) {
+ vp9_initialize_common();
+ vp9_tokenize_initialize();
+ vp9_init_quant_tables();
+ vp9_init_me_luts();
+ init_minq_luts();
+ // init_base_skip_probs();
+ init_done = 1;
+ }
+}
+
+static void setup_features(VP9_COMP *cpi) {
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+
+ // Set up default state for MB feature flags
+ xd->segmentation_enabled = 0;
+
+ xd->update_mb_segmentation_map = 0;
+ xd->update_mb_segmentation_data = 0;
+ vpx_memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
+
+ vp9_clearall_segfeatures(xd);
+
+ xd->mode_ref_lf_delta_enabled = 0;
+ xd->mode_ref_lf_delta_update = 0;
+ vpx_memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas));
+ vpx_memset(xd->mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas));
+ vpx_memset(xd->last_ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas));
+ vpx_memset(xd->last_mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas));
+
+ set_default_lf_deltas(cpi);
+}
+
+static void dealloc_compressor_data(VP9_COMP *cpi) {
+ // Delete segmentation map
+ vpx_free(cpi->segmentation_map);
+ cpi->segmentation_map = 0;
+ vpx_free(cpi->common.last_frame_seg_map);
+ cpi->common.last_frame_seg_map = 0;
+ vpx_free(cpi->coding_context.last_frame_seg_map_copy);
+ cpi->coding_context.last_frame_seg_map_copy = 0;
+
+ vpx_free(cpi->active_map);
+ cpi->active_map = 0;
+
+ vp9_free_frame_buffers(&cpi->common);
+
+ vp9_free_frame_buffer(&cpi->last_frame_uf);
+ vp9_free_frame_buffer(&cpi->scaled_source);
+ vp9_free_frame_buffer(&cpi->alt_ref_buffer);
+ vp9_lookahead_destroy(cpi->lookahead);
+
+ vpx_free(cpi->tok);
+ cpi->tok = 0;
+
+ // Activity-mask-based per-MB zbin adjustments
+ vpx_free(cpi->mb_activity_map);
+ cpi->mb_activity_map = 0;
+ vpx_free(cpi->mb_norm_activity_map);
+ cpi->mb_norm_activity_map = 0;
+
+ vpx_free(cpi->mb.pip);
+ cpi->mb.pip = 0;
+}
+
+// Computes a q delta (in "q index" terms) to get from a starting q value
+// to a target q value.
+static int compute_qdelta(VP9_COMP *cpi, double qstart, double qtarget) {
+ int i;
+ int start_index = cpi->worst_quality;
+ int target_index = cpi->worst_quality;
+
+ // Convert the average q value to an index.
+ for (i = cpi->best_quality; i < cpi->worst_quality; i++) {
+ start_index = i;
+ if (vp9_convert_qindex_to_q(i) >= qstart)
+ break;
+ }
+
+ // Convert the q target to an index
+ for (i = cpi->best_quality; i < cpi->worst_quality; i++) {
+ target_index = i;
+ if (vp9_convert_qindex_to_q(i) >= qtarget)
+ break;
+ }
+
+ return target_index - start_index;
+}
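+
+/* Editor's note (illustrative): both loops map a real q value to the first
+ * q index whose real q is at or above it, so the returned delta is in
+ * index units. For example (index values hypothetical), if qstart = 40.0
+ * maps to index 120 and qtarget = 35.0 maps to index 112, the function
+ * returns 112 - 120 = -8.
+ */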
+
+static void configure_static_seg_features(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+
+ int high_q = (int)(cpi->avg_q > 48.0);
+ int qi_delta;
+
+ // Disable and clear down for KF
+ if (cm->frame_type == KEY_FRAME) {
+ // Clear down the global segmentation map
+ vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
+ xd->update_mb_segmentation_map = 0;
+ xd->update_mb_segmentation_data = 0;
+ cpi->static_mb_pct = 0;
+
+ // Disable segmentation
+ vp9_disable_segmentation((VP9_PTR)cpi);
+
+ // Clear down the segment features.
+ vp9_clearall_segfeatures(xd);
+ } else if (cpi->refresh_alt_ref_frame) {
+ // If this is an alt ref frame
+ // Clear down the global segmentation map
+ vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
+ xd->update_mb_segmentation_map = 0;
+ xd->update_mb_segmentation_data = 0;
+ cpi->static_mb_pct = 0;
+
+ // Disable segmentation and individual segment features by default
+ vp9_disable_segmentation((VP9_PTR)cpi);
+ vp9_clearall_segfeatures(xd);
+
+ // Scan frames from current to arf frame.
+ // This function re-enables segmentation if appropriate.
+ vp9_update_mbgraph_stats(cpi);
+
+ // If segmentation was enabled set those features needed for the
+ // arf itself.
+ if (xd->segmentation_enabled) {
+ xd->update_mb_segmentation_map = 1;
+ xd->update_mb_segmentation_data = 1;
+
+ qi_delta = compute_qdelta(cpi, cpi->avg_q, (cpi->avg_q * 0.875));
+ vp9_set_segdata(xd, 1, SEG_LVL_ALT_Q, (qi_delta - 2));
+ vp9_set_segdata(xd, 1, SEG_LVL_ALT_LF, -2);
+
+ vp9_enable_segfeature(xd, 1, SEG_LVL_ALT_Q);
+ vp9_enable_segfeature(xd, 1, SEG_LVL_ALT_LF);
+
+ // Where relevant assume segment data is delta data
+ xd->mb_segment_abs_delta = SEGMENT_DELTADATA;
+
+ }
+ } else if (xd->segmentation_enabled) {
+ // All other frames if segmentation has been enabled
+
+ // First normal frame in a valid gf or alt ref group
+ if (cpi->common.frames_since_golden == 0) {
+ // Set up segment features for normal frames in an arf group
+ if (cpi->source_alt_ref_active) {
+ xd->update_mb_segmentation_map = 0;
+ xd->update_mb_segmentation_data = 1;
+ xd->mb_segment_abs_delta = SEGMENT_DELTADATA;
+
+ qi_delta = compute_qdelta(cpi, cpi->avg_q,
+ (cpi->avg_q * 1.125));
+ vp9_set_segdata(xd, 1, SEG_LVL_ALT_Q, (qi_delta + 2));
+ vp9_enable_segfeature(xd, 1, SEG_LVL_ALT_Q);
+
+ vp9_set_segdata(xd, 1, SEG_LVL_ALT_LF, -2);
+ vp9_enable_segfeature(xd, 1, SEG_LVL_ALT_LF);
+
+ // Segment coding disabled for compred testing
+ if (high_q || (cpi->static_mb_pct == 100)) {
+ vp9_set_segdata(xd, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+ vp9_enable_segfeature(xd, 1, SEG_LVL_REF_FRAME);
+ vp9_enable_segfeature(xd, 1, SEG_LVL_SKIP);
+ }
+ } else {
+ // Disable segmentation and clear down features if alt ref
+ // is not active for this group
+
+ vp9_disable_segmentation((VP9_PTR)cpi);
+
+ vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
+
+ xd->update_mb_segmentation_map = 0;
+ xd->update_mb_segmentation_data = 0;
+
+ vp9_clearall_segfeatures(xd);
+ }
+ } else if (cpi->is_src_frame_alt_ref) {
+ // Special case where we are coding over the top of a previous
+ // alt ref frame.
+ // Segment coding disabled for compred testing
+
+ // Enable ref frame features for segment 0 as well
+ vp9_enable_segfeature(xd, 0, SEG_LVL_REF_FRAME);
+ vp9_enable_segfeature(xd, 1, SEG_LVL_REF_FRAME);
+
+ // All mbs should use ALTREF_FRAME
+ vp9_clear_segdata(xd, 0, SEG_LVL_REF_FRAME);
+ vp9_set_segdata(xd, 0, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+ vp9_clear_segdata(xd, 1, SEG_LVL_REF_FRAME);
+ vp9_set_segdata(xd, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+
+ // Skip all MBs if high Q (0,0 mv and skip coeffs)
+ if (high_q) {
+ vp9_enable_segfeature(xd, 0, SEG_LVL_SKIP);
+ vp9_enable_segfeature(xd, 1, SEG_LVL_SKIP);
+ }
+ // Enable data update
+ xd->update_mb_segmentation_data = 1;
+ } else {
+ // All other frames.
+
+ // No updates.. leave things as they are.
+ xd->update_mb_segmentation_map = 0;
+ xd->update_mb_segmentation_data = 0;
+ }
+ }
+}
+
+#ifdef ENTROPY_STATS
+void vp9_update_mode_context_stats(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+ int i, j;
+ unsigned int (*inter_mode_counts)[VP9_INTER_MODES - 1][2] =
+ cm->fc.inter_mode_counts;
+ int64_t (*mv_ref_stats)[VP9_INTER_MODES - 1][2] = cpi->mv_ref_stats;
+ FILE *f;
+
+ // Read the past stats counters
+ f = fopen("mode_context.bin", "rb");
+ if (!f) {
+ vpx_memset(cpi->mv_ref_stats, 0, sizeof(cpi->mv_ref_stats));
+ } else {
+ fread(cpi->mv_ref_stats, sizeof(cpi->mv_ref_stats), 1, f);
+ fclose(f);
+ }
+
+ // Add in the values for this frame
+ for (i = 0; i < INTER_MODE_CONTEXTS; i++) {
+ for (j = 0; j < VP9_INTER_MODES - 1; j++) {
+ mv_ref_stats[i][j][0] += (int64_t)inter_mode_counts[i][j][0];
+ mv_ref_stats[i][j][1] += (int64_t)inter_mode_counts[i][j][1];
+ }
+ }
+
+ // Write back the accumulated stats
+ f = fopen("mode_context.bin", "wb");
+ fwrite(cpi->mv_ref_stats, sizeof(cpi->mv_ref_stats), 1, f);
+ fclose(f);
+}
+
+void print_mode_context(VP9_COMP *cpi) {
+ FILE *f = fopen("vp9_modecont.c", "a");
+ int i, j;
+
+ fprintf(f, "#include \"vp9_entropy.h\"\n");
+ fprintf(
+ f,
+ "const int inter_mode_probs[INTER_MODE_CONTEXTS][VP9_INTER_MODES - 1] =");
+ fprintf(f, "{\n");
+ for (j = 0; j < INTER_MODE_CONTEXTS; j++) {
+ fprintf(f, " {/* %d */ ", j);
+ fprintf(f, " ");
+ for (i = 0; i < VP9_INTER_MODES - 1; i++) {
+ int this_prob;
+ int64_t count = cpi->mv_ref_stats[j][i][0] + cpi->mv_ref_stats[j][i][1];
+ if (count)
+ this_prob = ((cpi->mv_ref_stats[j][i][0] * 256) + (count >> 1)) / count;
+ else
+ this_prob = 128;
+
+ // context probs
+ fprintf(f, "%5d, ", this_prob);
+ }
+ fprintf(f, " },\n");
+ }
+
+ fprintf(f, "};\n");
+ fclose(f);
+}
+#endif // ENTROPY_STATS
+
+// DEBUG: Print out the segment id of each MB in the current frame.
+static void print_seg_map(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+ int row, col;
+ int map_index = 0;
+ FILE *statsfile = fopen("segmap.stt", "a");
+
+ fprintf(statsfile, "%10d\n", cm->current_video_frame);
+
+ for (row = 0; row < cpi->common.mi_rows; row++) {
+ for (col = 0; col < cpi->common.mi_cols; col++) {
+ fprintf(statsfile, "%10d", cpi->segmentation_map[map_index]);
+ map_index++;
+ }
+ fprintf(statsfile, "\n");
+ }
+ fprintf(statsfile, "\n");
+
+ fclose(statsfile);
+}
+
+static void update_reference_segmentation_map(VP9_COMP *cpi) {
+ VP9_COMMON *const cm = &cpi->common;
+ int row, col;
+ MODE_INFO *mi, *mi_ptr = cm->mi;
+ uint8_t *cache_ptr = cm->last_frame_seg_map, *cache;
+
+ for (row = 0; row < cm->mi_rows; row++) {
+ mi = mi_ptr;
+ cache = cache_ptr;
+ for (col = 0; col < cm->mi_cols; col++, mi++, cache++)
+ cache[0] = mi->mbmi.segment_id;
+ mi_ptr += cm->mode_info_stride;
+ cache_ptr += cm->mi_cols;
+ }
+}
+
+static void set_default_lf_deltas(VP9_COMP *cpi) {
+ cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
+ cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
+
+ vpx_memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
+ vpx_memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
+
+ // Test of ref frame deltas
+ cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
+ cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
+ cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
+ cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
+
+ cpi->mb.e_mbd.mode_lf_deltas[0] = 0; // Zero
+ cpi->mb.e_mbd.mode_lf_deltas[1] = 0; // New mv
+}
+
+static void set_rd_speed_thresholds(VP9_COMP *cpi, int mode, int speed) {
+ SPEED_FEATURES *sf = &cpi->sf;
+ int speed_multiplier = speed + 1;
+ int i;
+
+ // Set baseline threshold values
+ for (i = 0; i < MAX_MODES; ++i)
+ sf->thresh_mult[i] = mode == 0 ? -500 : 0;
+
+ sf->thresh_mult[THR_ZEROMV ] = 0;
+ sf->thresh_mult[THR_ZEROG ] = 0;
+ sf->thresh_mult[THR_ZEROA ] = 0;
+
+ sf->thresh_mult[THR_NEARESTMV] = 0;
+ sf->thresh_mult[THR_NEARESTG ] = 0;
+ sf->thresh_mult[THR_NEARESTA ] = 0;
+
+ sf->thresh_mult[THR_NEARMV ] += speed_multiplier * 1000;
+ sf->thresh_mult[THR_NEARG ] += speed_multiplier * 1000;
+ sf->thresh_mult[THR_NEARA ] += speed_multiplier * 1000;
+
+ sf->thresh_mult[THR_DC ] = 0;
+ sf->thresh_mult[THR_TM ] += speed_multiplier * 1000;
+ sf->thresh_mult[THR_V_PRED ] += speed_multiplier * 1000;
+ sf->thresh_mult[THR_H_PRED ] += speed_multiplier * 1000;
+ sf->thresh_mult[THR_D45_PRED ] += speed_multiplier * 1500;
+ sf->thresh_mult[THR_D135_PRED] += speed_multiplier * 1500;
+ sf->thresh_mult[THR_D117_PRED] += speed_multiplier * 1500;
+ sf->thresh_mult[THR_D153_PRED] += speed_multiplier * 1500;
+ sf->thresh_mult[THR_D27_PRED ] += speed_multiplier * 1500;
+ sf->thresh_mult[THR_D63_PRED ] += speed_multiplier * 1500;
+
+ sf->thresh_mult[THR_B_PRED ] += speed_multiplier * 2500;
+
+ sf->thresh_mult[THR_NEWMV ] += speed_multiplier * 1000;
+ sf->thresh_mult[THR_NEWG ] += speed_multiplier * 1000;
+ sf->thresh_mult[THR_NEWA ] += speed_multiplier * 1000;
+
+ sf->thresh_mult[THR_SPLITMV ] += speed_multiplier * 2500;
+ sf->thresh_mult[THR_SPLITG ] += speed_multiplier * 2500;
+ sf->thresh_mult[THR_SPLITA ] += speed_multiplier * 2500;
+
+ sf->thresh_mult[THR_COMP_ZEROLA ] += speed_multiplier * 1500;
+ sf->thresh_mult[THR_COMP_ZEROGA ] += speed_multiplier * 1500;
+
+ sf->thresh_mult[THR_COMP_NEARESTLA] += speed_multiplier * 1500;
+ sf->thresh_mult[THR_COMP_NEARESTGA] += speed_multiplier * 1500;
+
+ sf->thresh_mult[THR_COMP_NEARLA ] += speed_multiplier * 1500;
+ sf->thresh_mult[THR_COMP_NEARGA ] += speed_multiplier * 1500;
+
+ sf->thresh_mult[THR_COMP_NEWLA ] += speed_multiplier * 2000;
+ sf->thresh_mult[THR_COMP_NEWGA ] += speed_multiplier * 2000;
+
+ sf->thresh_mult[THR_COMP_SPLITLA ] += speed_multiplier * 4500;
+ sf->thresh_mult[THR_COMP_SPLITGA ] += speed_multiplier * 4500;
+
+ if (speed > 4) {
+ for (i = 0; i < MAX_MODES; ++i)
+ sf->thresh_mult[i] = INT_MAX;
+
+ sf->thresh_mult[THR_DC ] = 0;
+ sf->thresh_mult[THR_TM ] = 0;
+ sf->thresh_mult[THR_NEWMV ] = 4000;
+ sf->thresh_mult[THR_NEWG ] = 4000;
+ sf->thresh_mult[THR_NEWA ] = 4000;
+ sf->thresh_mult[THR_NEARESTMV] = 0;
+ sf->thresh_mult[THR_NEARESTG ] = 0;
+ sf->thresh_mult[THR_NEARESTA ] = 0;
+ sf->thresh_mult[THR_NEARMV ] = 2000;
+ sf->thresh_mult[THR_NEARG ] = 2000;
+ sf->thresh_mult[THR_NEARA ] = 2000;
+ sf->thresh_mult[THR_COMP_NEARESTLA] = 2000;
+ sf->recode_loop = 0;
+ }
+
+ /* disable frame modes if flags not set */
+ if (!(cpi->ref_frame_flags & VP9_LAST_FLAG)) {
+ sf->thresh_mult[THR_NEWMV ] = INT_MAX;
+ sf->thresh_mult[THR_NEARESTMV] = INT_MAX;
+ sf->thresh_mult[THR_ZEROMV ] = INT_MAX;
+ sf->thresh_mult[THR_NEARMV ] = INT_MAX;
+ sf->thresh_mult[THR_SPLITMV ] = INT_MAX;
+ }
+ if (!(cpi->ref_frame_flags & VP9_GOLD_FLAG)) {
+ sf->thresh_mult[THR_NEARESTG ] = INT_MAX;
+ sf->thresh_mult[THR_ZEROG ] = INT_MAX;
+ sf->thresh_mult[THR_NEARG ] = INT_MAX;
+ sf->thresh_mult[THR_NEWG ] = INT_MAX;
+ sf->thresh_mult[THR_SPLITG ] = INT_MAX;
+ }
+ if (!(cpi->ref_frame_flags & VP9_ALT_FLAG)) {
+ sf->thresh_mult[THR_NEARESTA ] = INT_MAX;
+ sf->thresh_mult[THR_ZEROA ] = INT_MAX;
+ sf->thresh_mult[THR_NEARA ] = INT_MAX;
+ sf->thresh_mult[THR_NEWA ] = INT_MAX;
+ sf->thresh_mult[THR_SPLITA ] = INT_MAX;
+ }
+
+ if ((cpi->ref_frame_flags & (VP9_LAST_FLAG | VP9_ALT_FLAG)) !=
+ (VP9_LAST_FLAG | VP9_ALT_FLAG)) {
+ sf->thresh_mult[THR_COMP_ZEROLA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEARESTLA] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEARLA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEWLA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_SPLITLA ] = INT_MAX;
+ }
+ if ((cpi->ref_frame_flags & (VP9_GOLD_FLAG | VP9_ALT_FLAG)) !=
+ (VP9_GOLD_FLAG | VP9_ALT_FLAG)) {
+ sf->thresh_mult[THR_COMP_ZEROGA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEARESTGA] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEARGA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_NEWGA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_SPLITGA ] = INT_MAX;
+ }
+}
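+
+/* Editor's note (illustrative): thresh_mult values scale linearly with the
+ * speed setting. At speed 2, speed_multiplier = 3, so e.g. THR_NEARMV
+ * gains 3 * 1000 = 3000. INT_MAX acts as a sentinel meaning "never
+ * evaluate this mode", which is how modes for unavailable reference
+ * frames are pruned above.
+ */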
+
+void vp9_set_speed_features(VP9_COMP *cpi) {
+ SPEED_FEATURES *sf = &cpi->sf;
+ int mode = cpi->compressor_speed;
+ int speed = cpi->speed;
+ int i;
+
+ // Only modes 0 and 1 supported for now in experimental code base
+ if (mode > 1)
+ mode = 1;
+
+ // Initialise default mode frequency sampling variables
+ for (i = 0; i < MAX_MODES; i ++) {
+ cpi->mode_check_freq[i] = 0;
+ cpi->mode_test_hit_counts[i] = 0;
+ cpi->mode_chosen_counts[i] = 0;
+ }
+
+ // best quality defaults
+ sf->RD = 1;
+ sf->search_method = NSTEP;
+ sf->auto_filter = 1;
+ sf->recode_loop = 1;
+ sf->quarter_pixel_search = 1;
+ sf->half_pixel_search = 1;
+ sf->iterative_sub_pixel = 1;
+ sf->optimize_coefficients = !cpi->oxcf.lossless;
+ sf->first_step = 0;
+ sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
+ sf->comp_inter_joint_search_thresh = BLOCK_SIZE_AB4X4;
+ sf->adpative_rd_thresh = 0;
+
+#if CONFIG_MULTIPLE_ARF
+ // Switch segmentation off.
+ sf->static_segmentation = 0;
+#else
+ sf->static_segmentation = 0;
+#endif
+
+ switch (mode) {
+ case 0: // best quality mode
+ sf->search_best_filter = SEARCH_BEST_FILTER;
+ break;
+
+ case 1:
+#if CONFIG_MULTIPLE_ARF
+ // Switch segmentation off.
+ sf->static_segmentation = 0;
+#else
+ sf->static_segmentation = 0;
+#endif
+ sf->comp_inter_joint_search_thresh = BLOCK_SIZE_SB8X8;
+ sf->adpative_rd_thresh = 1;
+ if (speed > 0) {
+ sf->comp_inter_joint_search_thresh = BLOCK_SIZE_TYPES;
+ sf->optimize_coefficients = 0;
+ sf->first_step = 1;
+ }
+ break;
+
+ } /* switch */
+
+ // Set rd thresholds based on mode and speed setting
+ set_rd_speed_thresholds(cpi, mode, speed);
+
+ // Slow quant, dct and trellis not worthwhile for first pass
+ // so make sure they are always turned off.
+ if (cpi->pass == 1) {
+ sf->optimize_coefficients = 0;
+ }
+
+ cpi->mb.fwd_txm16x16 = vp9_short_fdct16x16;
+ cpi->mb.fwd_txm8x8 = vp9_short_fdct8x8;
+ cpi->mb.fwd_txm8x4 = vp9_short_fdct8x4;
+ cpi->mb.fwd_txm4x4 = vp9_short_fdct4x4;
+ if (cpi->oxcf.lossless || cpi->mb.e_mbd.lossless) {
+ cpi->mb.fwd_txm8x4 = vp9_short_walsh8x4;
+ cpi->mb.fwd_txm4x4 = vp9_short_walsh4x4;
+ }
+
+ cpi->mb.quantize_b_4x4 = vp9_regular_quantize_b_4x4;
+
+ vp9_init_quantizer(cpi);
+
+ if (cpi->sf.iterative_sub_pixel == 1) {
+ cpi->find_fractional_mv_step = vp9_find_best_sub_pixel_step_iteratively;
+ } else if (cpi->sf.quarter_pixel_search) {
+ cpi->find_fractional_mv_step = vp9_find_best_sub_pixel_step;
+ } else if (cpi->sf.half_pixel_search) {
+ cpi->find_fractional_mv_step = vp9_find_best_half_pixel_step;
+ }
+
+ cpi->mb.optimize = cpi->sf.optimize_coefficients == 1 && cpi->pass != 1;
+
+#ifdef SPEEDSTATS
+ frames_at_speed[cpi->speed]++;
+#endif
+}
+
+static void alloc_raw_frame_buffers(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+
+ cpi->lookahead = vp9_lookahead_init(cpi->oxcf.width, cpi->oxcf.height,
+ cm->subsampling_x, cm->subsampling_y,
+ cpi->oxcf.lag_in_frames);
+ if (!cpi->lookahead)
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate lag buffers");
+
+ if (vp9_realloc_frame_buffer(&cpi->alt_ref_buffer,
+ cpi->oxcf.width, cpi->oxcf.height,
+ cm->subsampling_x, cm->subsampling_y,
+ VP9BORDERINPIXELS))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate altref buffer");
+}
+
+static int alloc_partition_data(VP9_COMP *cpi) {
+ vpx_free(cpi->mb.pip);
+
+ cpi->mb.pip = vpx_calloc((cpi->common.mode_info_stride) *
+ (cpi->common.mi_rows + 64 / MI_SIZE),
+ sizeof(PARTITION_INFO));
+ if (!cpi->mb.pip)
+ return 1;
+
+ cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
+
+ return 0;
+}
+
+void vp9_alloc_compressor_data(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+
+ if (vp9_alloc_frame_buffers(cm, cm->width, cm->height))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate frame buffers");
+
+ if (alloc_partition_data(cpi))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate partition data");
+
+ if (vp9_alloc_frame_buffer(&cpi->last_frame_uf,
+ cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+ VP9BORDERINPIXELS))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate last frame buffer");
+
+ if (vp9_alloc_frame_buffer(&cpi->scaled_source,
+ cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+ VP9BORDERINPIXELS))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate scaled source buffer");
+
+ vpx_free(cpi->tok);
+
+ {
+ unsigned int tokens = get_token_alloc(cm->mb_rows, cm->mb_cols);
+
+ CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
+ }
+
+ // Data used for real time vc mode to see if gf needs refreshing
+ cpi->inter_zz_count = 0;
+ cpi->gf_bad_count = 0;
+ cpi->gf_update_recommended = 0;
+
+ vpx_free(cpi->mb_activity_map);
+ CHECK_MEM_ERROR(cpi->mb_activity_map,
+ vpx_calloc(sizeof(unsigned int),
+ cm->mb_rows * cm->mb_cols));
+
+ vpx_free(cpi->mb_norm_activity_map);
+ CHECK_MEM_ERROR(cpi->mb_norm_activity_map,
+ vpx_calloc(sizeof(unsigned int),
+ cm->mb_rows * cm->mb_cols));
+}
+
+
+static void update_frame_size(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+
+ vp9_update_frame_size(cm);
+
+ // Update size of buffers local to this frame
+ if (vp9_realloc_frame_buffer(&cpi->last_frame_uf,
+ cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+ VP9BORDERINPIXELS))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to reallocate last frame buffer");
+
+ if (vp9_realloc_frame_buffer(&cpi->scaled_source,
+ cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+ VP9BORDERINPIXELS))
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to reallocate scaled source buffer");
+
+ {
+ int y_stride = cpi->scaled_source.y_stride;
+
+ if (cpi->sf.search_method == NSTEP) {
+ vp9_init3smotion_compensation(&cpi->mb, y_stride);
+ } else if (cpi->sf.search_method == DIAMOND) {
+ vp9_init_dsmotion_compensation(&cpi->mb, y_stride);
+ }
+ }
+}
+
+
+// TODO: perhaps change the number of steps exposed to the outside world when
+// setting max and min limits. This will also likely need refining for the
+// extended Q range.
+//
+// Table that converts 0-63 Q range values passed in outside to the Qindex
+// range used internally.
+static const int q_trans[] = {
+ 0, 4, 8, 12, 16, 20, 24, 28,
+ 32, 36, 40, 44, 48, 52, 56, 60,
+ 64, 68, 72, 76, 80, 84, 88, 92,
+ 96, 100, 104, 108, 112, 116, 120, 124,
+ 128, 132, 136, 140, 144, 148, 152, 156,
+ 160, 164, 168, 172, 176, 180, 184, 188,
+ 192, 196, 200, 204, 208, 212, 216, 220,
+ 224, 228, 232, 236, 240, 244, 249, 255,
+};
+
+int vp9_reverse_trans(int x) {
+ int i;
+
+ for (i = 0; i < 64; i++)
+ if (q_trans[i] >= x)
+ return i;
+
+ return 63;
+}
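+
+// Worked example (illustrative only): q_trans maps the external 0-63
+// quality scale onto the internal 0-255 qindex scale, and
+// vp9_reverse_trans() inverts it. For instance q_trans[10] == 40 and
+// vp9_reverse_trans(40) == 10; a qindex between table entries rounds up
+// to the next external step, so vp9_reverse_trans(41) == 11.
+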
+void vp9_new_frame_rate(VP9_COMP *cpi, double framerate) {
+ if (framerate < 0.1)
+ framerate = 30;
+
+ cpi->oxcf.frame_rate = framerate;
+ cpi->output_frame_rate = cpi->oxcf.frame_rate;
+  cpi->per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth /
+                                   cpi->output_frame_rate);
+  cpi->av_per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth /
+                                      cpi->output_frame_rate);
+  cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
+                                   cpi->oxcf.two_pass_vbrmin_section / 100);
+
+  cpi->min_frame_bandwidth = MAX(cpi->min_frame_bandwidth,
+                                 FRAME_OVERHEAD_BITS);
+
+ // Set Maximum gf/arf interval
+ cpi->max_gf_interval = 16;
+
+ // Extended interval for genuinely static scenes
+ cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
+
+ // Special conditions when alt ref frame enabled in lagged compress mode
+ if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) {
+ if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1)
+ cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
+
+ if (cpi->twopass.static_scene_max_gf_interval > cpi->oxcf.lag_in_frames - 1)
+ cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
+ }
+
+ if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval)
+ cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
+}
+
+static int64_t rescale(int val, int64_t num, int denom) {
+ int64_t llnum = num;
+ int64_t llden = denom;
+ int64_t llval = val;
+
+ return (llval * llnum / llden);
+}
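+
+// Illustrative use (a sketch; the assumption here is that the oxcf buffer
+// levels are expressed in milliseconds, as in the vpx_codec_enc_cfg API):
+//   rescale(6000 /* ms */, 256000 /* bit/s */, 1000)
+//     == 6000 * 256000 / 1000 == 1536000 bits,
+// i.e. rescale() converts a duration at the target bitrate into a bit
+// budget, using 64-bit intermediates to avoid overflow.
+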
+
+static void set_tile_limits(VP9_COMP *cpi) {
+ VP9_COMMON *const cm = &cpi->common;
+ int min_log2_tiles, max_log2_tiles;
+
+ cm->log2_tile_columns = cpi->oxcf.tile_columns;
+ cm->log2_tile_rows = cpi->oxcf.tile_rows;
+
+ vp9_get_tile_n_bits(cm, &min_log2_tiles, &max_log2_tiles);
+ max_log2_tiles += min_log2_tiles;
+
+ cm->log2_tile_columns = clamp(cm->log2_tile_columns,
+ min_log2_tiles, max_log2_tiles);
+
+ cm->tile_columns = 1 << cm->log2_tile_columns;
+ cm->tile_rows = 1 << cm->log2_tile_rows;
+}
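+
+// Example (with hypothetical values; the actual bounds come from
+// vp9_get_tile_n_bits): if min_log2_tiles == 0 and the returned range is 2,
+// max_log2_tiles becomes 2, so a request of oxcf.tile_columns == 6
+// (64 columns) is clamped to 2 and the frame is coded with
+// 1 << 2 == 4 tile columns.
+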
+
+static void init_config(VP9_PTR ptr, VP9_CONFIG *oxcf) {
+ VP9_COMP *cpi = (VP9_COMP *)(ptr);
+ VP9_COMMON *const cm = &cpi->common;
+ int i;
+
+ cpi->oxcf = *oxcf;
+ cpi->goldfreq = 7;
+
+ cm->version = oxcf->version;
+
+ cm->width = oxcf->width;
+ cm->height = oxcf->height;
+ cm->subsampling_x = 0;
+ cm->subsampling_y = 0;
+ vp9_alloc_compressor_data(cpi);
+
+  // vp9_change_config() applies all the remaining (shared) configuration.
+ vp9_change_config(ptr, oxcf);
+
+ // Initialize active best and worst q and average q values.
+ cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
+ cpi->active_best_quality = cpi->oxcf.best_allowed_q;
+ cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
+
+ // Initialise the starting buffer levels
+ cpi->buffer_level = cpi->oxcf.starting_buffer_level;
+ cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
+
+ cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
+ cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
+ cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
+ cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
+
+ cpi->total_actual_bits = 0;
+ cpi->total_target_vs_actual = 0;
+
+ cpi->static_mb_pct = 0;
+
+ cpi->lst_fb_idx = 0;
+ cpi->gld_fb_idx = 1;
+ cpi->alt_fb_idx = 2;
+
+ set_tile_limits(cpi);
+
+ cpi->fixed_divide[0] = 0;
+ for (i = 1; i < 512; i++)
+ cpi->fixed_divide[i] = 0x80000 / i;
+}
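+
+// Note on the reciprocal table above (a sketch, not a call made here):
+// fixed_divide[i] == (1 << 19) / i, so an integer division n / i can be
+// approximated without a hardware divide as
+//   (n * cpi->fixed_divide[i]) >> 19
+// e.g. i == 3 gives fixed_divide[3] == 174762 and
+// (3000 * 174762) >> 19 == 999, close to the exact 1000.
+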
+
+
+void vp9_change_config(VP9_PTR ptr, VP9_CONFIG *oxcf) {
+ VP9_COMP *cpi = (VP9_COMP *)(ptr);
+ VP9_COMMON *const cm = &cpi->common;
+
+ if (!cpi || !oxcf)
+ return;
+
+ if (cm->version != oxcf->version) {
+ cm->version = oxcf->version;
+ }
+
+ cpi->oxcf = *oxcf;
+
+ switch (cpi->oxcf.Mode) {
+ // Real time and one pass deprecated in test code base
+ case MODE_FIRSTPASS:
+ cpi->pass = 1;
+ cpi->compressor_speed = 1;
+ break;
+
+ case MODE_SECONDPASS:
+ cpi->pass = 2;
+ cpi->compressor_speed = 1;
+ cpi->oxcf.cpu_used = clamp(cpi->oxcf.cpu_used, -5, 5);
+ break;
+
+ case MODE_SECONDPASS_BEST:
+ cpi->pass = 2;
+ cpi->compressor_speed = 0;
+ break;
+ }
+
+ cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
+ cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
+ cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
+
+ cpi->oxcf.lossless = oxcf->lossless;
+ if (cpi->oxcf.lossless) {
+ cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_iwalsh4x4_1_add;
+ cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_iwalsh4x4_add;
+ } else {
+ cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_idct4x4_1_add;
+ cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_idct4x4_add;
+ }
+
+ cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
+
+ cpi->ref_frame_flags = VP9_ALT_FLAG | VP9_GOLD_FLAG | VP9_LAST_FLAG;
+
+ // cpi->use_golden_frame_only = 0;
+ // cpi->use_last_frame_only = 0;
+ cpi->refresh_golden_frame = 0;
+ cpi->refresh_last_frame = 1;
+ cm->refresh_frame_context = 1;
+ cm->reset_frame_context = 0;
+
+ setup_features(cpi);
+ cpi->mb.e_mbd.allow_high_precision_mv = 0; // Default mv precision adaptation
+ set_mvcost(&cpi->mb);
+
+ {
+ int i;
+
+ for (i = 0; i < MAX_MB_SEGMENTS; i++)
+ cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
+ }
+
+ // At the moment the first order values may not be > MAXQ
+ cpi->oxcf.fixed_q = MIN(cpi->oxcf.fixed_q, MAXQ);
+
+ // local file playback mode == really big buffer
+ if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) {
+ cpi->oxcf.starting_buffer_level = 60000;
+ cpi->oxcf.optimal_buffer_level = 60000;
+ cpi->oxcf.maximum_buffer_size = 240000;
+ }
+
+ // Convert target bandwidth from Kbit/s to Bit/s
+ cpi->oxcf.target_bandwidth *= 1000;
+
+ cpi->oxcf.starting_buffer_level = rescale(cpi->oxcf.starting_buffer_level,
+ cpi->oxcf.target_bandwidth, 1000);
+
+ // Set or reset optimal and maximum buffer levels.
+ if (cpi->oxcf.optimal_buffer_level == 0)
+ cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
+ else
+ cpi->oxcf.optimal_buffer_level = rescale(cpi->oxcf.optimal_buffer_level,
+ cpi->oxcf.target_bandwidth, 1000);
+
+ if (cpi->oxcf.maximum_buffer_size == 0)
+ cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
+ else
+ cpi->oxcf.maximum_buffer_size = rescale(cpi->oxcf.maximum_buffer_size,
+ cpi->oxcf.target_bandwidth, 1000);
+
+ // Set up frame rate and related parameters rate control values.
+ vp9_new_frame_rate(cpi, cpi->oxcf.frame_rate);
+
+ // Set absolute upper and lower quality limits
+ cpi->worst_quality = cpi->oxcf.worst_allowed_q;
+ cpi->best_quality = cpi->oxcf.best_allowed_q;
+
+ // active values should only be modified if out of new range
+ cpi->active_worst_quality = clamp(cpi->active_worst_quality,
+ cpi->oxcf.best_allowed_q,
+ cpi->oxcf.worst_allowed_q);
+
+ cpi->active_best_quality = clamp(cpi->active_best_quality,
+ cpi->oxcf.best_allowed_q,
+ cpi->oxcf.worst_allowed_q);
+
+ cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;
+
+ cpi->cq_target_quality = cpi->oxcf.cq_level;
+
+ cm->mcomp_filter_type = DEFAULT_INTERP_FILTER;
+
+ cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
+
+ cm->display_width = cpi->oxcf.width;
+ cm->display_height = cpi->oxcf.height;
+
+ // VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs)
+ cpi->oxcf.Sharpness = MIN(7, cpi->oxcf.Sharpness);
+
+ cm->sharpness_level = cpi->oxcf.Sharpness;
+
+ if (cpi->initial_width) {
+ // Increasing the size of the frame beyond the first seen frame, or some
+ // otherwise signalled maximum size, is not supported.
+ // TODO(jkoleszar): exit gracefully.
+ assert(cm->width <= cpi->initial_width);
+ assert(cm->height <= cpi->initial_height);
+ }
+ update_frame_size(cpi);
+
+ if (cpi->oxcf.fixed_q >= 0) {
+ cpi->last_q[0] = cpi->oxcf.fixed_q;
+ cpi->last_q[1] = cpi->oxcf.fixed_q;
+ cpi->last_boosted_qindex = cpi->oxcf.fixed_q;
+ }
+
+ cpi->speed = cpi->oxcf.cpu_used;
+
+ if (cpi->oxcf.lag_in_frames == 0) {
+    // Force allow_lag to 0 if lag_in_frames is 0.
+ cpi->oxcf.allow_lag = 0;
+ } else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) {
+ // Limit on lag buffers as these are not currently dynamically allocated
+ cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
+ }
+
+ // YX Temp
+#if CONFIG_MULTIPLE_ARF
+ vp9_zero(cpi->alt_ref_source);
+#else
+ cpi->alt_ref_source = NULL;
+#endif
+ cpi->is_src_frame_alt_ref = 0;
+
+#if 0
+ // Experimental RD Code
+ cpi->frame_distortion = 0;
+ cpi->last_frame_distortion = 0;
+#endif
+
+ set_tile_limits(cpi);
+}
+
+// Note: despite its name, this constant is ln(2) (log2(e) would be
+// 1.4427...), so the macro below computes log(x) / ln(2) == log2(x).
+#define M_LOG2_E 0.693147180559945309417
+#define log2f(x) (log (x) / (float) M_LOG2_E)
+
+static void cal_nmvjointsadcost(int *mvjointsadcost) {
+  // Costs for all four MV joint classes.
+  mvjointsadcost[0] = 600;
+  mvjointsadcost[1] = 300;
+  mvjointsadcost[2] = 300;
+  mvjointsadcost[3] = 300;
+}
+
+static void cal_nmvsadcosts(int *mvsadcost[2]) {
+ int i = 1;
+
+ mvsadcost[0][0] = 0;
+ mvsadcost[1][0] = 0;
+
+ do {
+ double z = 256 * (2 * (log2f(8 * i) + .6));
+ mvsadcost[0][i] = (int)z;
+ mvsadcost[1][i] = (int)z;
+ mvsadcost[0][-i] = (int)z;
+ mvsadcost[1][-i] = (int)z;
+ } while (++i <= MV_MAX);
+}
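+
+// Worked example for the cost curve above (illustrative): at i == 1,
+// z = 256 * (2 * (log2(8) + 0.6)) = 256 * (2 * 3.6) = 1843.2, so the SAD
+// cost of a one-unit MV component is 1843; the cost grows logarithmically
+// with the magnitude of the component.
+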
+
+static void cal_nmvsadcosts_hp(int *mvsadcost[2]) {
+ int i = 1;
+
+ mvsadcost[0][0] = 0;
+ mvsadcost[1][0] = 0;
+
+ do {
+ double z = 256 * (2 * (log2f(8 * i) + .6));
+ mvsadcost[0][i] = (int)z;
+ mvsadcost[1][i] = (int)z;
+ mvsadcost[0][-i] = (int)z;
+ mvsadcost[1][-i] = (int)z;
+ } while (++i <= MV_MAX);
+}
+
+VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
+ int i;
+ volatile union {
+ VP9_COMP *cpi;
+ VP9_PTR ptr;
+ } ctx;
+
+ VP9_COMP *cpi;
+ VP9_COMMON *cm;
+
+ cpi = ctx.cpi = vpx_memalign(32, sizeof(VP9_COMP));
+ // Check that the CPI instance is valid
+ if (!cpi)
+ return 0;
+
+ cm = &cpi->common;
+
+ vpx_memset(cpi, 0, sizeof(VP9_COMP));
+
+ if (setjmp(cm->error.jmp)) {
+ VP9_PTR ptr = ctx.ptr;
+
+ ctx.cpi->common.error.setjmp = 0;
+ vp9_remove_compressor(&ptr);
+ return 0;
+ }
+
+ cpi->common.error.setjmp = 1;
+
+  CHECK_MEM_ERROR(cpi->mb.ss,
+                  vpx_calloc(sizeof(search_site),
+                             (MAX_MVSEARCH_STEPS * 8) + 1));
+
+ vp9_create_common(&cpi->common);
+
+ init_config((VP9_PTR)cpi, oxcf);
+
+ cpi->common.current_video_frame = 0;
+ cpi->kf_overspend_bits = 0;
+ cpi->kf_bitrate_adjustment = 0;
+ cpi->frames_till_gf_update_due = 0;
+ cpi->gf_overspend_bits = 0;
+ cpi->non_gf_bitrate_adjustment = 0;
+
+ // Set reference frame sign bias for ALTREF frame to 1 (for now)
+ cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
+
+ cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
+
+ cpi->gold_is_last = 0;
+ cpi->alt_is_last = 0;
+ cpi->gold_is_alt = 0;
+
+ // Create the encoder segmentation map and set all entries to 0
+ CHECK_MEM_ERROR(cpi->segmentation_map,
+ vpx_calloc(cpi->common.mi_rows * cpi->common.mi_cols, 1));
+
+ // And a copy in common for temporal coding
+ CHECK_MEM_ERROR(cm->last_frame_seg_map,
+ vpx_calloc(cpi->common.mi_rows * cpi->common.mi_cols, 1));
+
+  // And a placeholder structure in the coding context
+  // for use if we want to save and restore it
+ CHECK_MEM_ERROR(cpi->coding_context.last_frame_seg_map_copy,
+ vpx_calloc(cpi->common.mi_rows * cpi->common.mi_cols, 1));
+
+  CHECK_MEM_ERROR(cpi->active_map,
+                  vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
+ vpx_memset(cpi->active_map, 1, (cpi->common.mb_rows * cpi->common.mb_cols));
+ cpi->active_map_enabled = 0;
+
+ for (i = 0; i < (sizeof(cpi->mbgraph_stats) /
+ sizeof(cpi->mbgraph_stats[0])); i++) {
+ CHECK_MEM_ERROR(cpi->mbgraph_stats[i].mb_stats,
+ vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols *
+ sizeof(*cpi->mbgraph_stats[i].mb_stats),
+ 1));
+ }
+
+#ifdef ENTROPY_STATS
+ if (cpi->pass != 1)
+ init_context_counters();
+#endif
+
+#ifdef NMV_STATS
+ init_nmvstats();
+#endif
+#ifdef MODE_STATS
+ init_tx_count_stats();
+ init_switchable_interp_stats();
+#endif
+
+  /* Initialize the feed-forward activity masking. */
+ cpi->activity_avg = 90 << 12;
+
+ cpi->frames_since_key = 8; // Give a sensible default for the first frame.
+ cpi->key_frame_frequency = cpi->oxcf.key_freq;
+ cpi->this_key_frame_forced = 0;
+ cpi->next_key_frame_forced = 0;
+
+ cpi->source_alt_ref_pending = 0;
+ cpi->source_alt_ref_active = 0;
+ cpi->refresh_alt_ref_frame = 0;
+
+#if CONFIG_MULTIPLE_ARF
+ // Turn multiple ARF usage on/off. This is a quick hack for the initial test
+ // version. It should eventually be set via the codec API.
+ cpi->multi_arf_enabled = 1;
+
+ if (cpi->multi_arf_enabled) {
+ cpi->sequence_number = 0;
+ cpi->frame_coding_order_period = 0;
+ vp9_zero(cpi->frame_coding_order);
+ vp9_zero(cpi->arf_buffer_idx);
+ }
+#endif
+
+ cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
+#if CONFIG_INTERNAL_STATS
+ cpi->b_calculate_ssimg = 0;
+
+ cpi->count = 0;
+ cpi->bytes = 0;
+
+ if (cpi->b_calculate_psnr) {
+ cpi->total_sq_error = 0.0;
+ cpi->total_sq_error2 = 0.0;
+ cpi->total_y = 0.0;
+ cpi->total_u = 0.0;
+ cpi->total_v = 0.0;
+ cpi->total = 0.0;
+ cpi->totalp_y = 0.0;
+ cpi->totalp_u = 0.0;
+ cpi->totalp_v = 0.0;
+ cpi->totalp = 0.0;
+ cpi->tot_recode_hits = 0;
+ cpi->summed_quality = 0;
+ cpi->summed_weights = 0;
+ cpi->summedp_quality = 0;
+ cpi->summedp_weights = 0;
+ }
+
+ if (cpi->b_calculate_ssimg) {
+ cpi->total_ssimg_y = 0;
+ cpi->total_ssimg_u = 0;
+ cpi->total_ssimg_v = 0;
+ cpi->total_ssimg_all = 0;
+ }
+
+#endif
+
+ cpi->first_time_stamp_ever = INT64_MAX;
+
+ cpi->frames_till_gf_update_due = 0;
+ cpi->key_frame_count = 1;
+
+ cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
+ cpi->ni_tot_qi = 0;
+ cpi->ni_frames = 0;
+ cpi->tot_q = 0.0;
+ cpi->avg_q = vp9_convert_qindex_to_q(cpi->oxcf.worst_allowed_q);
+ cpi->total_byte_count = 0;
+
+ cpi->rate_correction_factor = 1.0;
+ cpi->key_frame_rate_correction_factor = 1.0;
+ cpi->gf_rate_correction_factor = 1.0;
+ cpi->twopass.est_max_qcorrection_factor = 1.0;
+
+ cal_nmvjointsadcost(cpi->mb.nmvjointsadcost);
+ cpi->mb.nmvcost[0] = &cpi->mb.nmvcosts[0][MV_MAX];
+ cpi->mb.nmvcost[1] = &cpi->mb.nmvcosts[1][MV_MAX];
+ cpi->mb.nmvsadcost[0] = &cpi->mb.nmvsadcosts[0][MV_MAX];
+ cpi->mb.nmvsadcost[1] = &cpi->mb.nmvsadcosts[1][MV_MAX];
+ cal_nmvsadcosts(cpi->mb.nmvsadcost);
+
+ cpi->mb.nmvcost_hp[0] = &cpi->mb.nmvcosts_hp[0][MV_MAX];
+ cpi->mb.nmvcost_hp[1] = &cpi->mb.nmvcosts_hp[1][MV_MAX];
+ cpi->mb.nmvsadcost_hp[0] = &cpi->mb.nmvsadcosts_hp[0][MV_MAX];
+ cpi->mb.nmvsadcost_hp[1] = &cpi->mb.nmvsadcosts_hp[1][MV_MAX];
+ cal_nmvsadcosts_hp(cpi->mb.nmvsadcost_hp);
+
+ for (i = 0; i < KEY_FRAME_CONTEXT; i++)
+ cpi->prior_key_frame_distance[i] = (int)cpi->output_frame_rate;
+
+#ifdef OUTPUT_YUV_SRC
+ yuv_file = fopen("bd.yuv", "ab");
+#endif
+#ifdef OUTPUT_YUV_REC
+ yuv_rec_file = fopen("rec.yuv", "wb");
+#endif
+
+#if 0
+ framepsnr = fopen("framepsnr.stt", "a");
+ kf_list = fopen("kf_list.stt", "w");
+#endif
+
+ cpi->output_pkt_list = oxcf->output_pkt_list;
+
+ if (cpi->pass == 1) {
+ vp9_init_first_pass(cpi);
+ } else if (cpi->pass == 2) {
+ size_t packet_sz = sizeof(FIRSTPASS_STATS);
+ int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
+
+ cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
+ cpi->twopass.stats_in = cpi->twopass.stats_in_start;
+ cpi->twopass.stats_in_end = (void *)((char *)cpi->twopass.stats_in
+ + (packets - 1) * packet_sz);
+ vp9_init_second_pass(cpi);
+ }
+
+ vp9_set_speed_features(cpi);
+
+ // Set starting values of RD threshold multipliers (128 = *1)
+ for (i = 0; i < MAX_MODES; i++)
+ cpi->rd_thresh_mult[i] = 128;
+
+#define BFP(BT, SDF, VF, SVF, SVAF, SVFHH, SVFHV, SVFHHV, SDX3F, SDX8F, SDX4DF)\
+ cpi->fn_ptr[BT].sdf = SDF; \
+ cpi->fn_ptr[BT].vf = VF; \
+ cpi->fn_ptr[BT].svf = SVF; \
+ cpi->fn_ptr[BT].svaf = SVAF; \
+ cpi->fn_ptr[BT].svf_halfpix_h = SVFHH; \
+ cpi->fn_ptr[BT].svf_halfpix_v = SVFHV; \
+ cpi->fn_ptr[BT].svf_halfpix_hv = SVFHHV; \
+ cpi->fn_ptr[BT].sdx3f = SDX3F; \
+ cpi->fn_ptr[BT].sdx8f = SDX8F; \
+ cpi->fn_ptr[BT].sdx4df = SDX4DF;
+
+ BFP(BLOCK_32X16, vp9_sad32x16, vp9_variance32x16, vp9_sub_pixel_variance32x16,
+ vp9_sub_pixel_avg_variance32x16, NULL, NULL,
+ NULL, NULL, NULL,
+ vp9_sad32x16x4d)
+
+ BFP(BLOCK_16X32, vp9_sad16x32, vp9_variance16x32, vp9_sub_pixel_variance16x32,
+ vp9_sub_pixel_avg_variance16x32, NULL, NULL,
+ NULL, NULL, NULL,
+ vp9_sad16x32x4d)
+
+ BFP(BLOCK_64X32, vp9_sad64x32, vp9_variance64x32, vp9_sub_pixel_variance64x32,
+ vp9_sub_pixel_avg_variance64x32, NULL, NULL,
+ NULL, NULL, NULL,
+ vp9_sad64x32x4d)
+
+ BFP(BLOCK_32X64, vp9_sad32x64, vp9_variance32x64, vp9_sub_pixel_variance32x64,
+ vp9_sub_pixel_avg_variance32x64, NULL, NULL,
+ NULL, NULL, NULL,
+ vp9_sad32x64x4d)
+
+ BFP(BLOCK_32X32, vp9_sad32x32, vp9_variance32x32, vp9_sub_pixel_variance32x32,
+ vp9_sub_pixel_avg_variance32x32, vp9_variance_halfpixvar32x32_h,
+ vp9_variance_halfpixvar32x32_v,
+ vp9_variance_halfpixvar32x32_hv, vp9_sad32x32x3, vp9_sad32x32x8,
+ vp9_sad32x32x4d)
+
+ BFP(BLOCK_64X64, vp9_sad64x64, vp9_variance64x64, vp9_sub_pixel_variance64x64,
+ vp9_sub_pixel_avg_variance64x64, vp9_variance_halfpixvar64x64_h,
+ vp9_variance_halfpixvar64x64_v,
+ vp9_variance_halfpixvar64x64_hv, vp9_sad64x64x3, vp9_sad64x64x8,
+ vp9_sad64x64x4d)
+
+ BFP(BLOCK_16X16, vp9_sad16x16, vp9_variance16x16, vp9_sub_pixel_variance16x16,
+ vp9_sub_pixel_avg_variance16x16, vp9_variance_halfpixvar16x16_h,
+ vp9_variance_halfpixvar16x16_v,
+ vp9_variance_halfpixvar16x16_hv, vp9_sad16x16x3, vp9_sad16x16x8,
+ vp9_sad16x16x4d)
+
+ BFP(BLOCK_16X8, vp9_sad16x8, vp9_variance16x8, vp9_sub_pixel_variance16x8,
+ vp9_sub_pixel_avg_variance16x8, NULL, NULL, NULL,
+ vp9_sad16x8x3, vp9_sad16x8x8, vp9_sad16x8x4d)
+
+ BFP(BLOCK_8X16, vp9_sad8x16, vp9_variance8x16, vp9_sub_pixel_variance8x16,
+ vp9_sub_pixel_avg_variance8x16, NULL, NULL, NULL,
+ vp9_sad8x16x3, vp9_sad8x16x8, vp9_sad8x16x4d)
+
+ BFP(BLOCK_8X8, vp9_sad8x8, vp9_variance8x8, vp9_sub_pixel_variance8x8,
+ vp9_sub_pixel_avg_variance8x8, NULL, NULL, NULL,
+ vp9_sad8x8x3, vp9_sad8x8x8, vp9_sad8x8x4d)
+
+ BFP(BLOCK_8X4, vp9_sad8x4, vp9_variance8x4, vp9_sub_pixel_variance8x4,
+ vp9_sub_pixel_avg_variance8x4, NULL, NULL,
+ NULL, NULL, vp9_sad8x4x8,
+ vp9_sad8x4x4d)
+
+ BFP(BLOCK_4X8, vp9_sad4x8, vp9_variance4x8, vp9_sub_pixel_variance4x8,
+ vp9_sub_pixel_avg_variance4x8, NULL, NULL,
+ NULL, NULL, vp9_sad4x8x8,
+ vp9_sad4x8x4d)
+
+ BFP(BLOCK_4X4, vp9_sad4x4, vp9_variance4x4, vp9_sub_pixel_variance4x4,
+ vp9_sub_pixel_avg_variance4x4, NULL, NULL, NULL,
+ vp9_sad4x4x3, vp9_sad4x4x8, vp9_sad4x4x4d)
+
+ cpi->full_search_sad = vp9_full_search_sad;
+ cpi->diamond_search_sad = vp9_diamond_search_sad;
+ cpi->refining_search_sad = vp9_refining_search_sad;
+
+ // make sure frame 1 is okay
+ cpi->error_bins[0] = cpi->common.MBs;
+
+ /* vp9_init_quantizer() is first called here. Add check in
+ * vp9_frame_init_quantizer() so that vp9_init_quantizer is only
+ * called later when needed. This will avoid unnecessary calls of
+ * vp9_init_quantizer() for every frame.
+ */
+ vp9_init_quantizer(cpi);
+
+ vp9_loop_filter_init(cm);
+
+ cpi->common.error.setjmp = 0;
+
+  vp9_zero(cpi->y_uv_mode_count);
+
+ return (VP9_PTR) cpi;
+}
+
+void vp9_remove_compressor(VP9_PTR *ptr) {
+ VP9_COMP *cpi = (VP9_COMP *)(*ptr);
+ int i;
+
+  if (!cpi)
+    return;
+
+  if (cpi->common.current_video_frame > 0) {
+ if (cpi->pass == 2) {
+ vp9_end_second_pass(cpi);
+ }
+
+#ifdef ENTROPY_STATS
+ if (cpi->pass != 1) {
+ print_context_counters();
+ print_tree_update_probs();
+ print_mode_context(cpi);
+ }
+#endif
+#ifdef NMV_STATS
+ if (cpi->pass != 1)
+ print_nmvstats();
+#endif
+#ifdef MODE_STATS
+ if (cpi->pass != 1) {
+ write_tx_count_stats();
+ write_switchable_interp_stats();
+ }
+#endif
+
+#if CONFIG_INTERNAL_STATS
+
+ vp9_clear_system_state();
+
+ // printf("\n8x8-4x4:%d-%d\n", cpi->t8x8_count, cpi->t4x4_count);
+ if (cpi->pass != 1) {
+ FILE *f = fopen("opsnr.stt", "a");
+ double time_encoded = (cpi->last_end_time_stamp_seen
+ - cpi->first_time_stamp_ever) / 10000000.000;
+      double total_encode_time = (cpi->time_receive_data +
+                                  cpi->time_compress_data) / 1000.000;
+      double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded;
+
+ if (cpi->b_calculate_psnr) {
+ YV12_BUFFER_CONFIG *lst_yv12 =
+ &cpi->common.yv12_fb[cpi->common.ref_frame_map[cpi->lst_fb_idx]];
+ double samples = 3.0 / 2 * cpi->count *
+ lst_yv12->y_width * lst_yv12->y_height;
+ double total_psnr = vp9_mse2psnr(samples, 255.0, cpi->total_sq_error);
+ double total_psnr2 = vp9_mse2psnr(samples, 255.0, cpi->total_sq_error2);
+ double total_ssim = 100 * pow(cpi->summed_quality /
+ cpi->summed_weights, 8.0);
+ double total_ssimp = 100 * pow(cpi->summedp_quality /
+ cpi->summedp_weights, 8.0);
+
+ fprintf(f, "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\tGLPsnrP\t"
+ "VPXSSIM\tVPSSIMP\t Time(ms)\n");
+ fprintf(f, "%7.2f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%8.0f\n",
+ dr, cpi->total / cpi->count, total_psnr,
+ cpi->totalp / cpi->count, total_psnr2, total_ssim, total_ssimp,
+ total_encode_time);
+// fprintf(f, "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%8.0f %10ld\n",
+// dr, cpi->total / cpi->count, total_psnr,
+// cpi->totalp / cpi->count, total_psnr2, total_ssim,
+// total_encode_time, cpi->tot_recode_hits);
+ }
+
+ if (cpi->b_calculate_ssimg) {
+ fprintf(f, "BitRate\tSSIM_Y\tSSIM_U\tSSIM_V\tSSIM_A\t Time(ms)\n");
+        fprintf(f, "%7.2f\t%6.4f\t%6.4f\t%6.4f\t%6.4f\t%8.0f\n", dr,
+                cpi->total_ssimg_y / cpi->count,
+                cpi->total_ssimg_u / cpi->count,
+                cpi->total_ssimg_v / cpi->count,
+                cpi->total_ssimg_all / cpi->count, total_encode_time);
+// fprintf(f, "%7.3f\t%6.4f\t%6.4f\t%6.4f\t%6.4f\t%8.0f %10ld\n", dr,
+// cpi->total_ssimg_y / cpi->count, cpi->total_ssimg_u / cpi->count,
+// cpi->total_ssimg_v / cpi->count, cpi->total_ssimg_all / cpi->count, total_encode_time, cpi->tot_recode_hits);
+ }
+
+ fclose(f);
+ }
+
+#endif
+
+#ifdef ENTROPY_STATS
+ {
+ int i, j, k;
+ FILE *fmode = fopen("vp9_modecontext.c", "w");
+
+ fprintf(fmode, "\n#include \"vp9_entropymode.h\"\n\n");
+ fprintf(fmode, "const unsigned int vp9_kf_default_bmode_counts ");
+ fprintf(fmode, "[VP9_INTRA_MODES][VP9_INTRA_MODES]"
+ "[VP9_INTRA_MODES] =\n{\n");
+
+      for (i = 0; i < VP9_INTRA_MODES; i++) {
+        fprintf(fmode, " { // Above Mode :  %d\n", i);
+
+        for (j = 0; j < VP9_INTRA_MODES; j++) {
+          fprintf(fmode, "  {");
+
+          for (k = 0; k < VP9_INTRA_MODES; k++) {
+            if (!intra_mode_stats[i][j][k])
+              fprintf(fmode, " %5d, ", 1);
+            else
+              fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
+          }
+
+          fprintf(fmode, "}, // left_mode %d\n", j);
+        }
+
+        fprintf(fmode, " },\n");
+      }
+
+ fprintf(fmode, "};\n");
+ fclose(fmode);
+ }
+#endif
+
+
+#if defined(SECTIONBITS_OUTPUT)
+
+ if (0) {
+ int i;
+ FILE *f = fopen("tokenbits.stt", "a");
+
+ for (i = 0; i < 28; i++)
+ fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
+
+ fprintf(f, "\n");
+ fclose(f);
+ }
+
+#endif
+
+#if 0
+ {
+ printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
+    printf("\n_frames receive_data encode_mb_row compress_frame  Total\n");
+    printf("%6d %10ld %10ld %10ld %10ld\n",
+           cpi->common.current_video_frame,
+           cpi->time_receive_data / 1000,
+           cpi->time_encode_mb_row / 1000,
+           cpi->time_compress_data / 1000,
+           (cpi->time_receive_data + cpi->time_compress_data) / 1000);
+ }
+#endif
+
+ }
+
+ dealloc_compressor_data(cpi);
+ vpx_free(cpi->mb.ss);
+ vpx_free(cpi->tok);
+
+  for (i = 0; i < sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]);
+       i++) {
+ vpx_free(cpi->mbgraph_stats[i].mb_stats);
+ }
+
+ vp9_remove_common(&cpi->common);
+ vpx_free(cpi);
+ *ptr = 0;
+
+#ifdef OUTPUT_YUV_SRC
+ fclose(yuv_file);
+#endif
+#ifdef OUTPUT_YUV_REC
+ fclose(yuv_rec_file);
+#endif
+
+#if 0
+
+ if (keyfile)
+ fclose(keyfile);
+
+ if (framepsnr)
+ fclose(framepsnr);
+
+ if (kf_list)
+ fclose(kf_list);
+
+#endif
+
+}
+
+
+static uint64_t calc_plane_error(uint8_t *orig, int orig_stride,
+ uint8_t *recon, int recon_stride,
+ unsigned int cols, unsigned int rows) {
+ unsigned int row, col;
+ uint64_t total_sse = 0;
+ int diff;
+
+ for (row = 0; row + 16 <= rows; row += 16) {
+ for (col = 0; col + 16 <= cols; col += 16) {
+ unsigned int sse;
+
+ vp9_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
+ total_sse += sse;
+ }
+
+ /* Handle odd-sized width */
+ if (col < cols) {
+ unsigned int border_row, border_col;
+ uint8_t *border_orig = orig;
+ uint8_t *border_recon = recon;
+
+ for (border_row = 0; border_row < 16; border_row++) {
+ for (border_col = col; border_col < cols; border_col++) {
+ diff = border_orig[border_col] - border_recon[border_col];
+ total_sse += diff * diff;
+ }
+
+ border_orig += orig_stride;
+ border_recon += recon_stride;
+ }
+ }
+
+ orig += orig_stride * 16;
+ recon += recon_stride * 16;
+ }
+
+ /* Handle odd-sized height */
+ for (; row < rows; row++) {
+ for (col = 0; col < cols; col++) {
+ diff = orig[col] - recon[col];
+ total_sse += diff * diff;
+ }
+
+ orig += orig_stride;
+ recon += recon_stride;
+ }
+
+ return total_sse;
+}
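+
+// Example of the traversal above (illustrative): for a 100x70 plane the
+// 16x16 loop covers rows 0-63 and columns 0-95 via vp9_mse16x16, the
+// right border (columns 96-99) is accumulated pixel by pixel inside the
+// strip loop, and the bottom rows 64-69 are handled by the final
+// odd-sized-height loop.
+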
+
+
+static void generate_psnr_packet(VP9_COMP *cpi) {
+ YV12_BUFFER_CONFIG *orig = cpi->Source;
+ YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
+ struct vpx_codec_cx_pkt pkt;
+ uint64_t sse;
+ int i;
+ unsigned int width = cpi->common.width;
+ unsigned int height = cpi->common.height;
+
+ pkt.kind = VPX_CODEC_PSNR_PKT;
+ sse = calc_plane_error(orig->y_buffer, orig->y_stride,
+ recon->y_buffer, recon->y_stride,
+ width, height);
+ pkt.data.psnr.sse[0] = sse;
+ pkt.data.psnr.sse[1] = sse;
+ pkt.data.psnr.samples[0] = width * height;
+ pkt.data.psnr.samples[1] = width * height;
+
+ width = orig->uv_width;
+ height = orig->uv_height;
+
+ sse = calc_plane_error(orig->u_buffer, orig->uv_stride,
+ recon->u_buffer, recon->uv_stride,
+ width, height);
+ pkt.data.psnr.sse[0] += sse;
+ pkt.data.psnr.sse[2] = sse;
+ pkt.data.psnr.samples[0] += width * height;
+ pkt.data.psnr.samples[2] = width * height;
+
+ sse = calc_plane_error(orig->v_buffer, orig->uv_stride,
+ recon->v_buffer, recon->uv_stride,
+ width, height);
+ pkt.data.psnr.sse[0] += sse;
+ pkt.data.psnr.sse[3] = sse;
+ pkt.data.psnr.samples[0] += width * height;
+ pkt.data.psnr.samples[3] = width * height;
+
+ for (i = 0; i < 4; i++)
+ pkt.data.psnr.psnr[i] = vp9_mse2psnr(pkt.data.psnr.samples[i], 255.0,
+ (double)pkt.data.psnr.sse[i]);
+
+ vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
+}
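+
+// Note (an assumption about the helper, stated for clarity): vp9_mse2psnr
+// is expected to compute 10 * log10(samples * peak^2 / sse). Index 0 of
+// the sse/samples arrays holds the combined Y+U+V totals and indices 1-3
+// the per-plane values, so psnr[0] is the overall PSNR and psnr[1..3] are
+// the Y, U and V PSNRs respectively.
+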
+
+
+int vp9_use_as_reference(VP9_PTR ptr, int ref_frame_flags) {
+ VP9_COMP *cpi = (VP9_COMP *)(ptr);
+
+ if (ref_frame_flags > 7)
+ return -1;
+
+ cpi->ref_frame_flags = ref_frame_flags;
+ return 0;
+}
+
+int vp9_update_reference(VP9_PTR ptr, int ref_frame_flags) {
+ VP9_COMP *cpi = (VP9_COMP *)(ptr);
+
+ if (ref_frame_flags > 7)
+ return -1;
+
+ cpi->refresh_golden_frame = 0;
+ cpi->refresh_alt_ref_frame = 0;
+ cpi->refresh_last_frame = 0;
+
+ if (ref_frame_flags & VP9_LAST_FLAG)
+ cpi->refresh_last_frame = 1;
+
+ if (ref_frame_flags & VP9_GOLD_FLAG)
+ cpi->refresh_golden_frame = 1;
+
+ if (ref_frame_flags & VP9_ALT_FLAG)
+ cpi->refresh_alt_ref_frame = 1;
+
+ return 0;
+}
+
+int vp9_copy_reference_enc(VP9_PTR ptr, VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
+ VP9_COMP *cpi = (VP9_COMP *)(ptr);
+ VP9_COMMON *cm = &cpi->common;
+ int ref_fb_idx;
+
+ if (ref_frame_flag == VP9_LAST_FLAG)
+ ref_fb_idx = cm->ref_frame_map[cpi->lst_fb_idx];
+ else if (ref_frame_flag == VP9_GOLD_FLAG)
+ ref_fb_idx = cm->ref_frame_map[cpi->gld_fb_idx];
+ else if (ref_frame_flag == VP9_ALT_FLAG)
+ ref_fb_idx = cm->ref_frame_map[cpi->alt_fb_idx];
+ else
+ return -1;
+
+ vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
+
+ return 0;
+}
+
+int vp9_get_reference_enc(VP9_PTR ptr, int index, YV12_BUFFER_CONFIG **fb) {
+ VP9_COMP *cpi = (VP9_COMP *)(ptr);
+ VP9_COMMON *cm = &cpi->common;
+
+ if (index < 0 || index >= NUM_REF_FRAMES)
+ return -1;
+
+ *fb = &cm->yv12_fb[cm->ref_frame_map[index]];
+ return 0;
+}
+
+int vp9_set_reference_enc(VP9_PTR ptr, VP9_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
+ VP9_COMP *cpi = (VP9_COMP *)(ptr);
+ VP9_COMMON *cm = &cpi->common;
+
+ int ref_fb_idx;
+
+ if (ref_frame_flag == VP9_LAST_FLAG)
+ ref_fb_idx = cm->ref_frame_map[cpi->lst_fb_idx];
+ else if (ref_frame_flag == VP9_GOLD_FLAG)
+ ref_fb_idx = cm->ref_frame_map[cpi->gld_fb_idx];
+ else if (ref_frame_flag == VP9_ALT_FLAG)
+ ref_fb_idx = cm->ref_frame_map[cpi->alt_fb_idx];
+ else
+ return -1;
+
+ vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
+
+ return 0;
+}
+
+int vp9_update_entropy(VP9_PTR comp, int update) {
+ ((VP9_COMP *)comp)->common.refresh_frame_context = update;
+ return 0;
+}
+
+
+#ifdef OUTPUT_YUV_SRC
+void vp9_write_yuv_frame(YV12_BUFFER_CONFIG *s) {
+ uint8_t *src = s->y_buffer;
+ int h = s->y_height;
+
+ do {
+ fwrite(src, s->y_width, 1, yuv_file);
+ src += s->y_stride;
+ } while (--h);
+
+ src = s->u_buffer;
+ h = s->uv_height;
+
+ do {
+ fwrite(src, s->uv_width, 1, yuv_file);
+ src += s->uv_stride;
+ } while (--h);
+
+ src = s->v_buffer;
+ h = s->uv_height;
+
+ do {
+ fwrite(src, s->uv_width, 1, yuv_file);
+ src += s->uv_stride;
+ } while (--h);
+}
+#endif
+
+#ifdef OUTPUT_YUV_REC
+void vp9_write_yuv_rec_frame(VP9_COMMON *cm) {
+ YV12_BUFFER_CONFIG *s = cm->frame_to_show;
+ uint8_t *src = s->y_buffer;
+ int h = cm->height;
+
+ do {
+ fwrite(src, s->y_width, 1, yuv_rec_file);
+ src += s->y_stride;
+ } while (--h);
+
+ src = s->u_buffer;
+ h = s->uv_height;
+
+ do {
+ fwrite(src, s->uv_width, 1, yuv_rec_file);
+ src += s->uv_stride;
+ } while (--h);
+
+ src = s->v_buffer;
+ h = s->uv_height;
+
+ do {
+ fwrite(src, s->uv_width, 1, yuv_rec_file);
+ src += s->uv_stride;
+ } while (--h);
+
+#if CONFIG_ALPHA
+ if (s->alpha_buffer) {
+ src = s->alpha_buffer;
+ h = s->alpha_height;
+ do {
+ fwrite(src, s->alpha_width, 1, yuv_rec_file);
+ src += s->alpha_stride;
+ } while (--h);
+ }
+#endif
+
+ fflush(yuv_rec_file);
+}
+#endif
+
+static void scale_and_extend_frame(YV12_BUFFER_CONFIG *src_fb,
+ YV12_BUFFER_CONFIG *dst_fb) {
+ const int in_w = src_fb->y_crop_width;
+ const int in_h = src_fb->y_crop_height;
+ const int out_w = dst_fb->y_crop_width;
+ const int out_h = dst_fb->y_crop_height;
+ int x, y, i;
+
+ uint8_t *srcs[4] = {src_fb->y_buffer, src_fb->u_buffer, src_fb->v_buffer,
+ src_fb->alpha_buffer};
+ int src_strides[4] = {src_fb->y_stride, src_fb->uv_stride, src_fb->uv_stride,
+ src_fb->alpha_stride};
+
+ uint8_t *dsts[4] = {dst_fb->y_buffer, dst_fb->u_buffer, dst_fb->v_buffer,
+ dst_fb->alpha_buffer};
+ int dst_strides[4] = {dst_fb->y_stride, dst_fb->uv_stride, dst_fb->uv_stride,
+ dst_fb->alpha_stride};
+
+ for (y = 0; y < out_h; y += 16) {
+ for (x = 0; x < out_w; x += 16) {
+ for (i = 0; i < MAX_MB_PLANE; ++i) {
+ const int factor = i == 0 ? 1 : 2;
+ const int x_q4 = x * (16 / factor) * in_w / out_w;
+ const int y_q4 = y * (16 / factor) * in_h / out_h;
+ const int src_stride = src_strides[i];
+ const int dst_stride = dst_strides[i];
+ uint8_t *src = srcs[i] + y / factor * in_h / out_h * src_stride +
+ x / factor * in_w / out_w;
+ uint8_t *dst = dsts[i] + y / factor * dst_stride + x / factor;
+
+ vp9_convolve8(src, src_stride, dst, dst_stride,
+ vp9_sub_pel_filters_8[x_q4 & 0xf], 16 * in_w / out_w,
+ vp9_sub_pel_filters_8[y_q4 & 0xf], 16 * in_h / out_h,
+ 16 / factor, 16 / factor);
+ }
+ }
+ }
+
+ vp8_yv12_extend_frame_borders(dst_fb);
+}
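+
+// Scaling example (illustrative): the convolve step arguments are in
+// 1/16-pel units, so for a 2x luma upscale (in_w == 640, out_w == 1280)
+// the horizontal step is 16 * 640 / 1280 == 8, meaning every other output
+// pixel falls halfway between source pixels and is produced by the
+// half-pel phase of vp9_sub_pel_filters_8.
+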
+
+
+static void update_alt_ref_frame_stats(VP9_COMP *cpi) {
+  // Refreshing this frame means subsequent frames don't refresh it unless
+  // explicitly requested by the user.
+  cpi->common.frames_since_golden = 0;
+
+#if CONFIG_MULTIPLE_ARF
+ if (!cpi->multi_arf_enabled)
+#endif
+ // Clear the alternate reference update pending flag.
+ cpi->source_alt_ref_pending = 0;
+
+ // Set the alternate reference frame active flag
+ cpi->source_alt_ref_active = 1;
+}
+
+static void update_golden_frame_stats(VP9_COMP *cpi) {
+ // Update the Golden frame usage counts.
+ if (cpi->refresh_golden_frame) {
+    // Refreshing this frame means subsequent frames don't refresh it unless
+    // explicitly requested by the user.
+ cpi->refresh_golden_frame = 0;
+ cpi->common.frames_since_golden = 0;
+
+    // ******** Fixed Q test code only ************
+    // If we are going to use the ALT reference for the next group of
+    // frames, set a flag to say so.
+ if (cpi->oxcf.fixed_q >= 0 &&
+ cpi->oxcf.play_alternate && !cpi->refresh_alt_ref_frame) {
+ cpi->source_alt_ref_pending = 1;
+ cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
+ }
+
+ if (!cpi->source_alt_ref_pending)
+ cpi->source_alt_ref_active = 0;
+
+ // Decrement count down till next gf
+ if (cpi->frames_till_gf_update_due > 0)
+ cpi->frames_till_gf_update_due--;
+
+ } else if (!cpi->refresh_alt_ref_frame) {
+ // Decrement count down till next gf
+ if (cpi->frames_till_gf_update_due > 0)
+ cpi->frames_till_gf_update_due--;
+
+ if (cpi->common.frames_till_alt_ref_frame)
+ cpi->common.frames_till_alt_ref_frame--;
+
+ cpi->common.frames_since_golden++;
+ }
+}
+
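+// Returns the smallest qindex whose real Q value is at least 30.0; this is
+// used as the fixed quantizer for first pass encoding (see Pass1Encode).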
+static int find_fp_qindex(void) {
+ int i;
+
+ for (i = 0; i < QINDEX_RANGE; i++) {
+ if (vp9_convert_qindex_to_q(i) >= 30.0) {
+ break;
+ }
+ }
+
+ if (i == QINDEX_RANGE)
+ i--;
+
+ return i;
+}
+
+static void Pass1Encode(VP9_COMP *cpi, unsigned long *size,
+                        unsigned char *dest, unsigned int *frame_flags) {
+  (void) size;
+  (void) dest;
+  (void) frame_flags;
+
+  vp9_set_quantizer(cpi, find_fp_qindex());
+  vp9_first_pass(cpi);
+}
+
+#define WRITE_RECON_BUFFER 0
+#if WRITE_RECON_BUFFER
+void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame) {
+  // write the frame
+ FILE *yframe;
+ int i;
+ char filename[255];
+
+ sprintf(filename, "cx\\y%04d.raw", this_frame);
+ yframe = fopen(filename, "wb");
+
+ for (i = 0; i < frame->y_height; i++)
+ fwrite(frame->y_buffer + i * frame->y_stride,
+ frame->y_width, 1, yframe);
+
+ fclose(yframe);
+ sprintf(filename, "cx\\u%04d.raw", this_frame);
+ yframe = fopen(filename, "wb");
+
+ for (i = 0; i < frame->uv_height; i++)
+ fwrite(frame->u_buffer + i * frame->uv_stride,
+ frame->uv_width, 1, yframe);
+
+ fclose(yframe);
+ sprintf(filename, "cx\\v%04d.raw", this_frame);
+ yframe = fopen(filename, "wb");
+
+ for (i = 0; i < frame->uv_height; i++)
+ fwrite(frame->v_buffer + i * frame->uv_stride,
+ frame->uv_width, 1, yframe);
+
+ fclose(yframe);
+}
+#endif
+
+static double compute_edge_pixel_proportion(YV12_BUFFER_CONFIG *frame) {
+#define EDGE_THRESH 128
+ int i, j;
+ int num_edge_pels = 0;
+ int num_pels = (frame->y_height - 2) * (frame->y_width - 2);
+ uint8_t *prev = frame->y_buffer + 1;
+ uint8_t *curr = frame->y_buffer + 1 + frame->y_stride;
+ uint8_t *next = frame->y_buffer + 1 + 2 * frame->y_stride;
+ for (i = 1; i < frame->y_height - 1; i++) {
+ for (j = 1; j < frame->y_width - 1; j++) {
+      /* Sobel horizontal and vertical gradients */
+      int v = 2 * (curr[1] - curr[-1]) +
+              (prev[1] - prev[-1]) + (next[1] - next[-1]);
+      int h = 2 * (prev[0] - next[0]) +
+              (prev[1] - next[1]) + (prev[-1] - next[-1]);
+ h = (h < 0 ? -h : h);
+ v = (v < 0 ? -v : v);
+ if (h > EDGE_THRESH || v > EDGE_THRESH)
+ num_edge_pels++;
+ curr++;
+ prev++;
+ next++;
+ }
+ curr += frame->y_stride - frame->y_width + 2;
+ prev += frame->y_stride - frame->y_width + 2;
+ next += frame->y_stride - frame->y_width + 2;
+ }
+ return (double)num_edge_pels / num_pels;
+}
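+
+// Worked example of the Sobel test above (illustrative): for a hard
+// vertical step where the left neighbours are 0 and the right neighbours
+// are 255 in all three rows, v = 2 * 255 + 255 + 255 = 1020 > EDGE_THRESH,
+// so the pixel counts as an edge pel, while h == 0.
+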
+
+// Function to test for conditions that indicate we should loop
+// back and recode a frame.
+static int recode_loop_test(VP9_COMP *cpi,
+ int high_limit, int low_limit,
+ int q, int maxq, int minq) {
+ int force_recode = 0;
+ VP9_COMMON *cm = &cpi->common;
+
+  // Is frame recode allowed at all?
+  // Yes if either recode mode 1 is selected, or mode 2 is selected
+  // and the frame is a key frame, golden frame or alt ref frame.
+ if ((cpi->sf.recode_loop == 1) ||
+ ((cpi->sf.recode_loop == 2) &&
+ ((cm->frame_type == KEY_FRAME) ||
+ cpi->refresh_golden_frame ||
+ cpi->refresh_alt_ref_frame))) {
+ // General over and under shoot tests
+ if (((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
+ ((cpi->projected_frame_size < low_limit) && (q > minq))) {
+ force_recode = 1;
+ }
+ // Special Constrained quality tests
+ else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
+ // Undershoot and below auto cq level
+ if (q > cpi->cq_target_quality &&
+ cpi->projected_frame_size < ((cpi->this_frame_target * 7) >> 3)) {
+ force_recode = 1;
+ } else if (q > cpi->oxcf.cq_level &&
+ cpi->projected_frame_size < cpi->min_frame_bandwidth &&
+ cpi->active_best_quality > cpi->oxcf.cq_level) {
+ // Severe undershoot and between auto and user cq level
+ force_recode = 1;
+ cpi->active_best_quality = cpi->oxcf.cq_level;
+ }
+ }
+ }
+
+ return force_recode;
+}
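+
+// Example (illustrative): with sf->recode_loop == 2, an inter frame that
+// refreshes neither the golden nor the alt-ref frame is never recoded; on
+// a key frame with, say, projected_frame_size == 120000 bits against
+// high_limit == 100000 and q == 40 < maxq, the overshoot test fires and
+// force_recode == 1.
+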
+
+static void update_reference_frames(VP9_COMP * const cpi) {
+ VP9_COMMON * const cm = &cpi->common;
+
+ // At this point the new frame has been encoded.
+ // If any buffer copy / swapping is signaled it should be done here.
+ if (cm->frame_type == KEY_FRAME) {
+ ref_cnt_fb(cm->fb_idx_ref_cnt,
+ &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
+ ref_cnt_fb(cm->fb_idx_ref_cnt,
+ &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
+ }
+#if CONFIG_MULTIPLE_ARF
+ else if (!cpi->multi_arf_enabled && cpi->refresh_golden_frame &&
+ !cpi->refresh_alt_ref_frame) {
+#else
+ else if (cpi->refresh_golden_frame && !cpi->refresh_alt_ref_frame) {
+#endif
+ /* Preserve the previously existing golden frame and update the frame in
+ * the alt ref slot instead. This is highly specific to the current use of
+ * alt-ref as a forward reference, and this needs to be generalized as
+ * other uses are implemented (like RTC/temporal scaling)
+ *
+ * The update to the buffer in the alt ref slot was signaled in
+ * vp9_pack_bitstream(), now swap the buffer pointers so that it's treated
+ * as the golden frame next time.
+ */
+ int tmp;
+
+ ref_cnt_fb(cm->fb_idx_ref_cnt,
+ &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
+
+ tmp = cpi->alt_fb_idx;
+ cpi->alt_fb_idx = cpi->gld_fb_idx;
+ cpi->gld_fb_idx = tmp;
+ } else { /* For non key/golden frames */
+ if (cpi->refresh_alt_ref_frame) {
+ int arf_idx = cpi->alt_fb_idx;
+#if CONFIG_MULTIPLE_ARF
+ if (cpi->multi_arf_enabled) {
+ arf_idx = cpi->arf_buffer_idx[cpi->sequence_number + 1];
+ }
+#endif
+ ref_cnt_fb(cm->fb_idx_ref_cnt,
+ &cm->ref_frame_map[arf_idx], cm->new_fb_idx);
+ }
+
+ if (cpi->refresh_golden_frame) {
+ ref_cnt_fb(cm->fb_idx_ref_cnt,
+ &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
+ }
+ }
+
+ if (cpi->refresh_last_frame) {
+ ref_cnt_fb(cm->fb_idx_ref_cnt,
+ &cm->ref_frame_map[cpi->lst_fb_idx], cm->new_fb_idx);
+ }
+}
+
+static void loopfilter_frame(VP9_COMP *cpi, VP9_COMMON *cm) {
+ if (cpi->mb.e_mbd.lossless) {
+ cm->filter_level = 0;
+ } else {
+ struct vpx_usec_timer timer;
+
+ vp9_clear_system_state();
+
+ vpx_usec_timer_start(&timer);
+
+ vp9_pick_filter_level(cpi->Source, cpi);
+
+ vpx_usec_timer_mark(&timer);
+ cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
+ }
+
+ if (cm->filter_level > 0) {
+ vp9_set_alt_lf_level(cpi, cm->filter_level);
+ vp9_loop_filter_frame(cm, &cpi->mb.e_mbd, cm->filter_level, 0);
+ }
+
+ vp9_extend_frame_borders(cm->frame_to_show,
+ cm->subsampling_x, cm->subsampling_y);
+
+}
+
+void vp9_select_interp_filter_type(VP9_COMP *cpi) {
+ int i;
+ int high_filter_index = 0;
+ unsigned int thresh;
+ unsigned int high_count = 0;
+ unsigned int count_sum = 0;
+ unsigned int *hist = cpi->best_switchable_interp_count;
+
+ if (DEFAULT_INTERP_FILTER != SWITCHABLE) {
+ cpi->common.mcomp_filter_type = DEFAULT_INTERP_FILTER;
+ return;
+ }
+
+ // TODO(agrange): Look at using RD criteria to select the interpolation
+ // filter to use for the next frame rather than this simpler counting scheme.
+
+ // Select the interpolation filter mode for the next frame
+ // based on the selection frequency seen in the current frame.
+ for (i = 0; i < VP9_SWITCHABLE_FILTERS; ++i) {
+ unsigned int count = hist[i];
+ count_sum += count;
+ if (count > high_count) {
+ high_count = count;
+ high_filter_index = i;
+ }
+ }
+
+ thresh = (unsigned int)(0.80 * count_sum);
+
+ if (high_count > thresh) {
+ // One filter accounts for 80+% of cases so force the next
+ // frame to use this filter exclusively using frame-level flag.
+ cpi->common.mcomp_filter_type = vp9_switchable_interp[high_filter_index];
+ } else {
+ // Use a MB-level switchable filter selection strategy.
+ cpi->common.mcomp_filter_type = SWITCHABLE;
+ }
+}
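+
+// Example (with hypothetical counts): if the per-frame histogram is
+// {850, 100, 50} then count_sum == 1000 and thresh == 800, so the first
+// filter is forced frame-wide; with {500, 300, 200} no filter clears the
+// 80% threshold and the frame falls back to per-MB SWITCHABLE selection.
+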
+
+static void scale_references(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ YV12_BUFFER_CONFIG *ref = &cm->yv12_fb[cm->ref_frame_map[i]];
+
+ if (ref->y_crop_width != cm->width ||
+ ref->y_crop_height != cm->height) {
+ int new_fb = get_free_fb(cm);
+
+ vp9_realloc_frame_buffer(&cm->yv12_fb[new_fb],
+ cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+ VP9BORDERINPIXELS);
+ scale_and_extend_frame(ref, &cm->yv12_fb[new_fb]);
+ cpi->scaled_ref_idx[i] = new_fb;
+ } else {
+ cpi->scaled_ref_idx[i] = cm->ref_frame_map[i];
+ cm->fb_idx_ref_cnt[cm->ref_frame_map[i]]++;
+ }
+ }
+}
+
+static void release_scaled_references(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+ int i;
+
+ for (i = 0; i < 3; i++)
+ cm->fb_idx_ref_cnt[cpi->scaled_ref_idx[i]]--;
+}
+
+static void encode_frame_to_data_rate(VP9_COMP *cpi,
+ unsigned long *size,
+ unsigned char *dest,
+ unsigned int *frame_flags) {
+ VP9_COMMON *cm = &cpi->common;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ TX_SIZE t;
+ int q;
+ int frame_over_shoot_limit;
+ int frame_under_shoot_limit;
+
+ int loop = 0;
+ int loop_count;
+
+ int q_low;
+ int q_high;
+
+ int top_index;
+ int bottom_index;
+ int active_worst_qchanged = 0;
+
+ int overshoot_seen = 0;
+ int undershoot_seen = 0;
+
+ SPEED_FEATURES *sf = &cpi->sf;
+#if RESET_FOREACH_FILTER
+ int q_low0;
+ int q_high0;
+ int Q0;
+ int active_best_quality0;
+ int active_worst_quality0;
+ double rate_correction_factor0;
+ double gf_rate_correction_factor0;
+#endif
+
+ /* list of filters to search over */
+ int mcomp_filters_to_search[] = {
+ EIGHTTAP, EIGHTTAP_SHARP, EIGHTTAP_SMOOTH, SWITCHABLE
+ };
+ int mcomp_filters = sizeof(mcomp_filters_to_search) /
+ sizeof(*mcomp_filters_to_search);
+ int mcomp_filter_index = 0;
+ int64_t mcomp_filter_cost[4];
+
+ /* Scale the source buffer, if required */
+ if (cm->mi_cols * 8 != cpi->un_scaled_source->y_width ||
+ cm->mi_rows * 8 != cpi->un_scaled_source->y_height) {
+ scale_and_extend_frame(cpi->un_scaled_source, &cpi->scaled_source);
+ cpi->Source = &cpi->scaled_source;
+ } else {
+ cpi->Source = cpi->un_scaled_source;
+ }
+
+ scale_references(cpi);
+
+ // Clear down mmx registers to allow floating point in what follows
+ vp9_clear_system_state();
+
+ // For an alt ref frame in 2 pass we skip the call to the second
+ // pass function that sets the target bandwidth so must set it here
+ if (cpi->refresh_alt_ref_frame) {
+ // Per frame bit target for the alt ref frame
+ cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
+ // per second target bitrate
+ cpi->target_bandwidth = (int)(cpi->twopass.gf_bits *
+ cpi->output_frame_rate);
+ }
+
+  // Clear zbin over-quant value and mode boost values.
+  cpi->zbin_mode_boost = 0;
+
+  // Enable or disable mode based tweaking of the zbin.
+  // For 2 pass it is only used where GF/ARF prediction quality
+  // is above a threshold.
+  // if (cpi->oxcf.lossless)
+  cpi->zbin_mode_boost_enabled = 0;
+  // else
+  //   cpi->zbin_mode_boost_enabled = 1;
+
+ // Current default encoder behaviour for the altref sign bias
+ cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = cpi->source_alt_ref_active;
+
+  // Check to see if a key frame is signaled.
+  // For two pass with auto key frame enabled cm->frame_type may already
+  // be set, but not for one pass.
+  if ((cm->current_video_frame == 0) ||
+      (cm->frame_flags & FRAMEFLAGS_KEY) ||
+      (cpi->oxcf.auto_key &&
+       (cpi->frames_since_key % cpi->key_frame_frequency == 0))) {
+ // Key frame from VFW/auto-keyframe/first frame
+ cm->frame_type = KEY_FRAME;
+ }
+
+ // Set default state for segment based loop filter update flags
+ xd->mode_ref_lf_delta_update = 0;
+
+ // Set various flags etc to special state if it is a key frame
+ if (cm->frame_type == KEY_FRAME) {
+ int i;
+
+ // Reset the loop filter deltas and segmentation map
+ setup_features(cpi);
+
+ // If segmentation is enabled force a map update for key frames
+ if (xd->segmentation_enabled) {
+ xd->update_mb_segmentation_map = 1;
+ xd->update_mb_segmentation_data = 1;
+ }
+
+ // The alternate reference frame cannot be active for a key frame
+ cpi->source_alt_ref_active = 0;
+
+ // Reset the RD threshold multipliers to default of * 1 (128)
+ for (i = 0; i < MAX_MODES; i++)
+ cpi->rd_thresh_mult[i] = 128;
+
+ cm->error_resilient_mode = (cpi->oxcf.error_resilient_mode != 0);
+ cm->frame_parallel_decoding_mode =
+ (cpi->oxcf.frame_parallel_decoding_mode != 0);
+ if (cm->error_resilient_mode) {
+ cm->frame_parallel_decoding_mode = 1;
+ cm->reset_frame_context = 0;
+ cm->refresh_frame_context = 0;
+ }
+ }
+
+  // Configure experimental use of segmentation for enhanced coding of
+  // static regions if indicated.
+  // Only allowed for now in the second pass of a two pass encode (as it
+  // requires lagged coding), and if the relevant speed feature flag is set.
+  if (cpi->pass == 2 && cpi->sf.static_segmentation) {
+ configure_static_seg_features(cpi);
+ }
+
+ // Decide how big to make the frame
+ vp9_pick_frame_size(cpi);
+
+ vp9_clear_system_state();
+
+ // Set an active best quality and if necessary active worst quality
+ q = cpi->active_worst_quality;
+
+ if (cm->frame_type == KEY_FRAME) {
+#if !CONFIG_MULTIPLE_ARF
+ // Special case for key frames forced because we have reached
+ // the maximum key frame interval. Here force the Q to a range
+ // based on the ambient Q to reduce the risk of popping
+ if (cpi->this_key_frame_forced) {
+ int delta_qindex;
+ int qindex = cpi->last_boosted_qindex;
+ double last_boosted_q = vp9_convert_qindex_to_q(qindex);
+
+ delta_qindex = compute_qdelta(cpi, last_boosted_q,
+ (last_boosted_q * 0.75));
+
+ cpi->active_best_quality = MAX(qindex + delta_qindex, cpi->best_quality);
+ } else {
+ int high = 5000;
+ int low = 400;
+ double q_adj_factor = 1.0;
+ double q_val;
+
+ // Baseline value derived from cpi->active_worst_quality and kf boost
+ if (cpi->kf_boost > high) {
+ cpi->active_best_quality = kf_low_motion_minq[q];
+ } else if (cpi->kf_boost < low) {
+ cpi->active_best_quality = kf_high_motion_minq[q];
+ } else {
+ const int gap = high - low;
+ const int offset = high - cpi->kf_boost;
+ const int qdiff = kf_high_motion_minq[q] - kf_low_motion_minq[q];
+ const int adjustment = ((offset * qdiff) + (gap >> 1)) / gap;
+
+ cpi->active_best_quality = kf_low_motion_minq[q] + adjustment;
+ }
+
+ // Allow somewhat lower kf minq with small image formats.
+ if ((cm->width * cm->height) <= (352 * 288)) {
+ q_adj_factor -= 0.25;
+ }
+
+ // Make a further adjustment based on the kf zero motion measure.
+ q_adj_factor += 0.05 - (0.001 * (double)cpi->kf_zeromotion_pct);
+
+      // Convert the adjustment factor to a qindex delta on
+      // active_best_quality.
+ q_val = vp9_convert_qindex_to_q(cpi->active_best_quality);
+ cpi->active_best_quality +=
+ compute_qdelta(cpi, q_val, (q_val * q_adj_factor));
+ }
+#else
+ double current_q;
+
+ // Force the KF quantizer to be 30% of the active_worst_quality.
+ current_q = vp9_convert_qindex_to_q(cpi->active_worst_quality);
+ cpi->active_best_quality = cpi->active_worst_quality
+ + compute_qdelta(cpi, current_q, current_q * 0.3);
+#endif
+ } else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame) {
+ int high = 2000;
+ int low = 400;
+
+ // Use the lower of cpi->active_worst_quality and recent
+ // average Q as basis for GF/ARF Q limit unless last frame was
+ // a key frame.
+ if (cpi->frames_since_key > 1 &&
+ cpi->avg_frame_qindex < cpi->active_worst_quality) {
+ q = cpi->avg_frame_qindex;
+ }
+
+    // For constrained quality don't allow Q less than the cq level
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY &&
+ q < cpi->cq_target_quality) {
+ q = cpi->cq_target_quality;
+ }
+
+ if (cpi->gfu_boost > high) {
+ cpi->active_best_quality = gf_low_motion_minq[q];
+ } else if (cpi->gfu_boost < low) {
+ cpi->active_best_quality = gf_high_motion_minq[q];
+ } else {
+ const int gap = high - low;
+ const int offset = high - cpi->gfu_boost;
+ const int qdiff = gf_high_motion_minq[q] - gf_low_motion_minq[q];
+ const int adjustment = ((offset * qdiff) + (gap >> 1)) / gap;
+
+ cpi->active_best_quality = gf_low_motion_minq[q] + adjustment;
+ }
+
+ // Constrained quality use slightly lower active best.
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY)
+ cpi->active_best_quality = cpi->active_best_quality * 15 / 16;
+ } else {
+#ifdef ONE_SHOT_Q_ESTIMATE
+#ifdef STRICT_ONE_SHOT_Q
+ cpi->active_best_quality = q;
+#else
+ cpi->active_best_quality = inter_minq[q];
+#endif
+#else
+ cpi->active_best_quality = inter_minq[q];
+#endif
+
+ // For the constant/constrained quality mode we don't want
+ // q to fall below the cq level.
+ if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
+ (cpi->active_best_quality < cpi->cq_target_quality)) {
+ // If we are strongly undershooting the target rate in the last
+ // frames then use the user passed in cq value not the auto
+ // cq value.
+ if (cpi->rolling_actual_bits < cpi->min_frame_bandwidth)
+ cpi->active_best_quality = cpi->oxcf.cq_level;
+ else
+ cpi->active_best_quality = cpi->cq_target_quality;
+ }
+ }
+
+ // Clip the active best and worst quality values to limits
+ if (cpi->active_worst_quality > cpi->worst_quality)
+ cpi->active_worst_quality = cpi->worst_quality;
+
+ if (cpi->active_best_quality < cpi->best_quality)
+ cpi->active_best_quality = cpi->best_quality;
+
+ if (cpi->active_best_quality > cpi->worst_quality)
+ cpi->active_best_quality = cpi->worst_quality;
+
+ if (cpi->active_worst_quality < cpi->active_best_quality)
+ cpi->active_worst_quality = cpi->active_best_quality;
+
+ // Special case code to try and match quality with forced key frames
+ if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
+ q = cpi->last_boosted_qindex;
+ } else {
+ // Determine initial Q to try
+ q = vp9_regulate_q(cpi, cpi->this_frame_target);
+ }
+
+ vp9_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
+ &frame_over_shoot_limit);
+
+#if CONFIG_MULTIPLE_ARF
+ // Force the quantizer determined by the coding order pattern.
+ if (cpi->multi_arf_enabled && (cm->frame_type != KEY_FRAME)) {
+ double new_q;
+ double current_q = vp9_convert_qindex_to_q(cpi->active_worst_quality);
+ int level = cpi->this_frame_weight;
+ assert(level >= 0);
+
+ // Set quantizer steps at 10% increments.
+ new_q = current_q * (1.0 - (0.2 * (cpi->max_arf_level - level)));
+ q = cpi->active_worst_quality + compute_qdelta(cpi, current_q, new_q);
+
+ bottom_index = q;
+ top_index = q;
+ q_low = q;
+ q_high = q;
+
+ printf("frame:%d q:%d\n", cm->current_video_frame, q);
+ } else {
+#endif
+ // Limit Q range for the adaptive loop.
+ bottom_index = cpi->active_best_quality;
+ top_index = cpi->active_worst_quality;
+ q_low = cpi->active_best_quality;
+ q_high = cpi->active_worst_quality;
+#if CONFIG_MULTIPLE_ARF
+ }
+#endif
+ loop_count = 0;
+ vpx_memset(cpi->rd_tx_select_threshes, 0, sizeof(cpi->rd_tx_select_threshes));
+
+ if (cm->frame_type != KEY_FRAME) {
+ /* TODO: Decide this more intelligently */
+ if (sf->search_best_filter) {
+ cm->mcomp_filter_type = mcomp_filters_to_search[0];
+ mcomp_filter_index = 0;
+ } else {
+ cm->mcomp_filter_type = DEFAULT_INTERP_FILTER;
+ }
+ /* TODO: Decide this more intelligently */
+ xd->allow_high_precision_mv = q < HIGH_PRECISION_MV_QTHRESH;
+ set_mvcost(&cpi->mb);
+ }
+
+#if CONFIG_POSTPROC
+
+ if (cpi->oxcf.noise_sensitivity > 0) {
+ int l = 0;
+
+ switch (cpi->oxcf.noise_sensitivity) {
+ case 1:
+ l = 20;
+ break;
+ case 2:
+ l = 40;
+ break;
+ case 3:
+ l = 60;
+ break;
+ case 4:
+ case 5:
+ l = 100;
+ break;
+ case 6:
+ l = 150;
+ break;
+ }
+
+ vp9_denoise(cpi->Source, cpi->Source, l);
+ }
+
+#endif
+
+#ifdef OUTPUT_YUV_SRC
+ vp9_write_yuv_frame(cpi->Source);
+#endif
+
+#if RESET_FOREACH_FILTER
+ if (sf->search_best_filter) {
+ q_low0 = q_low;
+ q_high0 = q_high;
+    Q0 = q;
+ rate_correction_factor0 = cpi->rate_correction_factor;
+ gf_rate_correction_factor0 = cpi->gf_rate_correction_factor;
+ active_best_quality0 = cpi->active_best_quality;
+ active_worst_quality0 = cpi->active_worst_quality;
+ }
+#endif
+ do {
+ vp9_clear_system_state(); // __asm emms;
+
+ vp9_set_quantizer(cpi, q);
+
+ if (loop_count == 0) {
+
+ // Set up entropy depending on frame type.
+ if (cm->frame_type == KEY_FRAME) {
+ /* Choose which entropy context to use. When using a forward reference
+ * frame, it immediately follows the keyframe, and thus benefits from
+ * using the same entropy context established by the keyframe.
+ * Otherwise, use the default context 0.
+ */
+ cm->frame_context_idx = cpi->oxcf.play_alternate;
+ vp9_setup_key_frame(cpi);
+ } else {
+ /* Choose which entropy context to use. Currently there are only two
+ * contexts used, one for normal frames and one for alt ref frames.
+ */
+ cpi->common.frame_context_idx = cpi->refresh_alt_ref_frame;
+ vp9_setup_inter_frame(cpi);
+ }
+ }
+
+      // Transform / motion compensation: build the reconstruction frame.
+
+ vp9_encode_frame(cpi);
+
+ // Update the skip mb flag probabilities based on the distribution
+ // seen in the last encoder iteration.
+ // update_base_skip_probs(cpi);
+
+ vp9_clear_system_state(); // __asm emms;
+
+ // Dummy pack of the bitstream using up to date stats to get an
+ // accurate estimate of output frame size to determine if we need
+ // to recode.
+ vp9_save_coding_context(cpi);
+ cpi->dummy_packing = 1;
+ vp9_pack_bitstream(cpi, dest, size);
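+      // *size is in bytes; track the projected frame size in bits.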
+ cpi->projected_frame_size = (*size) << 3;
+ vp9_restore_coding_context(cpi);
+
+ if (frame_over_shoot_limit == 0)
+ frame_over_shoot_limit = 1;
+ active_worst_qchanged = 0;
+
+ // Special case handling for forced key frames
+ if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
+ int last_q = q;
+ int kf_err = vp9_calc_ss_err(cpi->Source,
+ &cm->yv12_fb[cm->new_fb_idx]);
+
+ int high_err_target = cpi->ambient_err;
+ int low_err_target = cpi->ambient_err >> 1;
+
+      // Prevent a possible divide by zero below for a perfect KF (kf_err == 0).
+ kf_err += !kf_err;
+
+ // The key frame is not good enough or we can afford
+ // to make it better without undue risk of popping.
+ if ((kf_err > high_err_target &&
+ cpi->projected_frame_size <= frame_over_shoot_limit) ||
+ (kf_err > low_err_target &&
+ cpi->projected_frame_size <= frame_under_shoot_limit)) {
+ // Lower q_high
+ q_high = q > q_low ? q - 1 : q_low;
+
+ // Adjust Q
+ q = (q * high_err_target) / kf_err;
+ q = MIN(q, (q_high + q_low) >> 1);
+ } else if (kf_err < low_err_target &&
+ cpi->projected_frame_size >= frame_under_shoot_limit) {
+ // The key frame is much better than the previous frame
+ // Raise q_low
+ q_low = q < q_high ? q + 1 : q_high;
+
+ // Adjust Q
+ q = (q * low_err_target) / kf_err;
+ q = MIN(q, (q_high + q_low + 1) >> 1);
+ }
+
+ // Clamp Q to upper and lower limits:
+ q = clamp(q, q_low, q_high);
+
+ loop = q != last_q;
+ }
+
+    // Is the projected frame size out of range, and are we allowed to
+    // attempt to recode?
+ else if (recode_loop_test(cpi,
+ frame_over_shoot_limit, frame_under_shoot_limit,
+ q, top_index, bottom_index)) {
+ int last_q = q;
+ int retries = 0;
+
+ // Frame size out of permitted range:
+ // Update correction factor & compute new Q to try...
+
+ // Frame is too large
+ if (cpi->projected_frame_size > cpi->this_frame_target) {
+        // Raise q_low to at least one above the current value.
+ q_low = q < q_high ? q + 1 : q_high;
+
+ if (undershoot_seen || loop_count > 1) {
+ // Update rate_correction_factor unless cpi->active_worst_quality
+ // has changed.
+ if (!active_worst_qchanged)
+ vp9_update_rate_correction_factors(cpi, 1);
+
+ q = (q_high + q_low + 1) / 2;
+ } else {
+ // Update rate_correction_factor unless cpi->active_worst_quality has changed.
+ if (!active_worst_qchanged)
+ vp9_update_rate_correction_factors(cpi, 0);
+
+ q = vp9_regulate_q(cpi, cpi->this_frame_target);
+
+ while (q < q_low && retries < 10) {
+ vp9_update_rate_correction_factors(cpi, 0);
+ q = vp9_regulate_q(cpi, cpi->this_frame_target);
+ retries++;
+ }
+ }
+
+ overshoot_seen = 1;
+ } else {
+ // Frame is too small
+ q_high = q > q_low ? q - 1 : q_low;
+
+ if (overshoot_seen || loop_count > 1) {
+ // Update rate_correction_factor unless cpi->active_worst_quality has changed.
+ if (!active_worst_qchanged)
+ vp9_update_rate_correction_factors(cpi, 1);
+
+ q = (q_high + q_low) / 2;
+ } else {
+ // Update rate_correction_factor unless cpi->active_worst_quality has changed.
+ if (!active_worst_qchanged)
+ vp9_update_rate_correction_factors(cpi, 0);
+
+ q = vp9_regulate_q(cpi, cpi->this_frame_target);
+
+ // Special case reset for qlow for constrained quality.
+ // This should only trigger where there is very substantial
+ // undershoot on a frame and the auto cq level is above
+          // the user passed in value.
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY && q < q_low) {
+ q_low = q;
+ }
+
+ while (q > q_high && retries < 10) {
+ vp9_update_rate_correction_factors(cpi, 0);
+ q = vp9_regulate_q(cpi, cpi->this_frame_target);
+ retries++;
+ }
+ }
+
+ undershoot_seen = 1;
+ }
+
+ // Clamp Q to upper and lower limits:
+ q = clamp(q, q_low, q_high);
+
+ loop = q != last_q;
+ } else {
+ loop = 0;
+ }
+
+ if (cpi->is_src_frame_alt_ref)
+ loop = 0;
+
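+  // When searching for the best interpolation filter, score the frame just
+  // coded with each candidate filter and re-encode with the best one.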
+ if (!loop && cm->frame_type != KEY_FRAME && sf->search_best_filter) {
+ if (mcomp_filter_index < mcomp_filters) {
+ int64_t err = vp9_calc_ss_err(cpi->Source,
+ &cm->yv12_fb[cm->new_fb_idx]);
+ int64_t rate = cpi->projected_frame_size << 8;
+ mcomp_filter_cost[mcomp_filter_index] =
+ (RDCOST(cpi->RDMULT, cpi->RDDIV, rate, err));
+ mcomp_filter_index++;
+ if (mcomp_filter_index < mcomp_filters) {
+ cm->mcomp_filter_type = mcomp_filters_to_search[mcomp_filter_index];
+ loop_count = -1;
+ loop = 1;
+ } else {
+ int f;
+ int64_t best_cost = mcomp_filter_cost[0];
+ int mcomp_best_filter = mcomp_filters_to_search[0];
+ for (f = 1; f < mcomp_filters; f++) {
+ if (mcomp_filter_cost[f] < best_cost) {
+ mcomp_best_filter = mcomp_filters_to_search[f];
+ best_cost = mcomp_filter_cost[f];
+ }
+ }
+ if (mcomp_best_filter != mcomp_filters_to_search[mcomp_filters - 1]) {
+ loop_count = -1;
+ loop = 1;
+ cm->mcomp_filter_type = mcomp_best_filter;
+ }
+ /*
+ printf(" best filter = %d, ( ", mcomp_best_filter);
+ for (f=0;f<mcomp_filters; f++) printf("%d ", mcomp_filter_cost[f]);
+ printf(")\n");
+ */
+ }
+#if RESET_FOREACH_FILTER
+ if (loop) {
+ overshoot_seen = 0;
+ undershoot_seen = 0;
+ q_low = q_low0;
+ q_high = q_high0;
+ q = Q0;
+ cpi->rate_correction_factor = rate_correction_factor0;
+ cpi->gf_rate_correction_factor = gf_rate_correction_factor0;
+ cpi->active_best_quality = active_best_quality0;
+ cpi->active_worst_quality = active_worst_quality0;
+ }
+#endif
+ }
+ }
+
+ if (loop) {
+ loop_count++;
+
+#if CONFIG_INTERNAL_STATS
+ cpi->tot_recode_hits++;
+#endif
+ }
+ } while (loop);
+
+ // Special case code to reduce pulsing when key frames are forced at a
+ // fixed interval. Note the reconstruction error if it is the frame before
+  // the forced key frame.
+ if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
+ cpi->ambient_err = vp9_calc_ss_err(cpi->Source,
+ &cm->yv12_fb[cm->new_fb_idx]);
+ }
+
+ if (cm->frame_type == KEY_FRAME)
+ cpi->refresh_last_frame = 1;
+
+ cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
+
+#if WRITE_RECON_BUFFER
+ if (cm->show_frame)
+ write_cx_frame_to_file(cm->frame_to_show,
+ cm->current_video_frame);
+ else
+ write_cx_frame_to_file(cm->frame_to_show,
+ cm->current_video_frame + 1000);
+#endif
+
+ // Pick the loop filter level for the frame.
+ loopfilter_frame(cpi, cm);
+
+#if WRITE_RECON_BUFFER
+ if (cm->show_frame)
+ write_cx_frame_to_file(cm->frame_to_show,
+ cm->current_video_frame + 2000);
+ else
+ write_cx_frame_to_file(cm->frame_to_show,
+ cm->current_video_frame + 3000);
+#endif
+
+ // build the bitstream
+ cpi->dummy_packing = 0;
+ vp9_pack_bitstream(cpi, dest, size);
+
+ if (xd->update_mb_segmentation_map) {
+ update_reference_segmentation_map(cpi);
+ }
+
+ release_scaled_references(cpi);
+ update_reference_frames(cpi);
+
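+  // Collapse the full coefficient counts into model counts for each
+  // transform size before adapting the coefficient probabilities.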
+ for (t = TX_4X4; t <= TX_32X32; t++)
+ vp9_full_to_model_counts(cpi->common.fc.coef_counts[t],
+ cpi->coef_counts[t]);
+ if (!cpi->common.error_resilient_mode &&
+ !cpi->common.frame_parallel_decoding_mode) {
+ vp9_adapt_coef_probs(&cpi->common);
+ }
+
+ if (cpi->common.frame_type != KEY_FRAME) {
+ vp9_copy(cpi->common.fc.y_mode_counts, cpi->y_mode_count);
+ vp9_copy(cpi->common.fc.uv_mode_counts, cpi->y_uv_mode_count);
+ vp9_copy(cpi->common.fc.partition_counts, cpi->partition_count);
+ vp9_copy(cm->fc.intra_inter_count, cpi->intra_inter_count);
+ vp9_copy(cm->fc.comp_inter_count, cpi->comp_inter_count);
+ vp9_copy(cm->fc.single_ref_count, cpi->single_ref_count);
+ vp9_copy(cm->fc.comp_ref_count, cpi->comp_ref_count);
+ cpi->common.fc.NMVcount = cpi->NMVcount;
+ if (!cpi->common.error_resilient_mode &&
+ !cpi->common.frame_parallel_decoding_mode) {
+ vp9_adapt_mode_probs(&cpi->common);
+ vp9_adapt_mode_context(&cpi->common);
+ vp9_adapt_nmv_probs(&cpi->common, cpi->mb.e_mbd.allow_high_precision_mv);
+ }
+ }
+
+#ifdef ENTROPY_STATS
+ vp9_update_mode_context_stats(cpi);
+#endif
+
+  /* Move storing frame_type out of the loop above, since it is needed by
+   * motion search as well as by the loop filter. */
+ cm->last_frame_type = cm->frame_type;
+
+ // Update rate control heuristics
+ cpi->total_byte_count += (*size);
+ cpi->projected_frame_size = (*size) << 3;
+
+ if (!active_worst_qchanged)
+ vp9_update_rate_correction_factors(cpi, 2);
+
+ cpi->last_q[cm->frame_type] = cm->base_qindex;
+
+  // Keep a record of the last boosted (KF/GF/ARF) Q value.
+ // If the current frame is coded at a lower Q then we also update it.
+ // If all mbs in this group are skipped only update if the Q value is
+ // better than that already stored.
+ // This is used to help set quality in forced key frames to reduce popping
+ if ((cm->base_qindex < cpi->last_boosted_qindex) ||
+ ((cpi->static_mb_pct < 100) &&
+ ((cm->frame_type == KEY_FRAME) ||
+ cpi->refresh_alt_ref_frame ||
+ (cpi->refresh_golden_frame && !cpi->is_src_frame_alt_ref)))) {
+ cpi->last_boosted_qindex = cm->base_qindex;
+ }
+
+ if (cm->frame_type == KEY_FRAME) {
+ vp9_adjust_key_frame_context(cpi);
+ }
+
+ // Keep a record of ambient average Q.
+ if (cm->frame_type != KEY_FRAME)
+ cpi->avg_frame_qindex = (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
+
+  // Keep a record from which we can calculate the average Q excluding
+  // GF updates and key frames.
+ if (cm->frame_type != KEY_FRAME &&
+ !cpi->refresh_golden_frame &&
+ !cpi->refresh_alt_ref_frame) {
+ cpi->ni_frames++;
+ cpi->tot_q += vp9_convert_qindex_to_q(q);
+ cpi->avg_q = cpi->tot_q / (double)cpi->ni_frames;
+
+ // Calculate the average Q for normal inter frames (not key or GFU frames).
+ cpi->ni_tot_qi += q;
+ cpi->ni_av_qi = cpi->ni_tot_qi / cpi->ni_frames;
+ }
+
+ // Update the buffer level variable.
+ // Non-viewable frames are a special case and are treated as pure overhead.
+ if (!cm->show_frame)
+ cpi->bits_off_target -= cpi->projected_frame_size;
+ else
+ cpi->bits_off_target += cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
+
+ // Clip the buffer level at the maximum buffer size
+ if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size)
+ cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
+
+  // Rolling monitors of whether we are over- or under-spending, used to
+  // help regulate min and max Q in two-pass mode.
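+  // These are exponential moving averages with rounding: 3/4 weight on
+  // history for the short-term monitors, 31/32 for the long-term ones.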
+ if (cm->frame_type != KEY_FRAME) {
+ cpi->rolling_target_bits =
+ ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4;
+ cpi->rolling_actual_bits =
+ ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4;
+ cpi->long_rolling_target_bits =
+ ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32;
+ cpi->long_rolling_actual_bits =
+ ((cpi->long_rolling_actual_bits * 31) +
+ cpi->projected_frame_size + 16) / 32;
+ }
+
+ // Actual bits spent
+ cpi->total_actual_bits += cpi->projected_frame_size;
+
+ // Debug stats
+ cpi->total_target_vs_actual += (cpi->this_frame_target - cpi->projected_frame_size);
+
+ cpi->buffer_level = cpi->bits_off_target;
+
+  // Update the bits left for the KF and GF groups to account for overshoot
+  // or undershoot on these frames.
+ if (cm->frame_type == KEY_FRAME) {
+ cpi->twopass.kf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
+
+ cpi->twopass.kf_group_bits = MAX(cpi->twopass.kf_group_bits, 0);
+ } else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame) {
+ cpi->twopass.gf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
+
+ cpi->twopass.gf_group_bits = MAX(cpi->twopass.gf_group_bits, 0);
+ }
+
+ // Update the skip mb flag probabilities based on the distribution seen
+ // in this frame.
+ // update_base_skip_probs(cpi);
+
+#if 0 && CONFIG_INTERNAL_STATS
+ {
+ FILE *f = fopen("tmp.stt", "a");
+ int recon_err;
+
+ vp9_clear_system_state(); // __asm emms;
+
+ recon_err = vp9_calc_ss_err(cpi->Source,
+ &cm->yv12_fb[cm->new_fb_idx]);
+
+ if (cpi->twopass.total_left_stats.coded_error != 0.0)
+ fprintf(f, "%10d %10d %10d %10d %10d %10d %10d %10d"
+ "%7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f"
+ "%6d %6d %5d %5d %5d %8.2f %10d %10.3f"
+ "%10.3f %8d %10d %10d %10d\n",
+ cpi->common.current_video_frame, cpi->this_frame_target,
+ cpi->projected_frame_size, 0, //loop_size_estimate,
+ (cpi->projected_frame_size - cpi->this_frame_target),
+ (int)cpi->total_target_vs_actual,
+ (int)(cpi->oxcf.starting_buffer_level - cpi->bits_off_target),
+ (int)cpi->total_actual_bits,
+ vp9_convert_qindex_to_q(cm->base_qindex),
+ (double)vp9_dc_quant(cm->base_qindex, 0) / 4.0,
+ vp9_convert_qindex_to_q(cpi->active_best_quality),
+ vp9_convert_qindex_to_q(cpi->active_worst_quality),
+ cpi->avg_q,
+ vp9_convert_qindex_to_q(cpi->ni_av_qi),
+ vp9_convert_qindex_to_q(cpi->cq_target_quality),
+ cpi->refresh_last_frame,
+ cpi->refresh_golden_frame, cpi->refresh_alt_ref_frame,
+ cm->frame_type, cpi->gfu_boost,
+ cpi->twopass.est_max_qcorrection_factor,
+ (int)cpi->twopass.bits_left,
+ cpi->twopass.total_left_stats.coded_error,
+ (double)cpi->twopass.bits_left /
+ cpi->twopass.total_left_stats.coded_error,
+ cpi->tot_recode_hits, recon_err, cpi->kf_boost,
+ cpi->kf_zeromotion_pct);
+ else
+ fprintf(f, "%10d %10d %10d %10d %10d %10d %10d %10d"
+ "%7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f"
+ "%5d %5d %5d %8d %8d %8.2f %10d %10.3f"
+ "%8d %10d %10d %10d\n",
+ cpi->common.current_video_frame,
+ cpi->this_frame_target, cpi->projected_frame_size,
+ 0, //loop_size_estimate,
+ (cpi->projected_frame_size - cpi->this_frame_target),
+ (int)cpi->total_target_vs_actual,
+ (int)(cpi->oxcf.starting_buffer_level - cpi->bits_off_target),
+ (int)cpi->total_actual_bits,
+ vp9_convert_qindex_to_q(cm->base_qindex),
+ (double)vp9_dc_quant(cm->base_qindex, 0) / 4.0,
+ vp9_convert_qindex_to_q(cpi->active_best_quality),
+ vp9_convert_qindex_to_q(cpi->active_worst_quality),
+ cpi->avg_q,
+ vp9_convert_qindex_to_q(cpi->ni_av_qi),
+ vp9_convert_qindex_to_q(cpi->cq_target_quality),
+ cpi->refresh_last_frame,
+ cpi->refresh_golden_frame, cpi->refresh_alt_ref_frame,
+ cm->frame_type, cpi->gfu_boost,
+ cpi->twopass.est_max_qcorrection_factor,
+ (int)cpi->twopass.bits_left,
+ cpi->twopass.total_left_stats.coded_error,
+ cpi->tot_recode_hits, recon_err, cpi->kf_boost,
+ cpi->kf_zeromotion_pct);
+
+ fclose(f);
+
+ if (0) {
+ FILE *fmodes = fopen("Modes.stt", "a");
+ int i;
+
+ fprintf(fmodes, "%6d:%1d:%1d:%1d ",
+ cpi->common.current_video_frame,
+ cm->frame_type, cpi->refresh_golden_frame,
+ cpi->refresh_alt_ref_frame);
+
+ for (i = 0; i < MAX_MODES; i++)
+ fprintf(fmodes, "%5d ", cpi->mode_chosen_counts[i]);
+
+ fprintf(fmodes, "\n");
+
+ fclose(fmodes);
+ }
+ }
+
+#endif
+
+#if 0
+ // Debug stats for segment feature experiments.
+ print_seg_map(cpi);
+#endif
+
+  // If this was a KF or GF, note the Q.
+ if ((cm->frame_type == KEY_FRAME)
+ || cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
+ cm->last_kf_gf_q = cm->base_qindex;
+
+ if (cpi->refresh_golden_frame == 1)
+ cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
+ else
+ cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_GOLDEN;
+
+ if (cpi->refresh_alt_ref_frame == 1)
+ cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
+ else
+ cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_ALTREF;
+
+
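+  // Track which reference buffers now hold identical frames so that
+  // redundant searches against duplicate references can be short-circuited.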
+ if (cpi->refresh_last_frame & cpi->refresh_golden_frame)
+ cpi->gold_is_last = 1;
+ else if (cpi->refresh_last_frame ^ cpi->refresh_golden_frame)
+ cpi->gold_is_last = 0;
+
+ if (cpi->refresh_last_frame & cpi->refresh_alt_ref_frame)
+ cpi->alt_is_last = 1;
+ else if (cpi->refresh_last_frame ^ cpi->refresh_alt_ref_frame)
+ cpi->alt_is_last = 0;
+
+ if (cpi->refresh_alt_ref_frame & cpi->refresh_golden_frame)
+ cpi->gold_is_alt = 1;
+ else if (cpi->refresh_alt_ref_frame ^ cpi->refresh_golden_frame)
+ cpi->gold_is_alt = 0;
+
+ cpi->ref_frame_flags = VP9_ALT_FLAG | VP9_GOLD_FLAG | VP9_LAST_FLAG;
+
+ if (cpi->gold_is_last)
+ cpi->ref_frame_flags &= ~VP9_GOLD_FLAG;
+
+ if (cpi->alt_is_last)
+ cpi->ref_frame_flags &= ~VP9_ALT_FLAG;
+
+ if (cpi->gold_is_alt)
+ cpi->ref_frame_flags &= ~VP9_ALT_FLAG;
+
+ if (cpi->oxcf.play_alternate && cpi->refresh_alt_ref_frame
+ && (cm->frame_type != KEY_FRAME))
+ // Update the alternate reference frame stats as appropriate.
+ update_alt_ref_frame_stats(cpi);
+ else
+ // Update the Golden frame stats as appropriate.
+ update_golden_frame_stats(cpi);
+
+ if (cm->frame_type == KEY_FRAME) {
+ // Tell the caller that the frame was coded as a key frame
+ *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
+
+#if CONFIG_MULTIPLE_ARF
+ // Reset the sequence number.
+ if (cpi->multi_arf_enabled) {
+ cpi->sequence_number = 0;
+ cpi->frame_coding_order_period = cpi->new_frame_coding_order_period;
+ cpi->new_frame_coding_order_period = -1;
+ }
+#endif
+
+ // As this frame is a key frame the next defaults to an inter frame.
+ cm->frame_type = INTER_FRAME;
+ } else {
+ *frame_flags = cm->frame_flags&~FRAMEFLAGS_KEY;
+
+#if CONFIG_MULTIPLE_ARF
+ /* Increment position in the coded frame sequence. */
+ if (cpi->multi_arf_enabled) {
+ ++cpi->sequence_number;
+ if (cpi->sequence_number >= cpi->frame_coding_order_period) {
+ cpi->sequence_number = 0;
+ cpi->frame_coding_order_period = cpi->new_frame_coding_order_period;
+ cpi->new_frame_coding_order_period = -1;
+ }
+ cpi->this_frame_weight = cpi->arf_weight[cpi->sequence_number];
+ assert(cpi->this_frame_weight >= 0);
+ }
+#endif
+ }
+
+ // Clear the one shot update flags for segmentation map and mode/ref loop filter deltas.
+ xd->update_mb_segmentation_map = 0;
+ xd->update_mb_segmentation_data = 0;
+ xd->mode_ref_lf_delta_update = 0;
+
+ // keep track of the last coded dimensions
+ cm->last_width = cm->width;
+ cm->last_height = cm->height;
+
+ // Don't increment frame counters if this was an altref buffer
+  // update, not a real frame.
+ cm->last_show_frame = cm->show_frame;
+ if (cm->show_frame) {
+ ++cm->current_video_frame;
+ ++cpi->frames_since_key;
+ }
+
+ // reset to normal state now that we are done.
+
+#if 0
+ {
+ char filename[512];
+ FILE *recon_file;
+ sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
+ recon_file = fopen(filename, "wb");
+ fwrite(cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]].buffer_alloc,
+ cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]].frame_size,
+ 1, recon_file);
+ fclose(recon_file);
+ }
+#endif
+#ifdef OUTPUT_YUV_REC
+ vp9_write_yuv_rec_frame(cm);
+#endif
+
+ if (cm->show_frame) {
+ vpx_memcpy(cm->prev_mip, cm->mip,
+ cm->mode_info_stride * (cm->mi_rows + 64 / MI_SIZE) *
+ sizeof(MODE_INFO));
+ } else {
+ vpx_memset(cm->prev_mip, 0,
+ cm->mode_info_stride * (cm->mi_rows + 64 / MI_SIZE) *
+ sizeof(MODE_INFO));
+ }
+ // restore prev_mi
+ cm->prev_mi = cm->prev_mip + cm->mode_info_stride + 1;
+}
+
+static void Pass2Encode(VP9_COMP *cpi, unsigned long *size,
+ unsigned char *dest, unsigned int *frame_flags) {
+
+ if (!cpi->refresh_alt_ref_frame)
+ vp9_second_pass(cpi);
+
+ encode_frame_to_data_rate(cpi, size, dest, frame_flags);
+
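+  // Deduct this frame's cost from the remaining two-pass bit budget: the
+  // target size when RC long-term memory is disabled, otherwise the actual
+  // packed size in bits.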
+#ifdef DISABLE_RC_LONG_TERM_MEM
+ cpi->twopass.bits_left -= cpi->this_frame_target;
+#else
+ cpi->twopass.bits_left -= 8 * *size;
+#endif
+
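+  // Credit back the guaranteed per-frame minimum rate so that undershoot
+  // does not starve the frames that remain.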
+ if (!cpi->refresh_alt_ref_frame) {
+ double lower_bounds_min_rate = FRAME_OVERHEAD_BITS * cpi->oxcf.frame_rate;
+ double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth
+ * cpi->oxcf.two_pass_vbrmin_section / 100);
+
+ if (two_pass_min_rate < lower_bounds_min_rate)
+ two_pass_min_rate = lower_bounds_min_rate;
+
+ cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->oxcf.frame_rate);
+ }
+}
+
+
+int vp9_receive_raw_frame(VP9_PTR ptr, unsigned int frame_flags,
+ YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+ int64_t end_time) {
+ VP9_COMP *cpi = (VP9_COMP *) ptr;
+ VP9_COMMON *cm = &cpi->common;
+ struct vpx_usec_timer timer;
+ int res = 0;
+
+ if (!cpi->initial_width) {
+ // TODO(jkoleszar): Support 1/4 subsampling?
+ cm->subsampling_x = sd->uv_width < sd->y_width;
+ cm->subsampling_y = sd->uv_height < sd->y_height;
+ alloc_raw_frame_buffers(cpi);
+
+ cpi->initial_width = cm->width;
+ cpi->initial_height = cm->height;
+ }
+ vpx_usec_timer_start(&timer);
+ if (vp9_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, frame_flags,
+ cpi->active_map_enabled ? cpi->active_map : NULL))
+ res = -1;
+ cm->clr_type = sd->clrtype;
+ vpx_usec_timer_mark(&timer);
+ cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
+
+ return res;
+}
+
+
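+// A frame is a reference if it updates any buffer or context that later
+// frames may depend on; frames that do not are droppable.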
+static int frame_is_reference(const VP9_COMP *cpi) {
+ const VP9_COMMON *cm = &cpi->common;
+ const MACROBLOCKD *mb = &cpi->mb.e_mbd;
+
+ return cm->frame_type == KEY_FRAME ||
+ cpi->refresh_last_frame ||
+ cpi->refresh_golden_frame ||
+ cpi->refresh_alt_ref_frame ||
+ cm->refresh_frame_context ||
+ mb->mode_ref_lf_delta_update ||
+ mb->update_mb_segmentation_map ||
+ mb->update_mb_segmentation_data;
+}
+
+#if CONFIG_MULTIPLE_ARF
+int is_next_frame_arf(VP9_COMP *cpi) {
+ // Negative entry in frame_coding_order indicates an ARF at this position.
+ return cpi->frame_coding_order[cpi->sequence_number + 1] < 0 ? 1 : 0;
+}
+#endif
+
+int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags,
+ unsigned long *size, unsigned char *dest,
+ int64_t *time_stamp, int64_t *time_end, int flush) {
+ VP9_COMP *cpi = (VP9_COMP *) ptr;
+ VP9_COMMON *cm = &cpi->common;
+ struct vpx_usec_timer cmptimer;
+ YV12_BUFFER_CONFIG *force_src_buffer = NULL;
+ int i;
+ // FILE *fp_out = fopen("enc_frame_type.txt", "a");
+
+ if (!cpi)
+ return -1;
+
+ vpx_usec_timer_start(&cmptimer);
+
+ cpi->source = NULL;
+
+ cpi->mb.e_mbd.allow_high_precision_mv = ALTREF_HIGH_PRECISION_MV;
+ set_mvcost(&cpi->mb);
+
+  // Should we code an alternate reference frame?
+ if (cpi->oxcf.play_alternate && cpi->source_alt_ref_pending) {
+ int frames_to_arf;
+
+#if CONFIG_MULTIPLE_ARF
+ assert(!cpi->multi_arf_enabled ||
+ cpi->frame_coding_order[cpi->sequence_number] < 0);
+
+ if (cpi->multi_arf_enabled && (cpi->pass == 2))
+ frames_to_arf = (-cpi->frame_coding_order[cpi->sequence_number])
+ - cpi->next_frame_in_order;
+ else
+#endif
+ frames_to_arf = cpi->frames_till_gf_update_due;
+
+ assert(frames_to_arf < cpi->twopass.frames_to_key);
+
+ if ((cpi->source = vp9_lookahead_peek(cpi->lookahead, frames_to_arf))) {
+#if CONFIG_MULTIPLE_ARF
+ cpi->alt_ref_source[cpi->arf_buffered] = cpi->source;
+#else
+ cpi->alt_ref_source = cpi->source;
+#endif
+
+ if (cpi->oxcf.arnr_max_frames > 0) {
+ // Produce the filtered ARF frame.
+ // TODO(agrange) merge these two functions.
+ configure_arnr_filter(cpi, cm->current_video_frame + frames_to_arf,
+ cpi->gfu_boost);
+ vp9_temporal_filter_prepare(cpi, frames_to_arf);
+ force_src_buffer = &cpi->alt_ref_buffer;
+ }
+
+ cm->show_frame = 0;
+ cm->intra_only = 0;
+ cpi->refresh_alt_ref_frame = 1;
+ cpi->refresh_golden_frame = 0;
+ cpi->refresh_last_frame = 0;
+ cpi->is_src_frame_alt_ref = 0;
+
+ // TODO(agrange) This needs to vary depending on where the next ARF is.
+ cm->frames_till_alt_ref_frame = frames_to_arf;
+
+#if CONFIG_MULTIPLE_ARF
+ if (!cpi->multi_arf_enabled)
+#endif
+      cpi->source_alt_ref_pending = 0; // Clear pending alt ref flag.
+ }
+ }
+
+ if (!cpi->source) {
+#if CONFIG_MULTIPLE_ARF
+ int i;
+#endif
+ if ((cpi->source = vp9_lookahead_pop(cpi->lookahead, flush))) {
+ cm->show_frame = 1;
+
+#if CONFIG_MULTIPLE_ARF
+      // Is this frame the ARF overlay?
+ cpi->is_src_frame_alt_ref = 0;
+ for (i = 0; i < cpi->arf_buffered; ++i) {
+ if (cpi->source == cpi->alt_ref_source[i]) {
+ cpi->is_src_frame_alt_ref = 1;
+ cpi->refresh_golden_frame = 1;
+ break;
+ }
+ }
+#else
+ cpi->is_src_frame_alt_ref = cpi->alt_ref_source
+ && (cpi->source == cpi->alt_ref_source);
+#endif
+ if (cpi->is_src_frame_alt_ref) {
+ // Current frame is an ARF overlay frame.
+#if CONFIG_MULTIPLE_ARF
+ cpi->alt_ref_source[i] = NULL;
+#else
+ cpi->alt_ref_source = NULL;
+#endif
+ // Don't refresh the last buffer for an ARF overlay frame. It will
+ // become the GF so preserve last as an alternative prediction option.
+ cpi->refresh_last_frame = 0;
+ }
+#if CONFIG_MULTIPLE_ARF
+ ++cpi->next_frame_in_order;
+#endif
+ }
+ }
+
+ if (cpi->source) {
+ cpi->un_scaled_source = cpi->Source = force_src_buffer ? force_src_buffer
+ : &cpi->source->img;
+ *time_stamp = cpi->source->ts_start;
+ *time_end = cpi->source->ts_end;
+ *frame_flags = cpi->source->flags;
+
+ // fprintf(fp_out, " Frame:%d", cm->current_video_frame);
+#if CONFIG_MULTIPLE_ARF
+ if (cpi->multi_arf_enabled) {
+ // fprintf(fp_out, " seq_no:%d this_frame_weight:%d",
+ // cpi->sequence_number, cpi->this_frame_weight);
+ } else {
+ // fprintf(fp_out, "\n");
+ }
+#else
+ // fprintf(fp_out, "\n");
+#endif
+
+#if CONFIG_MULTIPLE_ARF
+ if ((cm->frame_type != KEY_FRAME) && (cpi->pass == 2))
+ cpi->source_alt_ref_pending = is_next_frame_arf(cpi);
+#endif
+ } else {
+ *size = 0;
+ if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
+ vp9_end_first_pass(cpi); /* get last stats packet */
+ cpi->twopass.first_pass_done = 1;
+ }
+
+ // fclose(fp_out);
+ return -1;
+ }
+
+ if (cpi->source->ts_start < cpi->first_time_stamp_ever) {
+ cpi->first_time_stamp_ever = cpi->source->ts_start;
+ cpi->last_end_time_stamp_seen = cpi->source->ts_start;
+ }
+
+  // Adjust the frame rate based on the timestamps given.
+ if (!cpi->refresh_alt_ref_frame) {
+ int64_t this_duration;
+ int step = 0;
+
+ if (cpi->source->ts_start == cpi->first_time_stamp_ever) {
+ this_duration = cpi->source->ts_end - cpi->source->ts_start;
+ step = 1;
+ } else {
+ int64_t last_duration = cpi->last_end_time_stamp_seen
+ - cpi->last_time_stamp_seen;
+
+ this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
+
+      // Do a step update if the duration changes by 10% or more.
+ if (last_duration)
+ step = (int)((this_duration - last_duration) * 10 / last_duration);
+ }
+
+ if (this_duration) {
+ if (step) {
+ vp9_new_frame_rate(cpi, 10000000.0 / this_duration);
+ } else {
+ // Average this frame's rate into the last second's average
+ // frame rate. If we haven't seen 1 second yet, then average
+ // over the whole interval seen.
+ const double interval = MIN((double)(cpi->source->ts_end
+ - cpi->first_time_stamp_ever), 10000000.0);
+ double avg_duration = 10000000.0 / cpi->oxcf.frame_rate;
+ avg_duration *= (interval - avg_duration + this_duration);
+ avg_duration /= interval;
+
+ vp9_new_frame_rate(cpi, 10000000.0 / avg_duration);
+ }
+ }
+
+ cpi->last_time_stamp_seen = cpi->source->ts_start;
+ cpi->last_end_time_stamp_seen = cpi->source->ts_end;
+ }
+
+ // start with a 0 size frame
+ *size = 0;
+
+ // Clear down mmx registers
+ vp9_clear_system_state(); // __asm emms;
+
+ /* find a free buffer for the new frame, releasing the reference previously
+ * held.
+ */
+ cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
+ cm->new_fb_idx = get_free_fb(cm);
+
+#if CONFIG_MULTIPLE_ARF
+ /* Set up the correct ARF frame. */
+ if (cpi->refresh_alt_ref_frame) {
+ ++cpi->arf_buffered;
+ }
+ if (cpi->multi_arf_enabled && (cm->frame_type != KEY_FRAME) &&
+ (cpi->pass == 2)) {
+ cpi->alt_fb_idx = cpi->arf_buffer_idx[cpi->sequence_number];
+ }
+#endif
+
+ /* Get the mapping of L/G/A to the reference buffer pool */
+ cm->active_ref_idx[0] = cm->ref_frame_map[cpi->lst_fb_idx];
+ cm->active_ref_idx[1] = cm->ref_frame_map[cpi->gld_fb_idx];
+ cm->active_ref_idx[2] = cm->ref_frame_map[cpi->alt_fb_idx];
+
+#if 0 // CONFIG_MULTIPLE_ARF
+ if (cpi->multi_arf_enabled) {
+ fprintf(fp_out, " idx(%d, %d, %d, %d) active(%d, %d, %d)",
+ cpi->lst_fb_idx, cpi->gld_fb_idx, cpi->alt_fb_idx, cm->new_fb_idx,
+ cm->active_ref_idx[0], cm->active_ref_idx[1], cm->active_ref_idx[2]);
+ if (cpi->refresh_alt_ref_frame)
+ fprintf(fp_out, " type:ARF");
+ if (cpi->is_src_frame_alt_ref)
+ fprintf(fp_out, " type:OVERLAY[%d]", cpi->alt_fb_idx);
+ fprintf(fp_out, "\n");
+ }
+#endif
+
+ cm->frame_type = INTER_FRAME;
+ cm->frame_flags = *frame_flags;
+
+ // Reset the frame pointers to the current frame size
+ vp9_realloc_frame_buffer(&cm->yv12_fb[cm->new_fb_idx],
+ cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+ VP9BORDERINPIXELS);
+
+ // Calculate scaling factors for each of the 3 available references
+ for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i)
+ vp9_setup_scale_factors(cm, i);
+
+ vp9_setup_interp_filters(&cpi->mb.e_mbd, DEFAULT_INTERP_FILTER, cm);
+
+ if (cpi->pass == 1) {
+ Pass1Encode(cpi, size, dest, frame_flags);
+ } else if (cpi->pass == 2) {
+ Pass2Encode(cpi, size, dest, frame_flags);
+ } else {
+ encode_frame_to_data_rate(cpi, size, dest, frame_flags);
+ }
+
+ if (cm->refresh_frame_context)
+ cm->frame_contexts[cm->frame_context_idx] = cm->fc;
+
+ if (*size > 0) {
+    // If it's a dropped frame, honor the requests on subsequent frames.
+ cpi->droppable = !frame_is_reference(cpi);
+
+ // return to normal state
+ cm->reset_frame_context = 0;
+ cm->refresh_frame_context = 1;
+ cpi->refresh_alt_ref_frame = 0;
+ cpi->refresh_golden_frame = 0;
+ cpi->refresh_last_frame = 1;
+ cm->frame_type = INTER_FRAME;
+ }
+
+ vpx_usec_timer_mark(&cmptimer);
+ cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
+
+ if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame)
+ generate_psnr_packet(cpi);
+
+#if CONFIG_INTERNAL_STATS
+
+ if (cpi->pass != 1) {
+ cpi->bytes += *size;
+
+ if (cm->show_frame) {
+
+ cpi->count++;
+
+ if (cpi->b_calculate_psnr) {
+ double ye, ue, ve;
+ double frame_psnr;
+ YV12_BUFFER_CONFIG *orig = cpi->Source;
+ YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
+ YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
+ int y_samples = orig->y_height * orig->y_width;
+ int uv_samples = orig->uv_height * orig->uv_width;
+ int t_samples = y_samples + 2 * uv_samples;
+ double sq_error;
+
+ ye = (double)calc_plane_error(orig->y_buffer, orig->y_stride,
+ recon->y_buffer, recon->y_stride, orig->y_width,
+ orig->y_height);
+
+ ue = (double)calc_plane_error(orig->u_buffer, orig->uv_stride,
+ recon->u_buffer, recon->uv_stride, orig->uv_width,
+ orig->uv_height);
+
+ ve = (double)calc_plane_error(orig->v_buffer, orig->uv_stride,
+ recon->v_buffer, recon->uv_stride, orig->uv_width,
+ orig->uv_height);
+
+ sq_error = ye + ue + ve;
+
+ frame_psnr = vp9_mse2psnr(t_samples, 255.0, sq_error);
+
+ cpi->total_y += vp9_mse2psnr(y_samples, 255.0, ye);
+ cpi->total_u += vp9_mse2psnr(uv_samples, 255.0, ue);
+ cpi->total_v += vp9_mse2psnr(uv_samples, 255.0, ve);
+ cpi->total_sq_error += sq_error;
+ cpi->total += frame_psnr;
+ {
+ double frame_psnr2, frame_ssim2 = 0;
+ double weight = 0;
+#if CONFIG_POSTPROC
+ vp9_deblock(cm->frame_to_show, &cm->post_proc_buffer,
+ cm->filter_level * 10 / 6);
+#endif
+ vp9_clear_system_state();
+
+ ye = (double)calc_plane_error(orig->y_buffer, orig->y_stride,
+ pp->y_buffer, pp->y_stride, orig->y_width,
+ orig->y_height);
+
+ ue = (double)calc_plane_error(orig->u_buffer, orig->uv_stride,
+ pp->u_buffer, pp->uv_stride, orig->uv_width,
+ orig->uv_height);
+
+ ve = (double)calc_plane_error(orig->v_buffer, orig->uv_stride,
+ pp->v_buffer, pp->uv_stride, orig->uv_width,
+ orig->uv_height);
+
+ sq_error = ye + ue + ve;
+
+ frame_psnr2 = vp9_mse2psnr(t_samples, 255.0, sq_error);
+
+ cpi->totalp_y += vp9_mse2psnr(y_samples, 255.0, ye);
+ cpi->totalp_u += vp9_mse2psnr(uv_samples, 255.0, ue);
+ cpi->totalp_v += vp9_mse2psnr(uv_samples, 255.0, ve);
+ cpi->total_sq_error2 += sq_error;
+ cpi->totalp += frame_psnr2;
+
+ frame_ssim2 = vp9_calc_ssim(cpi->Source,
+ recon, 1, &weight);
+
+ cpi->summed_quality += frame_ssim2 * weight;
+ cpi->summed_weights += weight;
+
+ frame_ssim2 = vp9_calc_ssim(cpi->Source,
+ &cm->post_proc_buffer, 1, &weight);
+
+ cpi->summedp_quality += frame_ssim2 * weight;
+ cpi->summedp_weights += weight;
+#if 0
+ {
+ FILE *f = fopen("q_used.stt", "a");
+          fprintf(f, "%5d : Y%7.3f:U%7.3f:V%7.3f:F%7.3f:S%7.3f\n",
+ cpi->common.current_video_frame, y2, u2, v2,
+ frame_psnr2, frame_ssim2);
+ fclose(f);
+ }
+#endif
+ }
+ }
+
+ if (cpi->b_calculate_ssimg) {
+ double y, u, v, frame_all;
+ frame_all = vp9_calc_ssimg(cpi->Source, cm->frame_to_show,
+ &y, &u, &v);
+ cpi->total_ssimg_y += y;
+ cpi->total_ssimg_u += u;
+ cpi->total_ssimg_v += v;
+ cpi->total_ssimg_all += frame_all;
+ }
+ }
+ }
+
+#endif
+ // fclose(fp_out);
+ return 0;
+}
+
+int vp9_get_preview_raw_frame(VP9_PTR comp, YV12_BUFFER_CONFIG *dest,
+ vp9_ppflags_t *flags) {
+ VP9_COMP *cpi = (VP9_COMP *) comp;
+
+ if (!cpi->common.show_frame)
+ return -1;
+ else {
+ int ret;
+#if CONFIG_POSTPROC
+ ret = vp9_post_proc_frame(&cpi->common, dest, flags);
+#else
+
+ if (cpi->common.frame_to_show) {
+ *dest = *cpi->common.frame_to_show;
+ dest->y_width = cpi->common.width;
+ dest->y_height = cpi->common.height;
+ dest->uv_height = cpi->common.height / 2;
+ ret = 0;
+ } else {
+ ret = -1;
+ }
+
+#endif // !CONFIG_POSTPROC
+ vp9_clear_system_state();
+ return ret;
+ }
+}
+
+int vp9_set_roimap(VP9_PTR comp, unsigned char *map, unsigned int rows,
+ unsigned int cols, int delta_q[MAX_MB_SEGMENTS],
+ int delta_lf[MAX_MB_SEGMENTS],
+ unsigned int threshold[MAX_MB_SEGMENTS]) {
+ VP9_COMP *cpi = (VP9_COMP *) comp;
+ signed char feature_data[SEG_LVL_MAX][MAX_MB_SEGMENTS];
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ int i;
+
+ if (cpi->common.mb_rows != rows || cpi->common.mb_cols != cols)
+ return -1;
+
+ if (!map) {
+ vp9_disable_segmentation((VP9_PTR)cpi);
+ return 0;
+ }
+
+ // Set the segmentation Map
+ vp9_set_segmentation_map((VP9_PTR)cpi, map);
+
+ // Activate segmentation.
+ vp9_enable_segmentation((VP9_PTR)cpi);
+
+  // Set up the quantizer, loop filter and breakout threshold segment data.
+ for (i = 0; i < MAX_MB_SEGMENTS; i++) {
+ feature_data[SEG_LVL_ALT_Q][i] = delta_q[i];
+ feature_data[SEG_LVL_ALT_LF][i] = delta_lf[i];
+ cpi->segment_encode_breakout[i] = threshold[i];
+ }
+
+ // Enable the loop and quant changes in the feature mask
+ for (i = 0; i < MAX_MB_SEGMENTS; i++) {
+ if (delta_q[i])
+ vp9_enable_segfeature(xd, i, SEG_LVL_ALT_Q);
+ else
+ vp9_disable_segfeature(xd, i, SEG_LVL_ALT_Q);
+
+ if (delta_lf[i])
+ vp9_enable_segfeature(xd, i, SEG_LVL_ALT_LF);
+ else
+ vp9_disable_segfeature(xd, i, SEG_LVL_ALT_LF);
+ }
+
+  // Initialise the feature data structure with delta (not absolute) data:
+  // SEGMENT_DELTADATA = 0, SEGMENT_ABSDATA = 1.
+ vp9_set_segment_data((VP9_PTR)cpi, &feature_data[0][0], SEGMENT_DELTADATA);
+
+ return 0;
+}
+
+int vp9_set_active_map(VP9_PTR comp, unsigned char *map,
+ unsigned int rows, unsigned int cols) {
+ VP9_COMP *cpi = (VP9_COMP *) comp;
+
+ if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) {
+ if (map) {
+ vpx_memcpy(cpi->active_map, map, rows * cols);
+ cpi->active_map_enabled = 1;
+ } else {
+ cpi->active_map_enabled = 0;
+ }
+
+ return 0;
+ } else {
+ // cpi->active_map_enabled = 0;
+ return -1;
+ }
+}
+
+int vp9_set_internal_size(VP9_PTR comp,
+ VPX_SCALING horiz_mode, VPX_SCALING vert_mode) {
+ VP9_COMP *cpi = (VP9_COMP *) comp;
+ VP9_COMMON *cm = &cpi->common;
+ int hr = 0, hs = 0, vr = 0, vs = 0;
+
+ if (horiz_mode > ONETWO || vert_mode > ONETWO)
+ return -1;
+
+ Scale2Ratio(horiz_mode, &hr, &hs);
+ Scale2Ratio(vert_mode, &vr, &vs);
+
+  // Always round up to the next whole number (ceiling division).
+ cm->width = (hs - 1 + cpi->oxcf.width * hr) / hs;
+ cm->height = (vs - 1 + cpi->oxcf.height * vr) / vs;
+
+ assert(cm->width <= cpi->initial_width);
+ assert(cm->height <= cpi->initial_height);
+ update_frame_size(cpi);
+ return 0;
+}
+
+
+
+int vp9_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
+ int i, j;
+ int total = 0;
+
+ uint8_t *src = source->y_buffer;
+ uint8_t *dst = dest->y_buffer;
+
+  // Loop through the raw and reconstructed Y plane data, summing the
+  // squared differences.
+ for (i = 0; i < source->y_height; i += 16) {
+ for (j = 0; j < source->y_width; j += 16) {
+ unsigned int sse;
+ total += vp9_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
+ &sse);
+ }
+
+ src += 16 * source->y_stride;
+ dst += 16 * dest->y_stride;
+ }
+
+ return total;
+}
+
+
+int vp9_get_quantizer(VP9_PTR c) {
+ return ((VP9_COMP *)c)->common.base_qindex;
+}
diff --git a/libvpx/vp9/encoder/vp9_onyx_int.h b/libvpx/vp9/encoder/vp9_onyx_int.h
new file mode 100644
index 0000000..f5f1c07
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_onyx_int.h
@@ -0,0 +1,637 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_ONYX_INT_H_
+#define VP9_ENCODER_VP9_ONYX_INT_H_
+
+#include <stdio.h>
+#include "./vpx_config.h"
+#include "vp9/common/vp9_onyx.h"
+#include "vp9/encoder/vp9_treewriter.h"
+#include "vp9/encoder/vp9_tokenize.h"
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/encoder/vp9_variance.h"
+#include "vp9/encoder/vp9_encodemb.h"
+#include "vp9/encoder/vp9_quantize.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vpx_ports/mem.h"
+#include "vpx/internal/vpx_codec_internal.h"
+#include "vp9/encoder/vp9_mcomp.h"
+#include "vp9/common/vp9_findnearmv.h"
+#include "vp9/encoder/vp9_lookahead.h"
+
+// Experimental rate control switches
+#if CONFIG_ONESHOTQ
+#define ONE_SHOT_Q_ESTIMATE 0
+#define STRICT_ONE_SHOT_Q 0
+#define DISABLE_RC_LONG_TERM_MEM 0
+#endif
+
+// #define SPEEDSTATS 1
+#if CONFIG_MULTIPLE_ARF
+// Set MIN_GF_INTERVAL to 1 for the full decomposition.
+#define MIN_GF_INTERVAL 2
+#else
+#define MIN_GF_INTERVAL 4
+#endif
+#define DEFAULT_GF_INTERVAL 7
+
+#define KEY_FRAME_CONTEXT 5
+
+#define MAX_MODES 36
+
+#define MIN_THRESHMULT 32
+#define MAX_THRESHMULT 512
+
+#define GF_ZEROMV_ZBIN_BOOST 0
+#define LF_ZEROMV_ZBIN_BOOST 0
+#define MV_ZBIN_BOOST 0
+#define SPLIT_MV_ZBIN_BOOST 0
+#define INTRA_ZBIN_BOOST 0
+
+typedef struct {
+ nmv_context nmvc;
+ int nmvjointcost[MV_JOINTS];
+ int nmvcosts[2][MV_VALS];
+ int nmvcosts_hp[2][MV_VALS];
+
+ vp9_prob segment_pred_probs[PREDICTION_PROBS];
+ vp9_prob intra_inter_prob[INTRA_INTER_CONTEXTS];
+ vp9_prob comp_inter_prob[COMP_INTER_CONTEXTS];
+ vp9_prob single_ref_prob[REF_CONTEXTS][2];
+ vp9_prob comp_ref_prob[REF_CONTEXTS];
+
+ unsigned char *last_frame_seg_map_copy;
+
+ // 0 = Intra, Last, GF, ARF
+ signed char last_ref_lf_deltas[MAX_REF_LF_DELTAS];
+ // 0 = ZERO_MV, MV
+ signed char last_mode_lf_deltas[MAX_MODE_LF_DELTAS];
+
+ vp9_coeff_probs_model coef_probs[TX_SIZE_MAX_SB][BLOCK_TYPES];
+
+ vp9_prob y_mode_prob[4][VP9_INTRA_MODES - 1];
+ vp9_prob uv_mode_prob[VP9_INTRA_MODES][VP9_INTRA_MODES - 1];
+ vp9_prob partition_prob[2][NUM_PARTITION_CONTEXTS][PARTITION_TYPES - 1];
+
+ vp9_prob switchable_interp_prob[VP9_SWITCHABLE_FILTERS + 1]
+ [VP9_SWITCHABLE_FILTERS - 1];
+
+ int inter_mode_counts[INTER_MODE_CONTEXTS][VP9_INTER_MODES - 1][2];
+ vp9_prob inter_mode_probs[INTER_MODE_CONTEXTS][VP9_INTER_MODES - 1];
+
+ vp9_prob tx_probs_8x8p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 3];
+ vp9_prob tx_probs_16x16p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 2];
+ vp9_prob tx_probs_32x32p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 1];
+ vp9_prob mbskip_probs[MBSKIP_CONTEXTS];
+} CODING_CONTEXT;
+
+typedef struct {
+ double frame;
+ double intra_error;
+ double coded_error;
+ double sr_coded_error;
+ double ssim_weighted_pred_err;
+ double pcnt_inter;
+ double pcnt_motion;
+ double pcnt_second_ref;
+ double pcnt_neutral;
+ double MVr;
+ double mvr_abs;
+ double MVc;
+ double mvc_abs;
+ double MVrv;
+ double MVcv;
+ double mv_in_out_count;
+ double new_mv_count;
+ double duration;
+ double count;
+} FIRSTPASS_STATS;
+
+typedef struct {
+ int frames_so_far;
+ double frame_intra_error;
+ double frame_coded_error;
+ double frame_pcnt_inter;
+ double frame_pcnt_motion;
+ double frame_mvr;
+ double frame_mvr_abs;
+ double frame_mvc;
+ double frame_mvc_abs;
+} ONEPASS_FRAMESTATS;
+
+typedef struct {
+ struct {
+ int err;
+ union {
+ int_mv mv;
+ MB_PREDICTION_MODE mode;
+ } m;
+ } ref[MAX_REF_FRAMES];
+} MBGRAPH_MB_STATS;
+
+typedef struct {
+ MBGRAPH_MB_STATS *mb_stats;
+} MBGRAPH_FRAME_STATS;
+
+typedef enum {
+ THR_ZEROMV,
+ THR_DC,
+
+ THR_NEARESTMV,
+ THR_NEARMV,
+
+ THR_ZEROG,
+ THR_NEARESTG,
+
+ THR_ZEROA,
+ THR_NEARESTA,
+
+ THR_NEARG,
+ THR_NEARA,
+
+ THR_V_PRED,
+ THR_H_PRED,
+ THR_D45_PRED,
+ THR_D135_PRED,
+ THR_D117_PRED,
+ THR_D153_PRED,
+ THR_D27_PRED,
+ THR_D63_PRED,
+ THR_TM,
+
+ THR_NEWMV,
+ THR_NEWG,
+ THR_NEWA,
+
+ THR_SPLITMV,
+ THR_SPLITG,
+ THR_SPLITA,
+
+ THR_B_PRED,
+
+ THR_COMP_ZEROLA,
+ THR_COMP_NEARESTLA,
+ THR_COMP_NEARLA,
+
+ THR_COMP_ZEROGA,
+ THR_COMP_NEARESTGA,
+ THR_COMP_NEARGA,
+
+ THR_COMP_NEWLA,
+ THR_COMP_NEWGA,
+
+ THR_COMP_SPLITLA,
+ THR_COMP_SPLITGA,
+} THR_MODES;
+
+typedef enum {
+ DIAMOND = 0,
+ NSTEP = 1,
+ HEX = 2
+} SEARCH_METHODS;
+
+typedef struct {
+ int RD;
+ SEARCH_METHODS search_method;
+ int auto_filter;
+ int recode_loop;
+ int iterative_sub_pixel;
+ int half_pixel_search;
+ int quarter_pixel_search;
+ int thresh_mult[MAX_MODES];
+ int max_step_search_steps;
+ int first_step;
+ int optimize_coefficients;
+ int search_best_filter;
+ int static_segmentation;
+ int comp_inter_joint_search_thresh;
+ int adpative_rd_thresh;
+} SPEED_FEATURES;
+
+enum BlockSize {
+ BLOCK_4X4,
+ BLOCK_4X8,
+ BLOCK_8X4,
+ BLOCK_8X8,
+ BLOCK_8X16,
+ BLOCK_16X8,
+ BLOCK_16X16,
+ BLOCK_32X32,
+ BLOCK_32X16,
+ BLOCK_16X32,
+ BLOCK_64X32,
+ BLOCK_32X64,
+ BLOCK_64X64,
+ BLOCK_MAX_SB_SEGMENTS,
+};
+
+typedef struct VP9_COMP {
+
+ DECLARE_ALIGNED(16, short, y_quant[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, unsigned char, y_quant_shift[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, y_zbin[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, y_round[QINDEX_RANGE][16]);
+
+ DECLARE_ALIGNED(16, short, uv_quant[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, unsigned char, uv_quant_shift[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, uv_zbin[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, uv_round[QINDEX_RANGE][16]);
+
+#if CONFIG_ALPHA
+ DECLARE_ALIGNED(16, short, a_quant[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, unsigned char, a_quant_shift[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, a_zbin[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, a_round[QINDEX_RANGE][16]);
+
+ DECLARE_ALIGNED(16, short, zrun_zbin_boost_a[QINDEX_RANGE][16]);
+#endif
+ DECLARE_ALIGNED(16, short, zrun_zbin_boost_y[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, zrun_zbin_boost_uv[QINDEX_RANGE][16]);
+
+ MACROBLOCK mb;
+ VP9_COMMON common;
+ VP9_CONFIG oxcf;
+
+ struct lookahead_ctx *lookahead;
+ struct lookahead_entry *source;
+#if CONFIG_MULTIPLE_ARF
+ struct lookahead_entry *alt_ref_source[NUM_REF_FRAMES];
+#else
+ struct lookahead_entry *alt_ref_source;
+#endif
+
+ YV12_BUFFER_CONFIG *Source;
+ YV12_BUFFER_CONFIG *un_scaled_source;
+ YV12_BUFFER_CONFIG scaled_source;
+
+ int source_alt_ref_pending; // frame in src_buffers has been identified to be encoded as an alt ref
+ int source_alt_ref_active; // an alt ref frame has been encoded and is usable
+
+ int is_src_frame_alt_ref; // source of frame to encode is an exact copy of an alt ref frame
+
+  int gold_is_last; // Golden frame is the same as the last frame (short-circuit gold searches)
+  int alt_is_last; // Alt ref frame is the same as the last frame (short-circuit alt ref searches)
+  int gold_is_alt; // Don't do both alt and gold searches (just do gold).
+
+ int scaled_ref_idx[3];
+ int lst_fb_idx;
+ int gld_fb_idx;
+ int alt_fb_idx;
+#if CONFIG_MULTIPLE_ARF
+ int alt_ref_fb_idx[NUM_REF_FRAMES - 3];
+#endif
+ int refresh_last_frame;
+ int refresh_golden_frame;
+ int refresh_alt_ref_frame;
+ YV12_BUFFER_CONFIG last_frame_uf;
+
+ TOKENEXTRA *tok;
+ unsigned int tok_count[4][1 << 6];
+
+
+ unsigned int frames_since_key;
+ unsigned int key_frame_frequency;
+ unsigned int this_key_frame_forced;
+ unsigned int next_key_frame_forced;
+#if CONFIG_MULTIPLE_ARF
+ // Position within a frame coding order (including any additional ARF frames).
+ unsigned int sequence_number;
+ // Next frame in naturally occurring order that has not yet been coded.
+ int next_frame_in_order;
+#endif
+
+  // Ambient reconstruction error target for forced key frames.
+ int ambient_err;
+
+ unsigned int mode_check_freq[MAX_MODES];
+ unsigned int mode_test_hit_counts[MAX_MODES];
+ unsigned int mode_chosen_counts[MAX_MODES];
+
+ int rd_thresh_mult[MAX_MODES];
+ int rd_baseline_thresh[BLOCK_SIZE_TYPES][MAX_MODES];
+ int rd_threshes[BLOCK_SIZE_TYPES][MAX_MODES];
+ int rd_thresh_freq_fact[BLOCK_SIZE_TYPES][MAX_MODES];
+
+ int64_t rd_comp_pred_diff[NB_PREDICTION_TYPES];
+ int rd_prediction_type_threshes[4][NB_PREDICTION_TYPES];
+ unsigned int intra_inter_count[INTRA_INTER_CONTEXTS][2];
+ unsigned int comp_inter_count[COMP_INTER_CONTEXTS][2];
+ unsigned int single_ref_count[REF_CONTEXTS][2][2];
+ unsigned int comp_ref_count[REF_CONTEXTS][2];
+
+ // FIXME contextualize
+
+ int64_t rd_tx_select_diff[NB_TXFM_MODES];
+ int rd_tx_select_threshes[4][NB_TXFM_MODES];
+
+ int RDMULT;
+ int RDDIV;
+
+ CODING_CONTEXT coding_context;
+
+  // Rate targeting variables.
+ int this_frame_target;
+ int projected_frame_size;
+ int last_q[2]; // Separate values for Intra/Inter
+ int last_boosted_qindex; // Last boosted GF/KF/ARF q
+
+ double rate_correction_factor;
+ double key_frame_rate_correction_factor;
+ double gf_rate_correction_factor;
+
+ int frames_till_gf_update_due; // Count down till next GF
+
+  int gf_overspend_bits; // Total bits overspent because of GF boost (cumulative)
+
+ int non_gf_bitrate_adjustment; // Used in the few frames following a GF to recover the extra bits spent in that GF
+
+ int kf_overspend_bits; // Extra bits spent on key frames that need to be recovered on inter frames
+  int kf_bitrate_adjustment; // Current number of bits to try to recover on each inter frame.
+ int max_gf_interval;
+ int baseline_gf_interval;
+ int active_arnr_frames; // <= cpi->oxcf.arnr_max_frames
+ int active_arnr_strength; // <= cpi->oxcf.arnr_max_strength
+
+ int64_t key_frame_count;
+ int prior_key_frame_distance[KEY_FRAME_CONTEXT];
+ int per_frame_bandwidth; // Current section per frame bandwidth target
+ int av_per_frame_bandwidth; // Average frame size target for clip
+ int min_frame_bandwidth; // Minimum allocation that should be used for any frame
+ int inter_frame_target;
+ double output_frame_rate;
+ int64_t last_time_stamp_seen;
+ int64_t last_end_time_stamp_seen;
+ int64_t first_time_stamp_ever;
+
+ int ni_av_qi;
+ int ni_tot_qi;
+ int ni_frames;
+ int avg_frame_qindex;
+ double tot_q;
+ double avg_q;
+
+ int zbin_mode_boost;
+ int zbin_mode_boost_enabled;
+
+ int64_t total_byte_count;
+
+ int buffered_mode;
+
+ int buffer_level;
+ int bits_off_target;
+
+ int rolling_target_bits;
+ int rolling_actual_bits;
+
+ int long_rolling_target_bits;
+ int long_rolling_actual_bits;
+
+ int64_t total_actual_bits;
+ int total_target_vs_actual; // debug stats
+
+ int worst_quality;
+ int active_worst_quality;
+ int best_quality;
+ int active_best_quality;
+
+ int cq_target_quality;
+
+ int y_mode_count[4][VP9_INTRA_MODES];
+ int y_uv_mode_count[VP9_INTRA_MODES][VP9_INTRA_MODES];
+ unsigned int partition_count[NUM_PARTITION_CONTEXTS][PARTITION_TYPES];
+
+ nmv_context_counts NMVcount;
+
+ vp9_coeff_count coef_counts[TX_SIZE_MAX_SB][BLOCK_TYPES];
+ vp9_coeff_probs_model frame_coef_probs[TX_SIZE_MAX_SB][BLOCK_TYPES];
+ vp9_coeff_stats frame_branch_ct[TX_SIZE_MAX_SB][BLOCK_TYPES];
+
+ int gfu_boost;
+ int last_boost;
+ int kf_boost;
+ int kf_zeromotion_pct;
+
+ int64_t target_bandwidth;
+ struct vpx_codec_pkt_list *output_pkt_list;
+
+#if 0
+ // Experimental code for lagged and one pass
+ ONEPASS_FRAMESTATS one_pass_frame_stats[MAX_LAG_BUFFERS];
+ int one_pass_frame_index;
+#endif
+ MBGRAPH_FRAME_STATS mbgraph_stats[MAX_LAG_BUFFERS];
+ int mbgraph_n_frames; // number of frames filled in the above
+ int static_mb_pct; // % forced skip mbs by segmentation
+ int seg0_progress, seg0_idx, seg0_cnt;
+
+ int decimation_factor;
+ int decimation_count;
+
+ // for real time encoding
+ int avg_encode_time; // microsecond
+ int avg_pick_mode_time; // microsecond
+ int speed;
+  unsigned int cpu_freq; // MHz
+ int compressor_speed;
+
+ int interquantizer;
+ int goldfreq;
+ int auto_worst_q;
+ int cpu_used;
+ int pass;
+
+ vp9_prob last_skip_false_probs[3][MBSKIP_CONTEXTS];
+ int last_skip_probs_q[3];
+
+ int ref_frame_flags;
+
+ SPEED_FEATURES sf;
+ int error_bins[1024];
+
+  // Data used in real-time conferencing mode to help determine whether
+  // updating the GF would be beneficial.
+ int inter_zz_count;
+ int gf_bad_count;
+ int gf_update_recommended;
+
+ unsigned char *segmentation_map;
+
+  // Segment threshold for encode breakout.
+ int segment_encode_breakout[MAX_MB_SEGMENTS];
+
+ unsigned char *active_map;
+ unsigned int active_map_enabled;
+
+ fractional_mv_step_fp *find_fractional_mv_step;
+ vp9_full_search_fn_t full_search_sad;
+ vp9_refining_search_fn_t refining_search_sad;
+ vp9_diamond_search_fn_t diamond_search_sad;
+ vp9_variance_fn_ptr_t fn_ptr[BLOCK_MAX_SB_SEGMENTS];
+ uint64_t time_receive_data;
+ uint64_t time_compress_data;
+ uint64_t time_pick_lpf;
+ uint64_t time_encode_mb_row;
+
+ struct twopass_rc {
+ unsigned int section_intra_rating;
+ unsigned int next_iiratio;
+ unsigned int this_iiratio;
+ FIRSTPASS_STATS total_stats;
+ FIRSTPASS_STATS this_frame_stats;
+ FIRSTPASS_STATS *stats_in, *stats_in_end, *stats_in_start;
+ FIRSTPASS_STATS total_left_stats;
+ int first_pass_done;
+ int64_t bits_left;
+ int64_t clip_bits_total;
+ double avg_iiratio;
+ double modified_error_total;
+ double modified_error_used;
+ double modified_error_left;
+ double kf_intra_err_min;
+ double gf_intra_err_min;
+ int frames_to_key;
+ int maxq_max_limit;
+ int maxq_min_limit;
+ int static_scene_max_gf_interval;
+ int kf_bits;
+ // Remaining error from uncoded frames in a gf group. Two pass use only
+ int64_t gf_group_error_left;
+
+ // Projected total bits available for a key frame group of frames
+ int64_t kf_group_bits;
+
+ // Error score of frames still to be coded in kf group
+ int64_t kf_group_error_left;
+
+ // Projected Bits available for a group of frames including 1 GF or ARF
+ int64_t gf_group_bits;
+ // Bits for the golden frame or ARF - 2 pass only
+ int gf_bits;
+ int alt_extra_bits;
+
+ int sr_update_lag;
+ double est_max_qcorrection_factor;
+ } twopass;
+
+ YV12_BUFFER_CONFIG alt_ref_buffer;
+ YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS];
+ int fixed_divide[512];
+
+#if CONFIG_INTERNAL_STATS
+ int count;
+ double total_y;
+ double total_u;
+ double total_v;
+ double total;
+ double total_sq_error;
+ double totalp_y;
+ double totalp_u;
+ double totalp_v;
+ double totalp;
+ double total_sq_error2;
+ int bytes;
+ double summed_quality;
+ double summed_weights;
+ double summedp_quality;
+ double summedp_weights;
+ unsigned int tot_recode_hits;
+
+
+ double total_ssimg_y;
+ double total_ssimg_u;
+ double total_ssimg_v;
+ double total_ssimg_all;
+
+ int b_calculate_ssimg;
+#endif
+ int b_calculate_psnr;
+
+ // Per MB activity measurement
+ unsigned int activity_avg;
+ unsigned int *mb_activity_map;
+ int *mb_norm_activity_map;
+ int output_partition;
+
+ /* force next frame to intra when kf_auto says so */
+ int force_next_frame_intra;
+
+ int droppable;
+
+ int dummy_packing; /* flag to indicate if packing is dummy */
+
+ unsigned int switchable_interp_count[VP9_SWITCHABLE_FILTERS + 1]
+ [VP9_SWITCHABLE_FILTERS];
+ unsigned int best_switchable_interp_count[VP9_SWITCHABLE_FILTERS];
+
+ int initial_width;
+ int initial_height;
+
+#if CONFIG_MULTIPLE_ARF
+ // ARF tracking variables.
+ int multi_arf_enabled;
+ unsigned int frame_coding_order_period;
+ unsigned int new_frame_coding_order_period;
+ int frame_coding_order[MAX_LAG_BUFFERS * 2];
+ int arf_buffer_idx[MAX_LAG_BUFFERS * 3 / 2];
+ int arf_weight[MAX_LAG_BUFFERS];
+ int arf_buffered;
+ int this_frame_weight;
+ int max_arf_level;
+#endif
+
+#ifdef ENTROPY_STATS
+ int64_t mv_ref_stats[INTER_MODE_CONTEXTS][VP9_INTER_MODES - 1][2];
+#endif
+} VP9_COMP;
+
+static int get_ref_frame_idx(VP9_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
+ if (ref_frame == LAST_FRAME) {
+ return cpi->lst_fb_idx;
+ } else if (ref_frame == GOLDEN_FRAME) {
+ return cpi->gld_fb_idx;
+ } else {
+ return cpi->alt_fb_idx;
+ }
+}
+
+void vp9_encode_frame(VP9_COMP *cpi);
+
+void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
+ unsigned long *size);
+
+void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x);
+
+void vp9_set_speed_features(VP9_COMP *cpi);
+
+extern int vp9_calc_ss_err(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest);
+
+extern void vp9_alloc_compressor_data(VP9_COMP *cpi);
+
+#if CONFIG_DEBUG
+#define CHECK_MEM_ERROR(lval,expr) do {\
+ lval = (expr); \
+ if(!lval) \
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,\
+ "Failed to allocate "#lval" at %s:%d", \
+ __FILE__,__LINE__);\
+ } while(0)
+#else
+#define CHECK_MEM_ERROR(lval,expr) do {\
+ lval = (expr); \
+ if(!lval) \
+ vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,\
+ "Failed to allocate "#lval);\
+ } while(0)
+#endif
+
+#endif // VP9_ENCODER_VP9_ONYX_INT_H_
diff --git a/libvpx/vp9/encoder/vp9_picklpf.c b/libvpx/vp9/encoder/vp9_picklpf.c
new file mode 100644
index 0000000..a87d058
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_picklpf.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <limits.h>
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/encoder/vp9_picklpf.h"
+#include "vp9/encoder/vp9_quantize.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vp9/common/vp9_alloccommon.h"
+#include "vp9/common/vp9_loopfilter.h"
+#include "./vpx_scale_rtcd.h"
+
+void vp9_yv12_copy_partial_frame_c(YV12_BUFFER_CONFIG *src_ybc,
+ YV12_BUFFER_CONFIG *dst_ybc, int Fraction) {
+ uint8_t *src_y, *dst_y;
+ int yheight;
+ int ystride;
+ int yoffset;
+ int linestocopy;
+
+ assert(src_ybc->y_stride == dst_ybc->y_stride);
+ yheight = src_ybc->y_height;
+ ystride = src_ybc->y_stride;
+
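+  // Operate on a band of 1 / 2^Fraction of the frame height, rounded down
+  // to a multiple of 16 lines (minimum 16).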
+ linestocopy = (yheight >> (Fraction + 4));
+
+ if (linestocopy < 1)
+ linestocopy = 1;
+
+ linestocopy <<= 4;
+
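+  // Position the band so it starts 8 lines above the 16-aligned vertical
+  // midpoint of the frame.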
+ yoffset = ystride * ((yheight >> 5) * 16 - 8);
+ src_y = src_ybc->y_buffer + yoffset;
+ dst_y = dst_ybc->y_buffer + yoffset;
+
+ vpx_memcpy(dst_y, src_y, ystride * (linestocopy + 16));
+}
+
+static int calc_partial_ssl_err(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest, int Fraction) {
+ int i, j;
+ int Total = 0;
+ int srcoffset, dstoffset;
+ uint8_t *src = source->y_buffer;
+ uint8_t *dst = dest->y_buffer;
+
+ int linestocopy = (source->y_height >> (Fraction + 4));
+
+ if (linestocopy < 1)
+ linestocopy = 1;
+
+ linestocopy <<= 4;
+
+
+ srcoffset = source->y_stride * (dest->y_height >> 5) * 16;
+ dstoffset = dest->y_stride * (dest->y_height >> 5) * 16;
+
+ src += srcoffset;
+ dst += dstoffset;
+
+  // Loop through the raw and reconstructed Y plane data, summing the
+  // squared differences.
+ for (i = 0; i < linestocopy; i += 16) {
+ for (j = 0; j < source->y_width; j += 16) {
+ unsigned int sse;
+ Total += vp9_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
+ &sse);
+ }
+
+ src += 16 * source->y_stride;
+ dst += 16 * dest->y_stride;
+ }
+
+ return Total;
+}
+
+// Enforce a minimum filter level based upon baseline Q
+static int get_min_filter_level(VP9_COMP *cpi, int base_qindex) {
+ int min_filter_level;
+ /*int q = (int) vp9_convert_qindex_to_q(base_qindex);
+
+ if (cpi->source_alt_ref_active && cpi->common.refresh_golden_frame && !cpi->common.refresh_alt_ref_frame)
+ min_filter_level = 0;
+ else
+ {
+ if (q <= 10)
+ min_filter_level = 0;
+ else if (q <= 64)
+ min_filter_level = 1;
+ else
+ min_filter_level = (q >> 6);
+ }
+ */
+ min_filter_level = 0;
+
+ return min_filter_level;
+}
+
+// Enforce a maximum filter level based upon baseline Q
+static int get_max_filter_level(VP9_COMP *cpi, int base_qindex) {
+  // PGW August 2006: the highest filter values are almost always a bad idea.
+
+  // jbb chg: 20100118 - not so any more with this overquant stuff; allow
+  // high values when lots of intra is coming in.
+ int max_filter_level = MAX_LOOP_FILTER;// * 3 / 4;
+ (void)base_qindex;
+
+ if (cpi->twopass.section_intra_rating > 8)
+ max_filter_level = MAX_LOOP_FILTER * 3 / 4;
+
+ return max_filter_level;
+}
+
+
+// Stub function for now; Alt LF is not used.
+void vp9_set_alt_lf_level(VP9_COMP *cpi, int filt_val) {
+}
+
+void vp9_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+
+ int best_err = 0;
+ int filt_err = 0;
+ int min_filter_level = get_min_filter_level(cpi, cm->base_qindex);
+ int max_filter_level = get_max_filter_level(cpi, cm->base_qindex);
+
+ int filter_step;
+ int filt_high = 0;
+ int filt_mid = cm->filter_level; // Start search at previous frame filter level
+ int filt_low = 0;
+ int filt_best;
+ int filt_direction = 0;
+
+ int Bias = 0; // Bias against raising loop filter and in favour of lowering it
+
+ // Make a copy of the unfiltered / processed recon buffer
+ vp8_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
+
+ if (cm->frame_type == KEY_FRAME)
+ cm->sharpness_level = 0;
+ else
+ cm->sharpness_level = cpi->oxcf.Sharpness;
+
+ // Start the search at the previous frame filter level unless it is now out of range.
+ filt_mid = cm->filter_level;
+
+ if (filt_mid < min_filter_level)
+ filt_mid = min_filter_level;
+ else if (filt_mid > max_filter_level)
+ filt_mid = max_filter_level;
+
+ // Define the initial step size
+ filter_step = (filt_mid < 16) ? 4 : filt_mid / 4;
+
+ // Get baseline error score
+ vp9_set_alt_lf_level(cpi, filt_mid);
+ vp9_loop_filter_frame(cm, &cpi->mb.e_mbd, filt_mid, 1);
+
+ best_err = vp9_calc_ss_err(sd, cm->frame_to_show);
+ filt_best = filt_mid;
+
+ // Re-instate the unfiltered frame
+ vp8_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
+
+ while (filter_step > 0) {
+ Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step; // PGW change 12/12/06 for small images
+
+    // jbb chg: 20100118 - in sections with lots of new material coming in,
+    // don't bias as much towards a low filter value.
+ if (cpi->twopass.section_intra_rating < 20)
+ Bias = Bias * cpi->twopass.section_intra_rating / 20;
+
+ // yx, bias less for large block size
+ if (cpi->common.txfm_mode != ONLY_4X4)
+ Bias >>= 1;
+
+ filt_high = ((filt_mid + filter_step) > max_filter_level) ? max_filter_level : (filt_mid + filter_step);
+ filt_low = ((filt_mid - filter_step) < min_filter_level) ? min_filter_level : (filt_mid - filter_step);
+
+ if ((filt_direction <= 0) && (filt_low != filt_mid)) {
+ // Get Low filter error score
+ vp9_set_alt_lf_level(cpi, filt_low);
+ vp9_loop_filter_frame(cm, &cpi->mb.e_mbd, filt_low, 1);
+
+ filt_err = vp9_calc_ss_err(sd, cm->frame_to_show);
+
+ // Re-instate the unfiltered frame
+ vp8_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
+
+ // If value is close to the best so far then bias towards a lower loop filter value.
+ if ((filt_err - Bias) < best_err) {
+ // Was it actually better than the previous best?
+ if (filt_err < best_err)
+ best_err = filt_err;
+
+ filt_best = filt_low;
+ }
+ }
+
+ // Now look at filt_high
+ if ((filt_direction >= 0) && (filt_high != filt_mid)) {
+ vp9_set_alt_lf_level(cpi, filt_high);
+ vp9_loop_filter_frame(cm, &cpi->mb.e_mbd, filt_high, 1);
+
+ filt_err = vp9_calc_ss_err(sd, cm->frame_to_show);
+
+ // Re-instate the unfiltered frame
+ vp8_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
+
+ // Was it better than the previous best?
+ if (filt_err < (best_err - Bias)) {
+ best_err = filt_err;
+ filt_best = filt_high;
+ }
+ }
+
+    // Halve the step distance if the best filter value was the same as last time
+ if (filt_best == filt_mid) {
+ filter_step = filter_step / 2;
+ filt_direction = 0;
+ } else {
+ filt_direction = (filt_best < filt_mid) ? -1 : 1;
+ filt_mid = filt_best;
+ }
+ }
+
+ cm->filter_level = filt_best;
+}
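
vp9_pick_filter_level() above runs a coarse-to-fine search: probe one
filter_step either side of the current level, move towards whichever side
improves the (bias-adjusted) sum-squared error, and halve the step once
neither side wins. A minimal standalone sketch of that pattern, with the
codec state abstracted behind a toy err() surface and the Bias term omitted:

    #include <stdio.h>

    /* Toy error surface standing in for vp9_calc_ss_err(); minimum at 23. */
    static int err(int level) {
      return (level - 23) * (level - 23);
    }

    static int pick_level(int min_lv, int max_lv, int start) {
      int mid = start < min_lv ? min_lv : (start > max_lv ? max_lv : start);
      int step = (mid < 16) ? 4 : mid / 4;  /* initial step rule from above */
      int best = mid, best_err = err(mid);
      int dir = 0;                          /* filt_direction equivalent */

      while (step > 0) {
        const int lo = (mid - step < min_lv) ? min_lv : mid - step;
        const int hi = (mid + step > max_lv) ? max_lv : mid + step;
        if (dir <= 0 && lo != mid) {
          const int e = err(lo);
          if (e < best_err) { best_err = e; best = lo; }
        }
        if (dir >= 0 && hi != mid) {
          const int e = err(hi);
          if (e < best_err) { best_err = e; best = hi; }
        }
        if (best == mid) {                  /* no winner: refine the step */
          step /= 2;
          dir = 0;
        } else {                            /* keep moving the same way */
          dir = (best < mid) ? -1 : 1;
          mid = best;
        }
      }
      return best;
    }

    int main(void) {
      printf("picked level %d\n", pick_level(0, 63, 32));  /* prints 23 */
      return 0;
    }
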
diff --git a/libvpx/vp9/encoder/vp9_picklpf.h b/libvpx/vp9/encoder/vp9_picklpf.h
new file mode 100644
index 0000000..698cb8d
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_picklpf.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_PICKLPF_H_
+#define VP9_ENCODER_VP9_PICKLPF_H_
+
+struct yv12_buffer_config;
+struct VP9_COMP;
+
+void vp9_set_alt_lf_level(struct VP9_COMP *cpi, int filt_val);
+
+void vp9_pick_filter_level(struct yv12_buffer_config *sd,
+ struct VP9_COMP *cpi);
+
+#endif // VP9_ENCODER_VP9_PICKLPF_H_
diff --git a/libvpx/vp9/encoder/vp9_psnr.c b/libvpx/vp9/encoder/vp9_psnr.c
new file mode 100644
index 0000000..9439434
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_psnr.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vpx_scale/yv12config.h"
+#include "math.h"
+
+#define MAX_PSNR 100
+
+double vp9_mse2psnr(double samples, double peak, double mse) {
+ double psnr;
+
+ if (mse > 0.0)
+ psnr = 10.0 * log10(peak * peak * samples / mse);
+ else
+    psnr = MAX_PSNR;      // Limit to prevent division by zero
+
+ if (psnr > MAX_PSNR)
+ psnr = MAX_PSNR;
+
+ return psnr;
+}
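
vp9_mse2psnr() computes 10 * log10(peak^2 * samples / mse), clamped to
100 dB. For the formula to yield a true PSNR, the mse argument has to be the
total summed squared error, so that mse / samples is the actual mean. A small
usage check (assuming the function above is linked in):

    #include <stdio.h>

    double vp9_mse2psnr(double samples, double peak, double mse);

    int main(void) {
      /* An 8x8 block of 8-bit samples with every pixel off by exactly one:
         sse = 64, so PSNR = 10 * log10(255^2 * 64 / 64), about 48.13 dB. */
      printf("%.2f dB\n", vp9_mse2psnr(64, 255, 64));

      /* Identical blocks: mse == 0 is clamped to MAX_PSNR (100 dB). */
      printf("%.2f dB\n", vp9_mse2psnr(64, 255, 0));
      return 0;
    }
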
diff --git a/libvpx/vp9/encoder/vp9_psnr.h b/libvpx/vp9/encoder/vp9_psnr.h
new file mode 100644
index 0000000..15dd836
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_psnr.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_PSNR_H_
+#define VP9_ENCODER_VP9_PSNR_H_
+
+double vp9_mse2psnr(double samples, double peak, double mse);
+
+#endif // VP9_ENCODER_VP9_PSNR_H_
diff --git a/libvpx/vp9/encoder/vp9_quantize.c b/libvpx/vp9/encoder/vp9_quantize.c
new file mode 100644
index 0000000..53d8be7
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_quantize.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include "vpx_mem/vpx_mem.h"
+
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/encoder/vp9_quantize.h"
+#include "vp9/common/vp9_quant_common.h"
+
+#include "vp9/common/vp9_seg_common.h"
+
+#ifdef ENC_DEBUG
+extern int enc_debug;
+#endif
+
+static INLINE int plane_idx(int plane) {
+ return plane == 0 ? 0 :
+ plane == 1 ? 16 : 20;
+}
+
+static void quantize(int16_t *zbin_boost_orig_ptr,
+ int16_t *coeff_ptr, int n_coeffs, int skip_block,
+ int16_t *zbin_ptr, int16_t *round_ptr, int16_t *quant_ptr,
+ uint8_t *quant_shift_ptr,
+ int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr,
+ int16_t *dequant_ptr, int zbin_oq_value,
+ uint16_t *eob_ptr,
+ const int *scan, int mul) {
+ int i, rc, eob;
+ int zbin;
+ int x, y, z, sz;
+ int zero_run = 0;
+ int16_t *zbin_boost_ptr = zbin_boost_orig_ptr;
+
+ vpx_memset(qcoeff_ptr, 0, n_coeffs*sizeof(int16_t));
+ vpx_memset(dqcoeff_ptr, 0, n_coeffs*sizeof(int16_t));
+
+ eob = -1;
+
+ if (!skip_block) {
+ for (i = 0; i < n_coeffs; i++) {
+ rc = scan[i];
+ z = coeff_ptr[rc] * mul;
+
+ zbin = (zbin_ptr[rc != 0] + zbin_boost_ptr[zero_run] + zbin_oq_value);
+ zero_run += (zero_run < 15);
+
+ sz = (z >> 31); // sign of z
+ x = (z ^ sz) - sz; // x = abs(z)
+
+ if (x >= zbin) {
+ x += (round_ptr[rc != 0]);
+ y = ((int)(((int)(x * quant_ptr[rc != 0]) >> 16) + x))
+ >> quant_shift_ptr[rc != 0]; // quantize (x)
+ x = (y ^ sz) - sz; // get the sign back
+ qcoeff_ptr[rc] = x; // write to destination
+ dqcoeff_ptr[rc] = x * dequant_ptr[rc != 0] / mul; // dequantized value
+
+ if (y) {
+        eob = i;                                // last nonzero coefficient
+ zero_run = 0;
+ }
+ }
+ }
+ }
+
+ *eob_ptr = eob + 1;
+}
+
+void vp9_quantize(MACROBLOCK *mb, int plane, int block, int n_coeffs,
+ TX_TYPE tx_type) {
+ MACROBLOCKD *const xd = &mb->e_mbd;
+ const int mul = n_coeffs == 1024 ? 2 : 1;
+ const int *scan;
+
+ // These contexts may be available in the caller
+ switch (n_coeffs) {
+ case 4 * 4:
+ scan = get_scan_4x4(tx_type);
+ break;
+ case 8 * 8:
+ scan = get_scan_8x8(tx_type);
+ break;
+ case 16 * 16:
+ scan = get_scan_16x16(tx_type);
+ break;
+ default:
+ scan = vp9_default_scan_32x32;
+ break;
+ }
+
+ quantize(mb->plane[plane].zrun_zbin_boost,
+ BLOCK_OFFSET(mb->plane[plane].coeff, block, 16),
+ n_coeffs, mb->skip_block,
+ mb->plane[plane].zbin,
+ mb->plane[plane].round,
+ mb->plane[plane].quant,
+ mb->plane[plane].quant_shift,
+ BLOCK_OFFSET(xd->plane[plane].qcoeff, block, 16),
+ BLOCK_OFFSET(xd->plane[plane].dqcoeff, block, 16),
+ xd->plane[plane].dequant,
+ mb->plane[plane].zbin_extra,
+ &xd->plane[plane].eobs[block],
+ scan, mul);
+}
+
+void vp9_regular_quantize_b_4x4(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
+ int y_blocks) {
+ MACROBLOCKD *const xd = &mb->e_mbd;
+ const struct plane_block_idx pb_idx = plane_block_idx(y_blocks, b_idx);
+ const int *pt_scan = get_scan_4x4(tx_type);
+
+ quantize(mb->plane[pb_idx.plane].zrun_zbin_boost,
+ BLOCK_OFFSET(mb->plane[pb_idx.plane].coeff, pb_idx.block, 16),
+ 16, mb->skip_block,
+ mb->plane[pb_idx.plane].zbin,
+ mb->plane[pb_idx.plane].round,
+ mb->plane[pb_idx.plane].quant,
+ mb->plane[pb_idx.plane].quant_shift,
+ BLOCK_OFFSET(xd->plane[pb_idx.plane].qcoeff, pb_idx.block, 16),
+ BLOCK_OFFSET(xd->plane[pb_idx.plane].dqcoeff, pb_idx.block, 16),
+ xd->plane[pb_idx.plane].dequant,
+ mb->plane[pb_idx.plane].zbin_extra,
+ &xd->plane[pb_idx.plane].eobs[pb_idx.block],
+ pt_scan, 1);
+}
+
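+// For example, d = 32 gives l = 5 and t = 1 + (1 << 21) / 32 = 65537, so
+// *quant = 1 and *shift = 5; y = (((x * quant) >> 16) + x) >> shift then
+// reduces to x / 32, i.e. a Q16 fixed-point division by the step size.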
+static void invert_quant(int16_t *quant, uint8_t *shift, int d) {
+ unsigned t;
+ int l;
+ t = d;
+ for (l = 0; t > 1; l++)
+ t >>= 1;
+ t = 1 + (1 << (16 + l)) / d;
+ *quant = (int16_t)(t - (1 << 16));
+ *shift = l;
+}
+
+void vp9_init_quantizer(VP9_COMP *cpi) {
+ int i;
+ int quant_val;
+ int quant_uv_val;
+#if CONFIG_ALPHA
+ int quant_alpha_val;
+#endif
+ int q;
+
+ static const int zbin_boost[16] = { 0, 0, 0, 8, 8, 8, 10, 12,
+ 14, 16, 20, 24, 28, 32, 36, 40 };
+
+ for (q = 0; q < QINDEX_RANGE; q++) {
+ int qzbin_factor = (vp9_dc_quant(q, 0) < 148) ? 84 : 80;
+ int qrounding_factor = 48;
+ if (q == 0) {
+ qzbin_factor = 64;
+ qrounding_factor = 64;
+ }
+ // dc values
+ quant_val = vp9_dc_quant(q, cpi->common.y_dc_delta_q);
+ invert_quant(cpi->y_quant[q] + 0, cpi->y_quant_shift[q] + 0, quant_val);
+ cpi->y_zbin[q][0] = ROUND_POWER_OF_TWO(qzbin_factor * quant_val, 7);
+ cpi->y_round[q][0] = (qrounding_factor * quant_val) >> 7;
+ cpi->common.y_dequant[q][0] = quant_val;
+ cpi->zrun_zbin_boost_y[q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ quant_val = vp9_dc_quant(q, cpi->common.uv_dc_delta_q);
+ invert_quant(cpi->uv_quant[q] + 0, cpi->uv_quant_shift[q] + 0, quant_val);
+ cpi->uv_zbin[q][0] = ROUND_POWER_OF_TWO(qzbin_factor * quant_val, 7);
+ cpi->uv_round[q][0] = (qrounding_factor * quant_val) >> 7;
+ cpi->common.uv_dequant[q][0] = quant_val;
+ cpi->zrun_zbin_boost_uv[q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+#if CONFIG_ALPHA
+ quant_val = vp9_dc_quant(q, cpi->common.a_dc_delta_q);
+ invert_quant(cpi->a_quant[q] + 0, cpi->a_quant_shift[q] + 0, quant_val);
+ cpi->a_zbin[q][0] = ROUND_POWER_OF_TWO(qzbin_factor * quant_val, 7);
+ cpi->a_round[q][0] = (qrounding_factor * quant_val) >> 7;
+ cpi->common.a_dequant[q][0] = quant_val;
+ cpi->zrun_zbin_boost_a[q][0] = (quant_val * zbin_boost[0]) >> 7;
+#endif
+
+ quant_val = vp9_ac_quant(q, 0);
+ cpi->common.y_dequant[q][1] = quant_val;
+ quant_uv_val = vp9_ac_quant(q, cpi->common.uv_ac_delta_q);
+ cpi->common.uv_dequant[q][1] = quant_uv_val;
+#if CONFIG_ALPHA
+ quant_alpha_val = vp9_ac_quant(q, cpi->common.a_ac_delta_q);
+ cpi->common.a_dequant[q][1] = quant_alpha_val;
+#endif
+    // all the 4x4 ac values
+ for (i = 1; i < 16; i++) {
+ int rc = vp9_default_scan_4x4[i];
+
+ invert_quant(cpi->y_quant[q] + rc, cpi->y_quant_shift[q] + rc, quant_val);
+ cpi->y_zbin[q][rc] = ROUND_POWER_OF_TWO(qzbin_factor * quant_val, 7);
+ cpi->y_round[q][rc] = (qrounding_factor * quant_val) >> 7;
+ cpi->zrun_zbin_boost_y[q][i] =
+ ROUND_POWER_OF_TWO(quant_val * zbin_boost[i], 7);
+
+ invert_quant(cpi->uv_quant[q] + rc, cpi->uv_quant_shift[q] + rc,
+ quant_uv_val);
+ cpi->uv_zbin[q][rc] = ROUND_POWER_OF_TWO(qzbin_factor * quant_uv_val, 7);
+ cpi->uv_round[q][rc] = (qrounding_factor * quant_uv_val) >> 7;
+ cpi->zrun_zbin_boost_uv[q][i] =
+ ROUND_POWER_OF_TWO(quant_uv_val * zbin_boost[i], 7);
+
+#if CONFIG_ALPHA
+ invert_quant(cpi->a_quant[q] + rc, cpi->a_quant_shift[q] + rc,
+ quant_alpha_val);
+ cpi->a_zbin[q][rc] =
+ ROUND_POWER_OF_TWO(qzbin_factor * quant_alpha_val, 7);
+ cpi->a_round[q][rc] = (qrounding_factor * quant_alpha_val) >> 7;
+ cpi->zrun_zbin_boost_a[q][i] =
+ ROUND_POWER_OF_TWO(quant_alpha_val * zbin_boost[i], 7);
+#endif
+ }
+ }
+}
+
+void vp9_mb_init_quantizer(VP9_COMP *cpi, MACROBLOCK *x) {
+ int i;
+ MACROBLOCKD *xd = &x->e_mbd;
+ int zbin_extra;
+ int segment_id = xd->mode_info_context->mbmi.segment_id;
+ const int qindex = vp9_get_qindex(xd, segment_id, cpi->common.base_qindex);
+
+ // Y
+ zbin_extra = (cpi->common.y_dequant[qindex][1] *
+ (cpi->zbin_mode_boost + x->act_zbin_adj)) >> 7;
+
+ x->plane[0].quant = cpi->y_quant[qindex];
+ x->plane[0].quant_shift = cpi->y_quant_shift[qindex];
+ x->plane[0].zbin = cpi->y_zbin[qindex];
+ x->plane[0].round = cpi->y_round[qindex];
+ x->plane[0].zrun_zbin_boost = cpi->zrun_zbin_boost_y[qindex];
+ x->plane[0].zbin_extra = (int16_t)zbin_extra;
+ x->e_mbd.plane[0].dequant = cpi->common.y_dequant[qindex];
+
+ // UV
+ zbin_extra = (cpi->common.uv_dequant[qindex][1] *
+ (cpi->zbin_mode_boost + x->act_zbin_adj)) >> 7;
+
+ for (i = 1; i < 3; i++) {
+ x->plane[i].quant = cpi->uv_quant[qindex];
+ x->plane[i].quant_shift = cpi->uv_quant_shift[qindex];
+ x->plane[i].zbin = cpi->uv_zbin[qindex];
+ x->plane[i].round = cpi->uv_round[qindex];
+ x->plane[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[qindex];
+ x->plane[i].zbin_extra = (int16_t)zbin_extra;
+ x->e_mbd.plane[i].dequant = cpi->common.uv_dequant[qindex];
+ }
+
+#if CONFIG_ALPHA
+ x->plane[3].quant = cpi->a_quant[qindex];
+ x->plane[3].quant_shift = cpi->a_quant_shift[qindex];
+ x->plane[3].zbin = cpi->a_zbin[qindex];
+ x->plane[3].round = cpi->a_round[qindex];
+ x->plane[3].zrun_zbin_boost = cpi->zrun_zbin_boost_a[qindex];
+ x->plane[3].zbin_extra = (int16_t)zbin_extra;
+ x->e_mbd.plane[3].dequant = cpi->common.a_dequant[qindex];
+#endif
+
+ x->skip_block = vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP);
+
+ /* save this macroblock QIndex for vp9_update_zbin_extra() */
+ x->e_mbd.q_index = qindex;
+}
+
+void vp9_update_zbin_extra(VP9_COMP *cpi, MACROBLOCK *x) {
+ const int qindex = x->e_mbd.q_index;
+ const int y_zbin_extra = (cpi->common.y_dequant[qindex][1] *
+ (cpi->zbin_mode_boost + x->act_zbin_adj)) >> 7;
+ const int uv_zbin_extra = (cpi->common.uv_dequant[qindex][1] *
+ (cpi->zbin_mode_boost + x->act_zbin_adj)) >> 7;
+
+ x->plane[0].zbin_extra = (int16_t)y_zbin_extra;
+ x->plane[1].zbin_extra = (int16_t)uv_zbin_extra;
+ x->plane[2].zbin_extra = (int16_t)uv_zbin_extra;
+}
+
+void vp9_frame_init_quantizer(VP9_COMP *cpi) {
+ // Clear Zbin mode boost for default case
+ cpi->zbin_mode_boost = 0;
+
+ // MB level quantizer setup
+ vp9_mb_init_quantizer(cpi, &cpi->mb);
+}
+
+void vp9_set_quantizer(struct VP9_COMP *cpi, int Q) {
+ VP9_COMMON *cm = &cpi->common;
+
+ cm->base_qindex = Q;
+
+  // If any of the delta_q values change, an update flag will
+  // have to be set.
+ cm->y_dc_delta_q = 0;
+ cm->uv_dc_delta_q = 0;
+ cm->uv_ac_delta_q = 0;
+
+ // quantizer has to be reinitialized if any delta_q changes.
+  // As there are none here for now, this code is inactive.
+ // if(update)
+ // vp9_init_quantizer(cpi);
+}
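
The scalar quantizer in quantize() above is a dead-zone design: |z| has to
clear the zbin threshold before anything is coded, a rounding term is then
added, and the division by the step size is carried out with the Q16
reciprocal built by invert_quant(). A minimal standalone model of one
coefficient under those assumptions (the zero-run zbin boost and the 32x32
mul factor are left out):

    #include <stdint.h>
    #include <stdio.h>

    /* Same fixed-point reciprocal construction as invert_quant() above. */
    static void invert_quant(int16_t *quant, uint8_t *shift, int d) {
      unsigned t = d;
      int l;
      for (l = 0; t > 1; l++)
        t >>= 1;
      t = 1 + (1 << (16 + l)) / d;
      *quant = (int16_t)(t - (1 << 16));
      *shift = l;
    }

    int main(void) {
      const int dequant = 32;                /* quantizer step size */
      const int zbin = (84 * dequant) >> 7;  /* 21: dead-zone threshold */
      const int round = (48 * dequant) >> 7; /* 12: rounding term */
      const int z = 100;                     /* input transform coefficient */
      const int sz = z >> 31;                /* all-ones if z < 0, else 0 */
      int x = (z ^ sz) - sz;                 /* |z| */
      int qc = 0, dqc = 0;
      int16_t quant;
      uint8_t shift;

      invert_quant(&quant, &shift, dequant); /* quant = 1, shift = 5 */

      if (x >= zbin) {
        int y;
        x += round;                              /* 112 */
        y = (((x * quant) >> 16) + x) >> shift;  /* ~ x / dequant = 3 */
        qc = (y ^ sz) - sz;                      /* restore the sign */
        dqc = qc * dequant;                      /* reconstruction: 96 */
      }
      printf("qcoeff = %d, dqcoeff = %d\n", qc, dqc);  /* 3, 96 */
      return 0;
    }
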
diff --git a/libvpx/vp9/encoder/vp9_quantize.h b/libvpx/vp9/encoder/vp9_quantize.h
new file mode 100644
index 0000000..2b1eeab
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_quantize.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_QUANTIZE_H_
+#define VP9_ENCODER_VP9_QUANTIZE_H_
+
+#include "vp9/encoder/vp9_block.h"
+
+#define prototype_quantize_block(sym) \
+ void (sym)(MACROBLOCK *mb, int b_idx)
+
+#define prototype_quantize_block_pair(sym) \
+ void (sym)(MACROBLOCK *mb, int b_idx1, int b_idx2)
+
+#define prototype_quantize_mb(sym) \
+ void (sym)(MACROBLOCK *x)
+
+void vp9_quantize(MACROBLOCK *mb, int plane, int block, int n_coefs,
+ TX_TYPE tx_type);
+
+void vp9_regular_quantize_b_4x4_pair(MACROBLOCK *mb, int b_idx1, int b_idx2,
+ int y_blocks);
+void vp9_regular_quantize_b_4x4(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
+ int y_blocks);
+void vp9_regular_quantize_b_8x8(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
+ int y_blocks);
+struct VP9_COMP;
+
+extern void vp9_set_quantizer(struct VP9_COMP *cpi, int Q);
+
+extern void vp9_frame_init_quantizer(struct VP9_COMP *cpi);
+
+extern void vp9_update_zbin_extra(struct VP9_COMP *cpi, MACROBLOCK *x);
+
+extern void vp9_mb_init_quantizer(struct VP9_COMP *cpi, MACROBLOCK *x);
+
+extern void vp9_init_quantizer(struct VP9_COMP *cpi);
+
+#endif // VP9_ENCODER_VP9_QUANTIZE_H_
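
Read alongside vp9_quantize.c, these entry points imply a cadence:
vp9_init_quantizer() builds the per-qindex tables once, vp9_set_quantizer()
and vp9_frame_init_quantizer() run per frame, and vp9_mb_init_quantizer() /
vp9_update_zbin_extra() refresh the per-macroblock pointers when the segment
or zbin boost changes. A hedged fragment of the per-frame step (the real
driver lives elsewhere in the encoder and is not part of this hunk):

    #include "vp9/encoder/vp9_quantize.h"

    /* Illustrative only: assumes an already-constructed encoder instance. */
    static void set_frame_q(struct VP9_COMP *cpi, int q) {
      vp9_set_quantizer(cpi, q);      /* record base_qindex and delta_qs */
      vp9_frame_init_quantizer(cpi);  /* clear zbin boost, re-seed cpi->mb */
    }
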
diff --git a/libvpx/vp9/encoder/vp9_ratectrl.c b/libvpx/vp9/encoder/vp9_ratectrl.c
new file mode 100644
index 0000000..430d3a8
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_ratectrl.c
@@ -0,0 +1,550 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <limits.h>
+#include <assert.h>
+#include <math.h>
+
+#include "vp9/common/vp9_alloccommon.h"
+#include "vp9/common/vp9_modecont.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/encoder/vp9_ratectrl.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vp9/encoder/vp9_encodemv.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/common/vp9_seg_common.h"
+
+#define MIN_BPB_FACTOR 0.005
+#define MAX_BPB_FACTOR 50
+
+// Bits Per MB at different Q (Multiplied by 512)
+#define BPER_MB_NORMBITS 9
+
+// % adjustment to target kf size based on separation from previous frame
+static const int kf_boost_seperation_adjustment[16] = {
+ 30, 40, 50, 55, 60, 65, 70, 75,
+ 80, 85, 90, 95, 100, 100, 100, 100,
+};
+
+static const int gf_adjust_table[101] = {
+ 100,
+ 115, 130, 145, 160, 175, 190, 200, 210, 220, 230,
+ 240, 260, 270, 280, 290, 300, 310, 320, 330, 340,
+ 350, 360, 370, 380, 390, 400, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+ 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
+};
+
+static const int gf_intra_usage_adjustment[20] = {
+ 125, 120, 115, 110, 105, 100, 95, 85, 80, 75,
+ 70, 65, 60, 55, 50, 50, 50, 50, 50, 50,
+};
+
+static const int gf_interval_table[101] = {
+ 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+};
+
+static const unsigned int prior_key_frame_weight[KEY_FRAME_CONTEXT] = { 1, 2, 3, 4, 5 };
+
+// These functions use formulaic calculations to make playing with the
+// quantizer tables easier. If necessary they can be replaced by lookup
+// tables if and when things settle down in the experimental bitstream.
+double vp9_convert_qindex_to_q(int qindex) {
+ // Convert the index to a real Q value (scaled down to match old Q values)
+ return vp9_ac_quant(qindex, 0) / 4.0;
+}
+
+int vp9_gfboost_qadjust(int qindex) {
+ const double q = vp9_convert_qindex_to_q(qindex);
+ return (int)((0.00000828 * q * q * q) +
+ (-0.0055 * q * q) +
+ (1.32 * q) + 79.3);
+}
+
+static int kfboost_qadjust(int qindex) {
+ const double q = vp9_convert_qindex_to_q(qindex);
+ return (int)((0.00000973 * q * q * q) +
+ (-0.00613 * q * q) +
+ (1.316 * q) + 121.2);
+}
+
+int vp9_bits_per_mb(FRAME_TYPE frame_type, int qindex,
+ double correction_factor) {
+ const double q = vp9_convert_qindex_to_q(qindex);
+ int enumerator = frame_type == KEY_FRAME ? 4000000 : 2500000;
+
+ // q based adjustment to baseline enumerator
+ enumerator += (int)(enumerator * q) >> 12;
+ return (int)(0.5 + (enumerator * correction_factor / q));
+}
+
+void vp9_save_coding_context(VP9_COMP *cpi) {
+ CODING_CONTEXT *const cc = &cpi->coding_context;
+ VP9_COMMON *cm = &cpi->common;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+
+ // Stores a snapshot of key state variables which can subsequently be
+ // restored with a call to vp9_restore_coding_context. These functions are
+ // intended for use in a re-code loop in vp9_compress_frame where the
+ // quantizer value is adjusted between loop iterations.
+
+ cc->nmvc = cm->fc.nmvc;
+ vp9_copy(cc->nmvjointcost, cpi->mb.nmvjointcost);
+ vp9_copy(cc->nmvcosts, cpi->mb.nmvcosts);
+ vp9_copy(cc->nmvcosts_hp, cpi->mb.nmvcosts_hp);
+
+ vp9_copy(cc->inter_mode_probs, cm->fc.inter_mode_probs);
+
+ vp9_copy(cc->y_mode_prob, cm->fc.y_mode_prob);
+ vp9_copy(cc->uv_mode_prob, cm->fc.uv_mode_prob);
+ vp9_copy(cc->partition_prob, cm->fc.partition_prob);
+
+ vp9_copy(cc->segment_pred_probs, cm->segment_pred_probs);
+
+ vp9_copy(cc->intra_inter_prob, cm->fc.intra_inter_prob);
+ vp9_copy(cc->comp_inter_prob, cm->fc.comp_inter_prob);
+ vp9_copy(cc->single_ref_prob, cm->fc.single_ref_prob);
+ vp9_copy(cc->comp_ref_prob, cm->fc.comp_ref_prob);
+
+ vpx_memcpy(cpi->coding_context.last_frame_seg_map_copy,
+ cm->last_frame_seg_map, (cm->mi_rows * cm->mi_cols));
+
+ vp9_copy(cc->last_ref_lf_deltas, xd->last_ref_lf_deltas);
+ vp9_copy(cc->last_mode_lf_deltas, xd->last_mode_lf_deltas);
+
+ vp9_copy(cc->coef_probs, cm->fc.coef_probs);
+ vp9_copy(cc->switchable_interp_prob, cm->fc.switchable_interp_prob);
+ vp9_copy(cc->tx_probs_8x8p, cm->fc.tx_probs_8x8p);
+ vp9_copy(cc->tx_probs_16x16p, cm->fc.tx_probs_16x16p);
+ vp9_copy(cc->tx_probs_32x32p, cm->fc.tx_probs_32x32p);
+ vp9_copy(cc->mbskip_probs, cm->fc.mbskip_probs);
+}
+
+void vp9_restore_coding_context(VP9_COMP *cpi) {
+ CODING_CONTEXT *const cc = &cpi->coding_context;
+ VP9_COMMON *cm = &cpi->common;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+
+ // Restore key state variables to the snapshot state stored in the
+ // previous call to vp9_save_coding_context.
+
+ cm->fc.nmvc = cc->nmvc;
+ vp9_copy(cpi->mb.nmvjointcost, cc->nmvjointcost);
+ vp9_copy(cpi->mb.nmvcosts, cc->nmvcosts);
+ vp9_copy(cpi->mb.nmvcosts_hp, cc->nmvcosts_hp);
+
+ vp9_copy(cm->fc.inter_mode_probs, cc->inter_mode_probs);
+
+ vp9_copy(cm->fc.y_mode_prob, cc->y_mode_prob);
+ vp9_copy(cm->fc.uv_mode_prob, cc->uv_mode_prob);
+ vp9_copy(cm->fc.partition_prob, cc->partition_prob);
+
+ vp9_copy(cm->segment_pred_probs, cc->segment_pred_probs);
+
+ vp9_copy(cm->fc.intra_inter_prob, cc->intra_inter_prob);
+ vp9_copy(cm->fc.comp_inter_prob, cc->comp_inter_prob);
+ vp9_copy(cm->fc.single_ref_prob, cc->single_ref_prob);
+ vp9_copy(cm->fc.comp_ref_prob, cc->comp_ref_prob);
+
+ vpx_memcpy(cm->last_frame_seg_map,
+ cpi->coding_context.last_frame_seg_map_copy,
+ (cm->mi_rows * cm->mi_cols));
+
+ vp9_copy(xd->last_ref_lf_deltas, cc->last_ref_lf_deltas);
+ vp9_copy(xd->last_mode_lf_deltas, cc->last_mode_lf_deltas);
+
+ vp9_copy(cm->fc.coef_probs, cc->coef_probs);
+ vp9_copy(cm->fc.switchable_interp_prob, cc->switchable_interp_prob);
+ vp9_copy(cm->fc.tx_probs_8x8p, cc->tx_probs_8x8p);
+ vp9_copy(cm->fc.tx_probs_16x16p, cc->tx_probs_16x16p);
+ vp9_copy(cm->fc.tx_probs_32x32p, cc->tx_probs_32x32p);
+ vp9_copy(cm->fc.mbskip_probs, cc->mbskip_probs);
+}
+
+void vp9_setup_key_frame(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+
+ vp9_setup_past_independence(cm, xd);
+
+ // interval before next GF
+ cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
+ /* All buffers are implicitly updated on key frames. */
+ cpi->refresh_golden_frame = 1;
+ cpi->refresh_alt_ref_frame = 1;
+}
+
+void vp9_setup_inter_frame(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ if (cm->error_resilient_mode || cm->intra_only)
+ vp9_setup_past_independence(cm, xd);
+
+ assert(cm->frame_context_idx < NUM_FRAME_CONTEXTS);
+ cm->fc = cm->frame_contexts[cm->frame_context_idx];
+}
+
+static int estimate_bits_at_q(int frame_kind, int q, int mbs,
+ double correction_factor) {
+ const int bpm = (int)(vp9_bits_per_mb(frame_kind, q, correction_factor));
+
+ // Attempt to retain reasonable accuracy without overflow. The cutoff is
+ // chosen such that the maximum product of Bpm and MBs fits 31 bits. The
+ // largest Bpm takes 20 bits.
+ return (mbs > (1 << 11)) ? (bpm >> BPER_MB_NORMBITS) * mbs
+ : (bpm * mbs) >> BPER_MB_NORMBITS;
+}
+
+
+static void calc_iframe_target_size(VP9_COMP *cpi) {
+ // boost defaults to half second
+ int target;
+
+ // Clear down mmx registers to allow floating point in what follows
+ vp9_clear_system_state(); // __asm emms;
+
+ // New Two pass RC
+ target = cpi->per_frame_bandwidth;
+
+ if (cpi->oxcf.rc_max_intra_bitrate_pct) {
+ int max_rate = cpi->per_frame_bandwidth
+ * cpi->oxcf.rc_max_intra_bitrate_pct / 100;
+
+ if (target > max_rate)
+ target = max_rate;
+ }
+
+ cpi->this_frame_target = target;
+}
+
+
+// Do the best we can to define the parameters for the next GF based
+// on what information we have available.
+//
+// In this experimental code only two pass is supported
+// so we just use the interval determined in the two pass code.
+static void calc_gf_params(VP9_COMP *cpi) {
+ // Set the gf interval
+ cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
+}
+
+
+static void calc_pframe_target_size(VP9_COMP *cpi) {
+ const int min_frame_target = MAX(cpi->min_frame_bandwidth,
+ cpi->av_per_frame_bandwidth >> 5);
+ if (cpi->refresh_alt_ref_frame) {
+ // Special alt reference frame case
+ // Per frame bit target for the alt ref frame
+ cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
+ cpi->this_frame_target = cpi->per_frame_bandwidth;
+ } else {
+    // Normal frames (gf and inter)
+ cpi->this_frame_target = cpi->per_frame_bandwidth;
+ }
+
+  // Sanity check that the total sum of adjustments is not above the maximum allowed.
+  // That is, having allowed for KF and GF penalties, we have not pushed the
+  // current interframe target too low. If the adjustment applied here cannot
+  // recover all the extra bits spent on the KF or GF, the remainder will have
+  // to be recovered over a longer time span via other buffer / rate control
+  // mechanisms.
+ if (cpi->this_frame_target < min_frame_target)
+ cpi->this_frame_target = min_frame_target;
+
+ if (!cpi->refresh_alt_ref_frame)
+ // Note the baseline target data rate for this inter frame.
+ cpi->inter_frame_target = cpi->this_frame_target;
+
+ // Adjust target frame size for Golden Frames:
+ if (cpi->frames_till_gf_update_due == 0) {
+ const int q = (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME]
+ : cpi->oxcf.fixed_q;
+
+ cpi->refresh_golden_frame = 1;
+
+ calc_gf_params(cpi);
+
+    // If we are using an alternate ref instead of a gf then do not apply the
+    // boost; it will instead be applied to the altref update.
+    // Jim's modified boost
+ if (!cpi->source_alt_ref_active) {
+ if (cpi->oxcf.fixed_q < 0) {
+ // The spend on the GF is defined in the two pass code
+ // for two pass encodes
+ cpi->this_frame_target = cpi->per_frame_bandwidth;
+ } else {
+ cpi->this_frame_target =
+ (estimate_bits_at_q(1, q, cpi->common.MBs, 1.0)
+ * cpi->last_boost) / 100;
+ }
+ } else {
+ // If there is an active ARF at this location use the minimum
+ // bits on this frame even if it is a constructed arf.
+      // The active maximum quantizer ensures that an appropriate
+ // number of bits will be spent if needed for constructed ARFs.
+ cpi->this_frame_target = 0;
+ }
+ }
+}
+
+
+void vp9_update_rate_correction_factors(VP9_COMP *cpi, int damp_var) {
+ const int q = cpi->common.base_qindex;
+ int correction_factor = 100;
+ double rate_correction_factor;
+ double adjustment_limit;
+
+ int projected_size_based_on_q = 0;
+
+ // Clear down mmx registers to allow floating point in what follows
+ vp9_clear_system_state(); // __asm emms;
+
+ if (cpi->common.frame_type == KEY_FRAME) {
+ rate_correction_factor = cpi->key_frame_rate_correction_factor;
+ } else {
+ if (cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame)
+ rate_correction_factor = cpi->gf_rate_correction_factor;
+ else
+ rate_correction_factor = cpi->rate_correction_factor;
+ }
+
+ // Work out how big we would have expected the frame to be at this Q given
+ // the current correction factor.
+ // Stay in double to avoid int overflow when values are large
+ projected_size_based_on_q = estimate_bits_at_q(cpi->common.frame_type, q,
+ cpi->common.MBs,
+ rate_correction_factor);
+
+ // Work out a size correction factor.
+ // if ( cpi->this_frame_target > 0 )
+ // correction_factor = (100 * cpi->projected_frame_size) / cpi->this_frame_target;
+ if (projected_size_based_on_q > 0)
+ correction_factor = (100 * cpi->projected_frame_size) / projected_size_based_on_q;
+
+ // More heavily damped adjustment used if we have been oscillating either side of target
+ switch (damp_var) {
+ case 0:
+ adjustment_limit = 0.75;
+ break;
+ case 1:
+ adjustment_limit = 0.375;
+ break;
+ case 2:
+ default:
+ adjustment_limit = 0.25;
+ break;
+ }
+
+ // if ( (correction_factor > 102) && (Q < cpi->active_worst_quality) )
+ if (correction_factor > 102) {
+ // We are not already at the worst allowable quality
+ correction_factor = (int)(100.5 + ((correction_factor - 100) * adjustment_limit));
+ rate_correction_factor = ((rate_correction_factor * correction_factor) / 100);
+
+ // Keep rate_correction_factor within limits
+ if (rate_correction_factor > MAX_BPB_FACTOR)
+ rate_correction_factor = MAX_BPB_FACTOR;
+ }
+ // else if ( (correction_factor < 99) && (Q > cpi->active_best_quality) )
+ else if (correction_factor < 99) {
+ // We are not already at the best allowable quality
+ correction_factor = (int)(100.5 - ((100 - correction_factor) * adjustment_limit));
+ rate_correction_factor = ((rate_correction_factor * correction_factor) / 100);
+
+ // Keep rate_correction_factor within limits
+ if (rate_correction_factor < MIN_BPB_FACTOR)
+ rate_correction_factor = MIN_BPB_FACTOR;
+ }
+
+ if (cpi->common.frame_type == KEY_FRAME)
+ cpi->key_frame_rate_correction_factor = rate_correction_factor;
+ else {
+ if (cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame)
+ cpi->gf_rate_correction_factor = rate_correction_factor;
+ else
+ cpi->rate_correction_factor = rate_correction_factor;
+ }
+}
+
+
+int vp9_regulate_q(VP9_COMP *cpi, int target_bits_per_frame) {
+ int q = cpi->active_worst_quality;
+
+ int i;
+ int last_error = INT_MAX;
+ int target_bits_per_mb;
+ int bits_per_mb_at_this_q;
+ double correction_factor;
+
+ // Select the appropriate correction factor based upon type of frame.
+ if (cpi->common.frame_type == KEY_FRAME)
+ correction_factor = cpi->key_frame_rate_correction_factor;
+ else {
+ if (cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame)
+ correction_factor = cpi->gf_rate_correction_factor;
+ else
+ correction_factor = cpi->rate_correction_factor;
+ }
+
+ // Calculate required scaling factor based on target frame size and size of frame produced using previous Q
+ if (target_bits_per_frame >= (INT_MAX >> BPER_MB_NORMBITS))
+ target_bits_per_mb = (target_bits_per_frame / cpi->common.MBs) << BPER_MB_NORMBITS; // Case where we would overflow int
+ else
+ target_bits_per_mb = (target_bits_per_frame << BPER_MB_NORMBITS) / cpi->common.MBs;
+
+ i = cpi->active_best_quality;
+
+ do {
+ bits_per_mb_at_this_q = (int)vp9_bits_per_mb(cpi->common.frame_type, i,
+ correction_factor);
+
+ if (bits_per_mb_at_this_q <= target_bits_per_mb) {
+ if ((target_bits_per_mb - bits_per_mb_at_this_q) <= last_error)
+ q = i;
+ else
+ q = i - 1;
+
+ break;
+ } else {
+ last_error = bits_per_mb_at_this_q - target_bits_per_mb;
+ }
+ } while (++i <= cpi->active_worst_quality);
+
+ return q;
+}
+
+
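+// Weighted average of recent key frame intervals, biased towards the most
+// recent ones by prior_key_frame_weight[] = {1, 2, 3, 4, 5}. For example,
+// intervals of {30, 30, 30, 30, 60} average to (30+60+90+120+300)/15 = 40.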
+static int estimate_keyframe_frequency(VP9_COMP *cpi) {
+ int i;
+
+ // Average key frame frequency
+ int av_key_frame_frequency = 0;
+
+ /* First key frame at start of sequence is a special case. We have no
+ * frequency data.
+ */
+ if (cpi->key_frame_count == 1) {
+ /* Assume a default of 1 kf every 2 seconds, or the max kf interval,
+ * whichever is smaller.
+ */
+ int key_freq = cpi->oxcf.key_freq > 0 ? cpi->oxcf.key_freq : 1;
+ av_key_frame_frequency = (int)cpi->output_frame_rate * 2;
+
+ if (cpi->oxcf.auto_key && av_key_frame_frequency > key_freq)
+ av_key_frame_frequency = cpi->oxcf.key_freq;
+
+ cpi->prior_key_frame_distance[KEY_FRAME_CONTEXT - 1]
+ = av_key_frame_frequency;
+ } else {
+ unsigned int total_weight = 0;
+ int last_kf_interval =
+ (cpi->frames_since_key > 0) ? cpi->frames_since_key : 1;
+
+ /* reset keyframe context and calculate weighted average of last
+ * KEY_FRAME_CONTEXT keyframes
+ */
+ for (i = 0; i < KEY_FRAME_CONTEXT; i++) {
+ if (i < KEY_FRAME_CONTEXT - 1)
+ cpi->prior_key_frame_distance[i]
+ = cpi->prior_key_frame_distance[i + 1];
+ else
+ cpi->prior_key_frame_distance[i] = last_kf_interval;
+
+ av_key_frame_frequency += prior_key_frame_weight[i]
+ * cpi->prior_key_frame_distance[i];
+ total_weight += prior_key_frame_weight[i];
+ }
+
+ av_key_frame_frequency /= total_weight;
+
+ }
+ return av_key_frame_frequency;
+}
+
+
+void vp9_adjust_key_frame_context(VP9_COMP *cpi) {
+ // Clear down mmx registers to allow floating point in what follows
+ vp9_clear_system_state();
+
+ cpi->frames_since_key = 0;
+ cpi->key_frame_count++;
+}
+
+
+void vp9_compute_frame_size_bounds(VP9_COMP *cpi, int *frame_under_shoot_limit,
+ int *frame_over_shoot_limit) {
+ // Set-up bounds on acceptable frame size:
+ if (cpi->oxcf.fixed_q >= 0) {
+ // Fixed Q scenario: frame size never outranges target (there is no target!)
+ *frame_under_shoot_limit = 0;
+ *frame_over_shoot_limit = INT_MAX;
+ } else {
+ if (cpi->common.frame_type == KEY_FRAME) {
+ *frame_over_shoot_limit = cpi->this_frame_target * 9 / 8;
+ *frame_under_shoot_limit = cpi->this_frame_target * 7 / 8;
+ } else {
+ if (cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) {
+ *frame_over_shoot_limit = cpi->this_frame_target * 9 / 8;
+ *frame_under_shoot_limit = cpi->this_frame_target * 7 / 8;
+ } else {
+          // Strong overshoot limit for constrained quality
+ if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
+ *frame_over_shoot_limit = cpi->this_frame_target * 11 / 8;
+ *frame_under_shoot_limit = cpi->this_frame_target * 2 / 8;
+ } else {
+ *frame_over_shoot_limit = cpi->this_frame_target * 11 / 8;
+ *frame_under_shoot_limit = cpi->this_frame_target * 5 / 8;
+ }
+ }
+ }
+
+ // For very small rate targets where the fractional adjustment
+ // (eg * 7/8) may be tiny make sure there is at least a minimum
+ // range.
+ *frame_over_shoot_limit += 200;
+ *frame_under_shoot_limit -= 200;
+ if (*frame_under_shoot_limit < 0)
+ *frame_under_shoot_limit = 0;
+ }
+}
+
+
+// return of 0 means drop frame
+int vp9_pick_frame_size(VP9_COMP *cpi) {
+ VP9_COMMON *cm = &cpi->common;
+
+ if (cm->frame_type == KEY_FRAME)
+ calc_iframe_target_size(cpi);
+ else
+ calc_pframe_target_size(cpi);
+
+ return 1;
+}
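
Taken together, these routines support a target / encode / correct cycle. A
hedged sketch of one frame's iteration, with the actual bitstream encode
abstracted behind a hypothetical encode_at_q() (the real loop lives in the
encoder's frame-encode path, outside this file):

    #include "vp9/encoder/vp9_ratectrl.h"

    /* Stand-in for the real frame encode; assumed to set
       cpi->projected_frame_size as a side effect. */
    extern void encode_at_q(VP9_COMP *cpi, int q);

    static void rate_controlled_encode(VP9_COMP *cpi) {
      int under, over;

      vp9_pick_frame_size(cpi);      /* sets cpi->this_frame_target */
      vp9_compute_frame_size_bounds(cpi, &under, &over);
      vp9_save_coding_context(cpi);  /* snapshot in case we re-code */

      for (;;) {
        const int q = vp9_regulate_q(cpi, cpi->this_frame_target);
        encode_at_q(cpi, q);
        vp9_update_rate_correction_factors(cpi, 0);  /* learn from the miss */
        if (cpi->projected_frame_size >= under &&
            cpi->projected_frame_size <= over)
          break;
        vp9_restore_coding_context(cpi);  /* roll back and try another q */
      }
    }
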
diff --git a/libvpx/vp9/encoder/vp9_ratectrl.h b/libvpx/vp9/encoder/vp9_ratectrl.h
new file mode 100644
index 0000000..4733176
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_ratectrl.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_RATECTRL_H_
+#define VP9_ENCODER_VP9_RATECTRL_H_
+
+#include "vp9/encoder/vp9_onyx_int.h"
+
+#define FRAME_OVERHEAD_BITS 200
+
+void vp9_save_coding_context(VP9_COMP *cpi);
+void vp9_restore_coding_context(VP9_COMP *cpi);
+
+void vp9_setup_key_frame(VP9_COMP *cpi);
+void vp9_update_rate_correction_factors(VP9_COMP *cpi, int damp_var);
+int vp9_regulate_q(VP9_COMP *cpi, int target_bits_per_frame);
+void vp9_adjust_key_frame_context(VP9_COMP *cpi);
+void vp9_compute_frame_size_bounds(VP9_COMP *cpi,
+ int *frame_under_shoot_limit,
+ int *frame_over_shoot_limit);
+
+// return of 0 means drop frame
+int vp9_pick_frame_size(VP9_COMP *cpi);
+
+double vp9_convert_qindex_to_q(int qindex);
+int vp9_gfboost_qadjust(int qindex);
+extern int vp9_bits_per_mb(FRAME_TYPE frame_type, int qindex,
+ double correction_factor);
+void vp9_setup_inter_frame(VP9_COMP *cpi);
+
+#endif // VP9_ENCODER_VP9_RATECTRL_H_
diff --git a/libvpx/vp9/encoder/vp9_rdopt.c b/libvpx/vp9/encoder/vp9_rdopt.c
new file mode 100644
index 0000000..9cb7ab0
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_rdopt.c
@@ -0,0 +1,3255 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <stdio.h>
+#include <math.h>
+#include <limits.h>
+#include <assert.h>
+
+#include "vp9/common/vp9_pragmas.h"
+#include "vp9/encoder/vp9_tokenize.h"
+#include "vp9/encoder/vp9_treewriter.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/encoder/vp9_modecosts.h"
+#include "vp9/encoder/vp9_encodeintra.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/common/vp9_reconintra.h"
+#include "vp9/common/vp9_findnearmv.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/encoder/vp9_encodemb.h"
+#include "vp9/encoder/vp9_quantize.h"
+#include "vp9/encoder/vp9_variance.h"
+#include "vp9/encoder/vp9_mcomp.h"
+#include "vp9/encoder/vp9_rdopt.h"
+#include "vp9/encoder/vp9_ratectrl.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vp9/encoder/vp9_encodemv.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vp9_rtcd.h"
+#include "vp9/common/vp9_mvref_common.h"
+#include "vp9/common/vp9_common.h"
+
+#define INVALID_MV 0x80008000
+
+/* Factor to weigh the rate for switchable interp filters */
+#define SWITCHABLE_INTERP_RATE_FACTOR 1
+
+DECLARE_ALIGNED(16, extern const uint8_t,
+ vp9_pt_energy_class[MAX_ENTROPY_TOKENS]);
+
+#define I4X4_PRED 0x8000
+#define SPLITMV 0x10000
+
+const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
+ {ZEROMV, LAST_FRAME, NONE},
+ {DC_PRED, INTRA_FRAME, NONE},
+
+ {NEARESTMV, LAST_FRAME, NONE},
+ {NEARMV, LAST_FRAME, NONE},
+
+ {ZEROMV, GOLDEN_FRAME, NONE},
+ {NEARESTMV, GOLDEN_FRAME, NONE},
+
+ {ZEROMV, ALTREF_FRAME, NONE},
+ {NEARESTMV, ALTREF_FRAME, NONE},
+
+ {NEARMV, GOLDEN_FRAME, NONE},
+ {NEARMV, ALTREF_FRAME, NONE},
+
+ {V_PRED, INTRA_FRAME, NONE},
+ {H_PRED, INTRA_FRAME, NONE},
+ {D45_PRED, INTRA_FRAME, NONE},
+ {D135_PRED, INTRA_FRAME, NONE},
+ {D117_PRED, INTRA_FRAME, NONE},
+ {D153_PRED, INTRA_FRAME, NONE},
+ {D27_PRED, INTRA_FRAME, NONE},
+ {D63_PRED, INTRA_FRAME, NONE},
+
+ {TM_PRED, INTRA_FRAME, NONE},
+
+ {NEWMV, LAST_FRAME, NONE},
+ {NEWMV, GOLDEN_FRAME, NONE},
+ {NEWMV, ALTREF_FRAME, NONE},
+
+ {SPLITMV, LAST_FRAME, NONE},
+ {SPLITMV, GOLDEN_FRAME, NONE},
+ {SPLITMV, ALTREF_FRAME, NONE},
+
+ {I4X4_PRED, INTRA_FRAME, NONE},
+
+ /* compound prediction modes */
+ {ZEROMV, LAST_FRAME, ALTREF_FRAME},
+ {NEARESTMV, LAST_FRAME, ALTREF_FRAME},
+ {NEARMV, LAST_FRAME, ALTREF_FRAME},
+
+ {ZEROMV, GOLDEN_FRAME, ALTREF_FRAME},
+ {NEARESTMV, GOLDEN_FRAME, ALTREF_FRAME},
+ {NEARMV, GOLDEN_FRAME, ALTREF_FRAME},
+
+ {NEWMV, LAST_FRAME, ALTREF_FRAME},
+ {NEWMV, GOLDEN_FRAME, ALTREF_FRAME},
+
+ {SPLITMV, LAST_FRAME, ALTREF_FRAME},
+ {SPLITMV, GOLDEN_FRAME, ALTREF_FRAME},
+};
+
+// The baseline rd thresholds for breaking out of the rd loop for
+// certain modes are assumed to be based on 8x8 blocks.
+// This table is used to correct for block size.
+// The factors here are << 2 (2 = x0.5, 32 = x8 etc).
+static int rd_thresh_block_size_factor[BLOCK_SIZE_TYPES] =
+ {2, 3, 3, 4, 6, 6, 8, 12, 12, 16, 24, 24, 32};
+
+#define BASE_RD_THRESH_FREQ_FACT 16
+#define MAX_RD_THRESH_FREQ_FACT 32
+#define MAX_RD_THRESH_FREQ_INC 1
+
+static void fill_token_costs(vp9_coeff_count (*c)[BLOCK_TYPES],
+ vp9_coeff_count (*cnoskip)[BLOCK_TYPES],
+ vp9_coeff_probs_model (*p)[BLOCK_TYPES]) {
+ int i, j, k, l;
+ TX_SIZE t;
+ for (t = TX_4X4; t <= TX_32X32; t++)
+ for (i = 0; i < BLOCK_TYPES; i++)
+ for (j = 0; j < REF_TYPES; j++)
+ for (k = 0; k < COEF_BANDS; k++)
+ for (l = 0; l < PREV_COEF_CONTEXTS; l++) {
+ vp9_prob probs[ENTROPY_NODES];
+ vp9_model_to_full_probs(p[t][i][j][k][l], probs);
+ vp9_cost_tokens((int *)cnoskip[t][i][j][k][l], probs,
+ vp9_coef_tree);
+#if CONFIG_BALANCED_COEFTREE
+ // Replace the eob node prob with a very small value so that the
+ // cost approximately equals the cost without the eob node
+ probs[1] = 1;
+ vp9_cost_tokens((int *)c[t][i][j][k][l], probs, vp9_coef_tree);
+#else
+ vp9_cost_tokens_skip((int *)c[t][i][j][k][l], probs,
+ vp9_coef_tree);
+ assert(c[t][i][j][k][l][DCT_EOB_TOKEN] ==
+ cnoskip[t][i][j][k][l][DCT_EOB_TOKEN]);
+#endif
+ }
+}
+
+static int rd_iifactor[32] = { 4, 4, 3, 2, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, };
+
+// 3 * dc_qlookup[Q] * dc_qlookup[Q];
+
+/* values are now correlated to quantizer */
+static int sad_per_bit16lut[QINDEX_RANGE];
+static int sad_per_bit4lut[QINDEX_RANGE];
+
+void vp9_init_me_luts() {
+ int i;
+
+ // Initialize the sad lut tables using a formulaic calculation for now
+ // This is to make it easier to resolve the impact of experimental changes
+ // to the quantizer tables.
+ for (i = 0; i < QINDEX_RANGE; i++) {
+ sad_per_bit16lut[i] =
+ (int)((0.0418 * vp9_convert_qindex_to_q(i)) + 2.4107);
+ sad_per_bit4lut[i] = (int)(0.063 * vp9_convert_qindex_to_q(i) + 2.742);
+ }
+}
+
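+// The RD lambda grows with the square of the quantizer step size:
+// lambda = (11 / 4) * q^2. For example, vp9_dc_quant() = 32 gives
+// RDMULT = (11 * 32 * 32) >> 2 = 2816.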
+static int compute_rd_mult(int qindex) {
+ const int q = vp9_dc_quant(qindex, 0);
+ return (11 * q * q) >> 2;
+}
+
+void vp9_initialize_me_consts(VP9_COMP *cpi, int qindex) {
+ cpi->mb.sadperbit16 = sad_per_bit16lut[qindex];
+ cpi->mb.sadperbit4 = sad_per_bit4lut[qindex];
+}
+
+
+void vp9_initialize_rd_consts(VP9_COMP *cpi, int qindex) {
+ int q, i, bsize;
+
+ vp9_clear_system_state(); // __asm emms;
+
+ // Further tests required to see if optimum is different
+ // for key frames, golden frames and arf frames.
+ // if (cpi->common.refresh_golden_frame ||
+ // cpi->common.refresh_alt_ref_frame)
+ qindex = clamp(qindex, 0, MAXQ);
+
+ cpi->RDMULT = compute_rd_mult(qindex);
+ if (cpi->pass == 2 && (cpi->common.frame_type != KEY_FRAME)) {
+ if (cpi->twopass.next_iiratio > 31)
+ cpi->RDMULT += (cpi->RDMULT * rd_iifactor[31]) >> 4;
+ else
+ cpi->RDMULT +=
+ (cpi->RDMULT * rd_iifactor[cpi->twopass.next_iiratio]) >> 4;
+ }
+ cpi->mb.errorperbit = cpi->RDMULT >> 6;
+ cpi->mb.errorperbit += (cpi->mb.errorperbit == 0);
+
+ vp9_set_speed_features(cpi);
+
+ q = (int)pow(vp9_dc_quant(qindex, 0) >> 2, 1.25);
+ q <<= 2;
+ if (q < 8)
+ q = 8;
+
+ if (cpi->RDMULT > 1000) {
+ cpi->RDDIV = 1;
+ cpi->RDMULT /= 100;
+
+ for (bsize = 0; bsize < BLOCK_SIZE_TYPES; ++bsize) {
+ for (i = 0; i < MAX_MODES; ++i) {
+        // Thresholds here seem unnecessarily harsh but are fine given the
+        // actual range of values used for cpi->sf.thresh_mult[]
+ int thresh_max = INT_MAX / (q * rd_thresh_block_size_factor[bsize]);
+
+ // *4 relates to the scaling of rd_thresh_block_size_factor[]
+ if ((int64_t)cpi->sf.thresh_mult[i] < thresh_max) {
+ cpi->rd_threshes[bsize][i] =
+ cpi->sf.thresh_mult[i] * q *
+ rd_thresh_block_size_factor[bsize] / (4 * 100);
+ } else {
+ cpi->rd_threshes[bsize][i] = INT_MAX;
+ }
+ cpi->rd_baseline_thresh[bsize][i] = cpi->rd_threshes[bsize][i];
+ cpi->rd_thresh_freq_fact[bsize][i] = BASE_RD_THRESH_FREQ_FACT;
+ }
+ }
+ } else {
+ cpi->RDDIV = 100;
+
+ for (bsize = 0; bsize < BLOCK_SIZE_TYPES; ++bsize) {
+ for (i = 0; i < MAX_MODES; i++) {
+        // Thresholds here seem unnecessarily harsh but are fine given the
+        // actual range of values used for cpi->sf.thresh_mult[]
+ int thresh_max = INT_MAX / (q * rd_thresh_block_size_factor[bsize]);
+
+ if (cpi->sf.thresh_mult[i] < thresh_max) {
+ cpi->rd_threshes[bsize][i] =
+ cpi->sf.thresh_mult[i] * q *
+ rd_thresh_block_size_factor[bsize] / 4;
+ } else {
+ cpi->rd_threshes[bsize][i] = INT_MAX;
+ }
+ cpi->rd_baseline_thresh[bsize][i] = cpi->rd_threshes[bsize][i];
+ cpi->rd_thresh_freq_fact[bsize][i] = BASE_RD_THRESH_FREQ_FACT;
+ }
+ }
+ }
+
+ fill_token_costs(cpi->mb.token_costs,
+ cpi->mb.token_costs_noskip,
+ cpi->common.fc.coef_probs);
+
+ for (i = 0; i < NUM_PARTITION_CONTEXTS; i++)
+ vp9_cost_tokens(cpi->mb.partition_cost[i],
+ cpi->common.fc.partition_prob[cpi->common.frame_type][i],
+ vp9_partition_tree);
+
+  /* rough estimate for costing */
+ vp9_init_mode_costs(cpi);
+
+ if (cpi->common.frame_type != KEY_FRAME) {
+ vp9_build_nmv_cost_table(
+ cpi->mb.nmvjointcost,
+ cpi->mb.e_mbd.allow_high_precision_mv ?
+ cpi->mb.nmvcost_hp : cpi->mb.nmvcost,
+ &cpi->common.fc.nmvc,
+ cpi->mb.e_mbd.allow_high_precision_mv, 1, 1);
+ }
+}
+
+int vp9_block_error_c(int16_t *coeff, int16_t *dqcoeff, int block_size) {
+ int i, error = 0;
+
+ for (i = 0; i < block_size; i++) {
+ int this_diff = coeff[i] - dqcoeff[i];
+ error += this_diff * this_diff;
+ }
+
+ return error;
+}
+
+static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
+ int plane, int block, PLANE_TYPE type,
+ ENTROPY_CONTEXT *A,
+ ENTROPY_CONTEXT *L,
+ TX_SIZE tx_size,
+ int y_blocks) {
+ MACROBLOCKD *const xd = &mb->e_mbd;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ int pt;
+ int c = 0;
+ int cost = 0, pad;
+ const int *scan, *nb;
+ const int eob = xd->plane[plane].eobs[block];
+ const int16_t *qcoeff_ptr = BLOCK_OFFSET(xd->plane[plane].qcoeff,
+ block, 16);
+ const int ref = mbmi->ref_frame[0] != INTRA_FRAME;
+ unsigned int (*token_costs)[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS] =
+ mb->token_costs[tx_size][type][ref];
+ ENTROPY_CONTEXT above_ec, left_ec;
+ TX_TYPE tx_type = DCT_DCT;
+
+ const int segment_id = xd->mode_info_context->mbmi.segment_id;
+ unsigned int (*token_costs_noskip)[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS] =
+ mb->token_costs_noskip[tx_size][type][ref];
+
+ int seg_eob, default_eob;
+ uint8_t token_cache[1024];
+ const uint8_t * band_translate;
+
+ // Check for consistency of tx_size with mode info
+ assert((!type && !plane) || (type && plane));
+ if (type == PLANE_TYPE_Y_WITH_DC) {
+ assert(xd->mode_info_context->mbmi.txfm_size == tx_size);
+ } else {
+ TX_SIZE tx_size_uv = get_uv_tx_size(mbmi);
+ assert(tx_size == tx_size_uv);
+ }
+
+ switch (tx_size) {
+ case TX_4X4: {
+ tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+ get_tx_type_4x4(xd, block) : DCT_DCT;
+ above_ec = A[0] != 0;
+ left_ec = L[0] != 0;
+ seg_eob = 16;
+ scan = get_scan_4x4(tx_type);
+ band_translate = vp9_coefband_trans_4x4;
+ break;
+ }
+ case TX_8X8: {
+ const BLOCK_SIZE_TYPE sb_type = xd->mode_info_context->mbmi.sb_type;
+ const int sz = 1 + b_width_log2(sb_type);
+ const int x = block & ((1 << sz) - 1), y = block - x;
+ TX_TYPE tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+ get_tx_type_8x8(xd, y + (x >> 1)) : DCT_DCT;
+ above_ec = (A[0] + A[1]) != 0;
+ left_ec = (L[0] + L[1]) != 0;
+ scan = get_scan_8x8(tx_type);
+ seg_eob = 64;
+ band_translate = vp9_coefband_trans_8x8plus;
+ break;
+ }
+ case TX_16X16: {
+ const BLOCK_SIZE_TYPE sb_type = xd->mode_info_context->mbmi.sb_type;
+ const int sz = 2 + b_width_log2(sb_type);
+ const int x = block & ((1 << sz) - 1), y = block - x;
+ TX_TYPE tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+ get_tx_type_16x16(xd, y + (x >> 2)) : DCT_DCT;
+ scan = get_scan_16x16(tx_type);
+ seg_eob = 256;
+ above_ec = (A[0] + A[1] + A[2] + A[3]) != 0;
+ left_ec = (L[0] + L[1] + L[2] + L[3]) != 0;
+ band_translate = vp9_coefband_trans_8x8plus;
+ break;
+ }
+ case TX_32X32:
+ scan = vp9_default_scan_32x32;
+ seg_eob = 1024;
+ above_ec = (A[0] + A[1] + A[2] + A[3] + A[4] + A[5] + A[6] + A[7]) != 0;
+ left_ec = (L[0] + L[1] + L[2] + L[3] + L[4] + L[5] + L[6] + L[7]) != 0;
+ band_translate = vp9_coefband_trans_8x8plus;
+ break;
+ default:
+ abort();
+ break;
+ }
+ assert(eob <= seg_eob);
+
+ pt = combine_entropy_contexts(above_ec, left_ec);
+ nb = vp9_get_coef_neighbors_handle(scan, &pad);
+ default_eob = seg_eob;
+
+ if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))
+ seg_eob = 0;
+
+ /* sanity check to ensure that we do not have spurious non-zero q values */
+ if (eob < seg_eob)
+ assert(qcoeff_ptr[scan[eob]] == 0);
+
+ {
+ for (c = 0; c < eob; c++) {
+ int v = qcoeff_ptr[scan[c]];
+ int t = vp9_dct_value_tokens_ptr[v].token;
+ int band = get_coef_band(band_translate, c);
+ if (c)
+ pt = vp9_get_coef_context(scan, nb, pad, token_cache, c, default_eob);
+
+ if (!c || token_cache[scan[c - 1]]) // do not skip eob
+ cost += token_costs_noskip[band][pt][t] + vp9_dct_value_cost_ptr[v];
+ else
+ cost += token_costs[band][pt][t] + vp9_dct_value_cost_ptr[v];
+ token_cache[scan[c]] = vp9_pt_energy_class[t];
+ }
+ if (c < seg_eob) {
+ if (c)
+ pt = vp9_get_coef_context(scan, nb, pad, token_cache, c, default_eob);
+ cost += mb->token_costs_noskip[tx_size][type][ref]
+ [get_coef_band(band_translate, c)]
+ [pt][DCT_EOB_TOKEN];
+ }
+ }
+
+  // Set the entropy contexts to whether any coefficients were coded (c > 0).
+ for (pt = 0; pt < (1 << tx_size); pt++) {
+ A[pt] = L[pt] = c > 0;
+ }
+
+ return cost;
+}
+
+static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
+ int (*r)[2], int *rate,
+ int *d, int *distortion,
+ int *s, int *skip,
+ int64_t txfm_cache[NB_TXFM_MODES],
+ TX_SIZE max_txfm_size) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+ vp9_prob skip_prob = vp9_get_pred_prob(cm, xd, PRED_MBSKIP);
+ int64_t rd[TX_SIZE_MAX_SB][2];
+ int n, m;
+ int s0, s1;
+
+ const vp9_prob *tx_probs = vp9_get_pred_probs(cm, xd, PRED_TX_SIZE);
+
+ for (n = TX_4X4; n <= max_txfm_size; n++) {
+ r[n][1] = r[n][0];
+ for (m = 0; m <= n - (n == max_txfm_size); m++) {
+ if (m == n)
+ r[n][1] += vp9_cost_zero(tx_probs[m]);
+ else
+ r[n][1] += vp9_cost_one(tx_probs[m]);
+ }
+ }
+
+ assert(skip_prob > 0);
+ s0 = vp9_cost_bit(skip_prob, 0);
+ s1 = vp9_cost_bit(skip_prob, 1);
+
+ for (n = TX_4X4; n <= max_txfm_size; n++) {
+ if (s[n]) {
+ rd[n][0] = rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1, d[n]);
+ } else {
+ rd[n][0] = RDCOST(x->rdmult, x->rddiv, r[n][0] + s0, d[n]);
+ rd[n][1] = RDCOST(x->rdmult, x->rddiv, r[n][1] + s0, d[n]);
+ }
+ }
+
+ if (max_txfm_size == TX_32X32 &&
+ (cm->txfm_mode == ALLOW_32X32 ||
+ (cm->txfm_mode == TX_MODE_SELECT &&
+ rd[TX_32X32][1] < rd[TX_16X16][1] && rd[TX_32X32][1] < rd[TX_8X8][1] &&
+ rd[TX_32X32][1] < rd[TX_4X4][1]))) {
+ mbmi->txfm_size = TX_32X32;
+ } else if (max_txfm_size >= TX_16X16 &&
+ (cm->txfm_mode == ALLOW_16X16 ||
+ cm->txfm_mode == ALLOW_32X32 ||
+ (cm->txfm_mode == TX_MODE_SELECT &&
+ rd[TX_16X16][1] < rd[TX_8X8][1] &&
+ rd[TX_16X16][1] < rd[TX_4X4][1]))) {
+ mbmi->txfm_size = TX_16X16;
+ } else if (cm->txfm_mode == ALLOW_8X8 ||
+ cm->txfm_mode == ALLOW_16X16 ||
+ cm->txfm_mode == ALLOW_32X32 ||
+ (cm->txfm_mode == TX_MODE_SELECT && rd[TX_8X8][1] < rd[TX_4X4][1])) {
+ mbmi->txfm_size = TX_8X8;
+ } else {
+ mbmi->txfm_size = TX_4X4;
+ }
+
+ *distortion = d[mbmi->txfm_size];
+ *rate = r[mbmi->txfm_size][cm->txfm_mode == TX_MODE_SELECT];
+ *skip = s[mbmi->txfm_size];
+
+ txfm_cache[ONLY_4X4] = rd[TX_4X4][0];
+ txfm_cache[ALLOW_8X8] = rd[TX_8X8][0];
+ txfm_cache[ALLOW_16X16] = rd[MIN(max_txfm_size, TX_16X16)][0];
+ txfm_cache[ALLOW_32X32] = rd[MIN(max_txfm_size, TX_32X32)][0];
+ if (max_txfm_size == TX_32X32 &&
+ rd[TX_32X32][1] < rd[TX_16X16][1] && rd[TX_32X32][1] < rd[TX_8X8][1] &&
+ rd[TX_32X32][1] < rd[TX_4X4][1])
+ txfm_cache[TX_MODE_SELECT] = rd[TX_32X32][1];
+ else if (max_txfm_size >= TX_16X16 &&
+ rd[TX_16X16][1] < rd[TX_8X8][1] && rd[TX_16X16][1] < rd[TX_4X4][1])
+ txfm_cache[TX_MODE_SELECT] = rd[TX_16X16][1];
+ else
+ txfm_cache[TX_MODE_SELECT] = rd[TX_4X4][1] < rd[TX_8X8][1] ?
+ rd[TX_4X4][1] : rd[TX_8X8][1];
+}
+
+static int block_error(int16_t *coeff, int16_t *dqcoeff,
+ int block_size, int shift) {
+ int i;
+ int64_t error = 0;
+
+ for (i = 0; i < block_size; i++) {
+ int this_diff = coeff[i] - dqcoeff[i];
+ error += (unsigned)this_diff * this_diff;
+ }
+ error >>= shift;
+
+ return error > INT_MAX ? INT_MAX : (int)error;
+}
+
+static int block_error_sby(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize, int shift) {
+ const int bwl = b_width_log2(bsize), bhl = b_height_log2(bsize);
+ return block_error(x->plane[0].coeff, x->e_mbd.plane[0].dqcoeff,
+ 16 << (bwl + bhl), shift);
+}
+
+static int block_error_sbuv(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize, int shift) {
+ const int bwl = b_width_log2(bsize), bhl = b_height_log2(bsize);
+ int64_t sum = 0;
+ int plane;
+
+ for (plane = 1; plane < MAX_MB_PLANE; plane++) {
+ const int subsampling = x->e_mbd.plane[plane].subsampling_x +
+ x->e_mbd.plane[plane].subsampling_y;
+ sum += block_error(x->plane[plane].coeff, x->e_mbd.plane[plane].dqcoeff,
+ 16 << (bwl + bhl - subsampling), 0);
+ }
+ sum >>= shift;
+ return sum > INT_MAX ? INT_MAX : (int)sum;
+}
+
+struct rdcost_block_args {
+ VP9_COMMON *cm;
+ MACROBLOCK *x;
+ ENTROPY_CONTEXT t_above[16];
+ ENTROPY_CONTEXT t_left[16];
+ TX_SIZE tx_size;
+ int bw;
+ int bh;
+ int cost;
+};
+
+static void rdcost_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
+ int ss_txfrm_size, void *arg) {
+ struct rdcost_block_args* args = arg;
+ int x_idx, y_idx;
+ MACROBLOCKD * const xd = &args->x->e_mbd;
+
+ txfrm_block_to_raster_xy(xd, bsize, plane, block, args->tx_size * 2, &x_idx,
+ &y_idx);
+
+ args->cost += cost_coeffs(args->cm, args->x, plane, block,
+ xd->plane[plane].plane_type, args->t_above + x_idx,
+ args->t_left + y_idx, args->tx_size,
+ args->bw * args->bh);
+}
+
+static int rdcost_plane(VP9_COMMON * const cm, MACROBLOCK *x, int plane,
+ BLOCK_SIZE_TYPE bsize, TX_SIZE tx_size) {
+ MACROBLOCKD * const xd = &x->e_mbd;
+ const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
+ const int bhl = b_height_log2(bsize) - xd->plane[plane].subsampling_y;
+ const int bw = 1 << bwl, bh = 1 << bhl;
+ struct rdcost_block_args args = { cm, x, { 0 }, { 0 }, tx_size, bw, bh, 0 };
+
+ vpx_memcpy(&args.t_above, xd->plane[plane].above_context,
+ sizeof(ENTROPY_CONTEXT) * bw);
+ vpx_memcpy(&args.t_left, xd->plane[plane].left_context,
+ sizeof(ENTROPY_CONTEXT) * bh);
+
+ foreach_transformed_block_in_plane(xd, bsize, plane, rdcost_block, &args);
+
+ return args.cost;
+}
+
+static int rdcost_uv(VP9_COMMON *const cm, MACROBLOCK *x,
+ BLOCK_SIZE_TYPE bsize, TX_SIZE tx_size) {
+ int cost = 0, plane;
+
+ for (plane = 1; plane < MAX_MB_PLANE; plane++) {
+ cost += rdcost_plane(cm, x, plane, bsize, tx_size);
+ }
+ return cost;
+}
+
+static void super_block_yrd_for_txfm(VP9_COMMON *const cm, MACROBLOCK *x,
+ int *rate, int *distortion, int *skippable,
+ BLOCK_SIZE_TYPE bsize, TX_SIZE tx_size) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ xd->mode_info_context->mbmi.txfm_size = tx_size;
+
+ if (xd->mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME)
+ vp9_encode_intra_block_y(cm, x, bsize);
+ else
+ vp9_xform_quant_sby(cm, x, bsize);
+
+ *distortion = block_error_sby(x, bsize, tx_size == TX_32X32 ? 0 : 2);
+ *rate = rdcost_plane(cm, x, 0, bsize, tx_size);
+ *skippable = vp9_sby_is_skippable(xd, bsize);
+}
+
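+// Luma rate/distortion for a whole block. At speed > 4 the largest
+// transform size the block allows is used directly; otherwise each
+// candidate transform size is measured and choose_txfm_size_from_rd()
+// picks the winner and fills txfm_cache for the caller.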
+static void super_block_yrd(VP9_COMP *cpi,
+ MACROBLOCK *x, int *rate, int *distortion,
+ int *skip, BLOCK_SIZE_TYPE bs,
+ int64_t txfm_cache[NB_TXFM_MODES]) {
+ VP9_COMMON *const cm = &cpi->common;
+ int r[TX_SIZE_MAX_SB][2], d[TX_SIZE_MAX_SB], s[TX_SIZE_MAX_SB];
+ MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+
+ assert(bs == mbmi->sb_type);
+ if (mbmi->ref_frame[0] > INTRA_FRAME)
+ vp9_subtract_sby(x, bs);
+
+ if (cpi->speed > 4) {
+ if (bs >= BLOCK_SIZE_SB32X32) {
+ mbmi->txfm_size = TX_32X32;
+ } else if (bs >= BLOCK_SIZE_MB16X16) {
+ mbmi->txfm_size = TX_16X16;
+ } else if (bs >= BLOCK_SIZE_SB8X8) {
+ mbmi->txfm_size = TX_8X8;
+ } else {
+ mbmi->txfm_size = TX_4X4;
+ }
+ vpx_memset(txfm_cache, 0, NB_TXFM_MODES * sizeof(int64_t));
+ super_block_yrd_for_txfm(cm, x, rate, distortion, skip, bs,
+ mbmi->txfm_size);
+ return;
+ }
+ if (bs >= BLOCK_SIZE_SB32X32)
+ super_block_yrd_for_txfm(cm, x, &r[TX_32X32][0], &d[TX_32X32], &s[TX_32X32],
+ bs, TX_32X32);
+ if (bs >= BLOCK_SIZE_MB16X16)
+ super_block_yrd_for_txfm(cm, x, &r[TX_16X16][0], &d[TX_16X16], &s[TX_16X16],
+ bs, TX_16X16);
+ super_block_yrd_for_txfm(cm, x, &r[TX_8X8][0], &d[TX_8X8], &s[TX_8X8], bs,
+ TX_8X8);
+ super_block_yrd_for_txfm(cm, x, &r[TX_4X4][0], &d[TX_4X4], &s[TX_4X4], bs,
+ TX_4X4);
+
+ choose_txfm_size_from_rd(cpi, x, r, rate, d, distortion, s,
+ skip, txfm_cache,
+ TX_32X32 - (bs < BLOCK_SIZE_SB32X32)
+ - (bs < BLOCK_SIZE_MB16X16));
+}
+
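+// Exhaustively tries every intra mode (DC_PRED..TM_PRED) on one sub-8x8
+// block: predict, transform, quantize, and cost each mode, keeping the
+// dequantized coefficients of the winner so the final reconstruction can
+// be replayed below.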
+static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
+ MB_PREDICTION_MODE *best_mode,
+ int *bmode_costs,
+ ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
+ int *bestrate, int *bestratey,
+ int *bestdistortion,
+ BLOCK_SIZE_TYPE bsize) {
+ MB_PREDICTION_MODE mode;
+ MACROBLOCKD *xd = &x->e_mbd;
+ int64_t best_rd = INT64_MAX;
+ int rate = 0;
+ int distortion;
+ VP9_COMMON *const cm = &cpi->common;
+ const int src_stride = x->plane[0].src.stride;
+ uint8_t *src, *dst;
+ int16_t *src_diff, *coeff;
+
+ ENTROPY_CONTEXT ta[2], tempa[2];
+ ENTROPY_CONTEXT tl[2], templ[2];
+ TX_TYPE tx_type = DCT_DCT;
+ TX_TYPE best_tx_type = DCT_DCT;
+ int bw = 1 << b_width_log2(bsize);
+ int bh = 1 << b_height_log2(bsize);
+ int idx, idy, block;
+ DECLARE_ALIGNED(16, int16_t, best_dqcoeff[4][16]);
+
+ assert(ib < 4);
+
+ vpx_memcpy(ta, a, sizeof(ta));
+ vpx_memcpy(tl, l, sizeof(tl));
+ xd->mode_info_context->mbmi.txfm_size = TX_4X4;
+
+ for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
+ int64_t this_rd;
+ int ratey = 0;
+
+ rate = bmode_costs[mode];
+ distortion = 0;
+
+ vpx_memcpy(tempa, ta, sizeof(ta));
+ vpx_memcpy(templ, tl, sizeof(tl));
+
+ for (idy = 0; idy < bh; ++idy) {
+ for (idx = 0; idx < bw; ++idx) {
+ block = ib + idy * 2 + idx;
+ xd->mode_info_context->bmi[block].as_mode.first = mode;
+ src = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, block,
+ x->plane[0].src.buf, src_stride);
+ src_diff = raster_block_offset_int16(xd, BLOCK_SIZE_SB8X8, 0, block,
+ x->plane[0].src_diff);
+ coeff = BLOCK_OFFSET(x->plane[0].coeff, block, 16);
+ dst = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, block,
+ xd->plane[0].dst.buf,
+ xd->plane[0].dst.stride);
+ vp9_intra4x4_predict(xd, block, BLOCK_SIZE_SB8X8, mode,
+ dst, xd->plane[0].dst.stride);
+ vp9_subtract_block(4, 4, src_diff, 8,
+ src, src_stride,
+ dst, xd->plane[0].dst.stride);
+
+ tx_type = get_tx_type_4x4(xd, block);
+ if (tx_type != DCT_DCT) {
+ vp9_short_fht4x4(src_diff, coeff, 8, tx_type);
+ x->quantize_b_4x4(x, block, tx_type, 16);
+ } else {
+ x->fwd_txm4x4(src_diff, coeff, 16);
+ x->quantize_b_4x4(x, block, tx_type, 16);
+ }
+
+ ratey += cost_coeffs(cm, x, 0, block, PLANE_TYPE_Y_WITH_DC,
+ tempa + idx, templ + idy, TX_4X4, 16);
+ distortion += vp9_block_error(coeff, BLOCK_OFFSET(xd->plane[0].dqcoeff,
+ block, 16), 16) >> 2;
+
+        // Reconstruct with the transform type used for this candidate mode
+        // so that subsequent sub-blocks predict from the right pixels.
+        if (tx_type != DCT_DCT)
+          vp9_short_iht4x4_add(BLOCK_OFFSET(xd->plane[0].dqcoeff, block, 16),
+                               dst, xd->plane[0].dst.stride, tx_type);
+        else
+          xd->inv_txm4x4_add(BLOCK_OFFSET(xd->plane[0].dqcoeff, block, 16),
+                             dst, xd->plane[0].dst.stride);
+ }
+ }
+
+ rate += ratey;
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
+
+ if (this_rd < best_rd) {
+ *bestrate = rate;
+ *bestratey = ratey;
+ *bestdistortion = distortion;
+ best_rd = this_rd;
+ *best_mode = mode;
+ best_tx_type = tx_type;
+ vpx_memcpy(a, tempa, sizeof(tempa));
+ vpx_memcpy(l, templ, sizeof(templ));
+ for (idy = 0; idy < bh; ++idy) {
+ for (idx = 0; idx < bw; ++idx) {
+ block = ib + idy * 2 + idx;
+ vpx_memcpy(best_dqcoeff[idy * 2 + idx],
+ BLOCK_OFFSET(xd->plane[0].dqcoeff, block, 16),
+ sizeof(best_dqcoeff[0]));
+ }
+ }
+ }
+ }
+
+ for (idy = 0; idy < bh; ++idy) {
+ for (idx = 0; idx < bw; ++idx) {
+ block = ib + idy * 2 + idx;
+ xd->mode_info_context->bmi[block].as_mode.first = *best_mode;
+ dst = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, block,
+ xd->plane[0].dst.buf,
+ xd->plane[0].dst.stride);
+
+ vp9_intra4x4_predict(xd, block, BLOCK_SIZE_SB8X8, *best_mode,
+ dst, xd->plane[0].dst.stride);
+ // inverse transform
+ if (best_tx_type != DCT_DCT)
+ vp9_short_iht4x4_add(best_dqcoeff[idy * 2 + idx], dst,
+ xd->plane[0].dst.stride, best_tx_type);
+ else
+ xd->inv_txm4x4_add(best_dqcoeff[idy * 2 + idx], dst,
+ xd->plane[0].dst.stride);
+ }
+ }
+
+ return best_rd;
+}
+
+static int64_t rd_pick_intra4x4mby_modes(VP9_COMP *cpi, MACROBLOCK *mb,
+ int *Rate, int *rate_y,
+ int *Distortion, int64_t best_rd) {
+ int i, j;
+ MACROBLOCKD *const xd = &mb->e_mbd;
+ BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
+ int bw = 1 << b_width_log2(bsize);
+ int bh = 1 << b_height_log2(bsize);
+ int idx, idy;
+ int cost = 0;
+ int distortion = 0;
+ int tot_rate_y = 0;
+ int64_t total_rd = 0;
+ ENTROPY_CONTEXT t_above[4], t_left[4];
+ int *bmode_costs;
+ MODE_INFO *const mic = xd->mode_info_context;
+
+ vpx_memcpy(t_above, xd->plane[0].above_context, sizeof(t_above));
+ vpx_memcpy(t_left, xd->plane[0].left_context, sizeof(t_left));
+
+ bmode_costs = mb->mbmode_cost;
+
+ for (idy = 0; idy < 2; idy += bh) {
+ for (idx = 0; idx < 2; idx += bw) {
+ const int mis = xd->mode_info_stride;
+ MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
+ int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry);
+ int UNINITIALIZED_IS_SAFE(d);
+ i = idy * 2 + idx;
+
+ if (xd->frame_type == KEY_FRAME) {
+ const MB_PREDICTION_MODE A = above_block_mode(mic, i, mis);
+ const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
+ left_block_mode(mic, i) : DC_PRED;
+
+ bmode_costs = mb->y_mode_costs[A][L];
+ }
+
+ total_rd += rd_pick_intra4x4block(cpi, mb, i, &best_mode, bmode_costs,
+ t_above + idx, t_left + idy,
+ &r, &ry, &d, bsize);
+ cost += r;
+ distortion += d;
+ tot_rate_y += ry;
+
+ mic->bmi[i].as_mode.first = best_mode;
+ for (j = 1; j < bh; ++j)
+ mic->bmi[i + j * 2].as_mode.first = best_mode;
+ for (j = 1; j < bw; ++j)
+ mic->bmi[i + j].as_mode.first = best_mode;
+
+ if (total_rd >= best_rd)
+ break;
+ }
+ }
+
+ if (total_rd >= best_rd)
+ return INT64_MAX;
+
+ *Rate = cost;
+ *rate_y = tot_rate_y;
+ *Distortion = distortion;
+ xd->mode_info_context->mbmi.mode = mic->bmi[3].as_mode.first;
+
+ return RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
+}
+
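+// Full-block luma intra mode selection by RD cost. Per-transform-mode RD
+// costs reported by super_block_yrd() for each mode are folded into
+// txfm_cache so the caller can also compare transform modes.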
+static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
+ int *rate, int *rate_tokenonly,
+ int *distortion, int *skippable,
+ BLOCK_SIZE_TYPE bsize,
+ int64_t txfm_cache[NB_TXFM_MODES]) {
+ MB_PREDICTION_MODE mode;
+ MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int this_rate, this_rate_tokenonly;
+ int this_distortion, s;
+ int64_t best_rd = INT64_MAX, this_rd;
+ TX_SIZE UNINITIALIZED_IS_SAFE(best_tx);
+ int i;
+ int *bmode_costs = x->mbmode_cost;
+
+ if (bsize < BLOCK_SIZE_SB8X8) {
+ x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
+ return best_rd;
+ }
+
+ for (i = 0; i < NB_TXFM_MODES; i++)
+ txfm_cache[i] = INT64_MAX;
+
+ /* Y Search for 32x32 intra prediction mode */
+ for (mode = DC_PRED; mode <= TM_PRED; mode++) {
+ int64_t local_txfm_cache[NB_TXFM_MODES];
+ MODE_INFO *const mic = xd->mode_info_context;
+ const int mis = xd->mode_info_stride;
+
+ if (cpi->common.frame_type == KEY_FRAME) {
+ const MB_PREDICTION_MODE A = above_block_mode(mic, 0, mis);
+ const MB_PREDICTION_MODE L = xd->left_available ?
+ left_block_mode(mic, 0) : DC_PRED;
+
+ bmode_costs = x->y_mode_costs[A][L];
+ }
+ x->e_mbd.mode_info_context->mbmi.mode = mode;
+
+ super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s,
+ bsize, local_txfm_cache);
+
+ this_rate = this_rate_tokenonly + bmode_costs[mode];
+ this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
+
+ if (this_rd < best_rd) {
+ mode_selected = mode;
+ best_rd = this_rd;
+ best_tx = x->e_mbd.mode_info_context->mbmi.txfm_size;
+ *rate = this_rate;
+ *rate_tokenonly = this_rate_tokenonly;
+ *distortion = this_distortion;
+ *skippable = s;
+ }
+
+ for (i = 0; i < NB_TXFM_MODES; i++) {
+ int64_t adj_rd = this_rd + local_txfm_cache[i] -
+ local_txfm_cache[cpi->common.txfm_mode];
+ if (adj_rd < txfm_cache[i]) {
+ txfm_cache[i] = adj_rd;
+ }
+ }
+ }
+
+ x->e_mbd.mode_info_context->mbmi.mode = mode_selected;
+ x->e_mbd.mode_info_context->mbmi.txfm_size = best_tx;
+
+ return best_rd;
+}
+
+static void super_block_uvrd_for_txfm(VP9_COMMON *const cm, MACROBLOCK *x,
+ int *rate, int *distortion,
+ int *skippable, BLOCK_SIZE_TYPE bsize,
+ TX_SIZE uv_tx_size) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ if (xd->mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME)
+ vp9_encode_intra_block_uv(cm, x, bsize);
+ else
+ vp9_xform_quant_sbuv(cm, x, bsize);
+
+ *distortion = block_error_sbuv(x, bsize, uv_tx_size == TX_32X32 ? 0 : 2);
+ *rate = rdcost_uv(cm, x, bsize, uv_tx_size);
+ *skippable = vp9_sbuv_is_skippable(xd, bsize);
+}
+
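+// Chroma rate/distortion: the UV transform size follows the luma
+// transform size, capped at the largest transform that fits the
+// subsampled chroma plane.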
+static void super_block_uvrd(VP9_COMMON *const cm, MACROBLOCK *x,
+ int *rate, int *distortion, int *skippable,
+ BLOCK_SIZE_TYPE bsize) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+
+ if (mbmi->ref_frame[0] > INTRA_FRAME)
+ vp9_subtract_sbuv(x, bsize);
+
+ if (mbmi->txfm_size >= TX_32X32 && bsize >= BLOCK_SIZE_SB64X64) {
+ super_block_uvrd_for_txfm(cm, x, rate, distortion, skippable, bsize,
+ TX_32X32);
+ } else if (mbmi->txfm_size >= TX_16X16 && bsize >= BLOCK_SIZE_SB32X32) {
+ super_block_uvrd_for_txfm(cm, x, rate, distortion, skippable, bsize,
+ TX_16X16);
+ } else if (mbmi->txfm_size >= TX_8X8 && bsize >= BLOCK_SIZE_MB16X16) {
+ super_block_uvrd_for_txfm(cm, x, rate, distortion, skippable, bsize,
+ TX_8X8);
+ } else {
+ super_block_uvrd_for_txfm(cm, x, rate, distortion, skippable, bsize,
+ TX_4X4);
+ }
+}
+
+static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x,
+ int *rate, int *rate_tokenonly,
+ int *distortion, int *skippable,
+ BLOCK_SIZE_TYPE bsize) {
+ MB_PREDICTION_MODE mode;
+ MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
+ int64_t best_rd = INT64_MAX, this_rd;
+ int this_rate_tokenonly, this_rate;
+ int this_distortion, s;
+
+ for (mode = DC_PRED; mode <= TM_PRED; mode++) {
+ x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
+ super_block_uvrd(&cpi->common, x, &this_rate_tokenonly,
+ &this_distortion, &s, bsize);
+ this_rate = this_rate_tokenonly +
+ x->intra_uv_mode_cost[x->e_mbd.frame_type][mode];
+ this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
+
+ if (this_rd < best_rd) {
+ mode_selected = mode;
+ best_rd = this_rd;
+ *rate = this_rate;
+ *rate_tokenonly = this_rate_tokenonly;
+ *distortion = this_distortion;
+ *skippable = s;
+ }
+ }
+
+ x->e_mbd.mode_info_context->mbmi.uv_mode = mode_selected;
+
+ return best_rd;
+}
+
+int vp9_cost_mv_ref(VP9_COMP *cpi,
+ MB_PREDICTION_MODE m,
+ const int mode_context) {
+ MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ int segment_id = xd->mode_info_context->mbmi.segment_id;
+
+  // Don't account for the mode here if segment skip is enabled.
+ if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
+ VP9_COMMON *pc = &cpi->common;
+ assert(NEARESTMV <= m && m <= NEWMV);
+ return cost_token(vp9_sb_mv_ref_tree,
+ pc->fc.inter_mode_probs[mode_context],
+ vp9_sb_mv_ref_encoding_array - NEARESTMV + m);
+ } else
+ return 0;
+}
+
+void vp9_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv) {
+ x->e_mbd.mode_info_context->mbmi.mode = mb;
+ x->e_mbd.mode_info_context->mbmi.mv[0].as_int = mv->as_int;
+}
+
+static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE_TYPE bsize,
+ int_mv *frame_mv,
+ int mi_row, int mi_col,
+ int_mv single_newmv[MAX_REF_FRAMES],
+ int *rate_mv);
+static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE_TYPE bsize,
+ int mi_row, int mi_col,
+ int_mv *tmp_mv, int *rate_mv);
+
+static int labels2mode(MACROBLOCK *x, int i,
+ MB_PREDICTION_MODE this_mode,
+ int_mv *this_mv, int_mv *this_second_mv,
+ int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
+ int_mv seg_mvs[MAX_REF_FRAMES],
+ int_mv *best_ref_mv,
+ int_mv *second_best_ref_mv,
+ int *mvjcost, int *mvcost[2], VP9_COMP *cpi) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MODE_INFO *const mic = xd->mode_info_context;
+ MB_MODE_INFO * mbmi = &mic->mbmi;
+ int cost = 0, thismvcost = 0;
+ int idx, idy;
+ int bw = 1 << b_width_log2(mbmi->sb_type);
+ int bh = 1 << b_height_log2(mbmi->sb_type);
+
+ /* We have to be careful retrieving previously-encoded motion vectors.
+ Ones from this macroblock have to be pulled from the BLOCKD array
+ as they have not yet made it to the bmi array in our MB_MODE_INFO. */
+ MB_PREDICTION_MODE m;
+
+  // The only time we should do costing for a new motion vector or mode
+  // is when we are on a new label (jbb May 08, 2007).
+ switch (m = this_mode) {
+ case NEWMV:
+ this_mv->as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
+ thismvcost = vp9_mv_bit_cost(this_mv, best_ref_mv, mvjcost, mvcost,
+ 102, xd->allow_high_precision_mv);
+ if (mbmi->ref_frame[1] > 0) {
+ this_second_mv->as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
+ thismvcost += vp9_mv_bit_cost(this_second_mv, second_best_ref_mv,
+ mvjcost, mvcost, 102,
+ xd->allow_high_precision_mv);
+ }
+ break;
+ case NEARESTMV:
+ this_mv->as_int = frame_mv[NEARESTMV][mbmi->ref_frame[0]].as_int;
+ if (mbmi->ref_frame[1] > 0)
+ this_second_mv->as_int =
+ frame_mv[NEARESTMV][mbmi->ref_frame[1]].as_int;
+ break;
+ case NEARMV:
+ this_mv->as_int = frame_mv[NEARMV][mbmi->ref_frame[0]].as_int;
+ if (mbmi->ref_frame[1] > 0)
+ this_second_mv->as_int =
+ frame_mv[NEARMV][mbmi->ref_frame[1]].as_int;
+ break;
+ case ZEROMV:
+ this_mv->as_int = 0;
+ if (mbmi->ref_frame[1] > 0)
+ this_second_mv->as_int = 0;
+ break;
+ default:
+ break;
+ }
+
+ cost = vp9_cost_mv_ref(cpi, this_mode,
+ mbmi->mb_mode_context[mbmi->ref_frame[0]]);
+
+ mic->bmi[i].as_mv[0].as_int = this_mv->as_int;
+ if (mbmi->ref_frame[1] > 0)
+ mic->bmi[i].as_mv[1].as_int = this_second_mv->as_int;
+
+ x->partition_info->bmi[i].mode = m;
+ x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
+ if (mbmi->ref_frame[1] > 0)
+ x->partition_info->bmi[i].second_mv.as_int = this_second_mv->as_int;
+ for (idy = 0; idy < bh; ++idy) {
+ for (idx = 0; idx < bw; ++idx) {
+ vpx_memcpy(&mic->bmi[i + idy * 2 + idx],
+ &mic->bmi[i], sizeof(mic->bmi[i]));
+ vpx_memcpy(&x->partition_info->bmi[i + idy * 2 + idx],
+ &x->partition_info->bmi[i],
+ sizeof(x->partition_info->bmi[i]));
+ }
+ }
+
+ cost += thismvcost;
+ return cost;
+}
+
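+// Builds the inter predictor for sub-block i (adding the second reference
+// in compound mode), then transforms, quantizes and costs its 4x4 blocks,
+// returning the combined RD cost for the label.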
+static int64_t encode_inter_mb_segment(VP9_COMMON *const cm,
+ MACROBLOCK *x,
+ int i,
+ int *labelyrate,
+ int *distortion,
+ ENTROPY_CONTEXT *ta,
+ ENTROPY_CONTEXT *tl) {
+ int k;
+ MACROBLOCKD *xd = &x->e_mbd;
+ BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
+ int bwl = b_width_log2(bsize), bw = 1 << bwl;
+ int bhl = b_height_log2(bsize), bh = 1 << bhl;
+ int idx, idy;
+ const int src_stride = x->plane[0].src.stride;
+ uint8_t* const src =
+ raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+ x->plane[0].src.buf, src_stride);
+ int16_t* src_diff =
+ raster_block_offset_int16(xd, BLOCK_SIZE_SB8X8, 0, i,
+ x->plane[0].src_diff);
+ int16_t* coeff = BLOCK_OFFSET(x->plane[0].coeff, 16, i);
+ uint8_t* const pre =
+ raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+ xd->plane[0].pre[0].buf,
+ xd->plane[0].pre[0].stride);
+ uint8_t* const dst =
+ raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+ xd->plane[0].dst.buf,
+ xd->plane[0].dst.stride);
+ int thisdistortion = 0;
+ int thisrate = 0;
+
+ *labelyrate = 0;
+ *distortion = 0;
+
+ vp9_build_inter_predictor(pre,
+ xd->plane[0].pre[0].stride,
+ dst,
+ xd->plane[0].dst.stride,
+ &xd->mode_info_context->bmi[i].as_mv[0],
+ &xd->scale_factor[0],
+ 4 * bw, 4 * bh, 0 /* no avg */, &xd->subpix);
+
+ // TODO(debargha): Make this work properly with the
+ // implicit-compoundinter-weight experiment when implicit
+ // weighting for splitmv modes is turned on.
+ if (xd->mode_info_context->mbmi.ref_frame[1] > 0) {
+ uint8_t* const second_pre =
+ raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+ xd->plane[0].pre[1].buf,
+ xd->plane[0].pre[1].stride);
+ vp9_build_inter_predictor(second_pre, xd->plane[0].pre[1].stride,
+ dst, xd->plane[0].dst.stride,
+ &xd->mode_info_context->bmi[i].as_mv[1],
+ &xd->scale_factor[1], 4 * bw, 4 * bh, 1,
+ &xd->subpix);
+ }
+
+ vp9_subtract_block(4 * bh, 4 * bw, src_diff, 8,
+ src, src_stride,
+ dst, xd->plane[0].dst.stride);
+
+ k = i;
+ for (idy = 0; idy < bh; ++idy) {
+ for (idx = 0; idx < bw; ++idx) {
+ k += (idy * 2 + idx);
+ src_diff = raster_block_offset_int16(xd, BLOCK_SIZE_SB8X8, 0, k,
+ x->plane[0].src_diff);
+ coeff = BLOCK_OFFSET(x->plane[0].coeff, 16, k);
+ x->fwd_txm4x4(src_diff, coeff, 16);
+ x->quantize_b_4x4(x, k, DCT_DCT, 16);
+ thisdistortion += vp9_block_error(coeff,
+ BLOCK_OFFSET(xd->plane[0].dqcoeff,
+ k, 16), 16);
+ thisrate += cost_coeffs(cm, x, 0, k, PLANE_TYPE_Y_WITH_DC,
+ ta + (k & 1),
+ tl + (k >> 1), TX_4X4, 16);
+ }
+ }
+ *distortion += thisdistortion;
+ *labelyrate += thisrate;
+
+ *distortion >>= 2;
+ return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion);
+}
+
+typedef struct {
+ int_mv *ref_mv, *second_ref_mv;
+ int_mv mvp;
+
+ int64_t segment_rd;
+ int r;
+ int d;
+ int segment_yrate;
+ MB_PREDICTION_MODE modes[4];
+ int_mv mvs[4], second_mvs[4];
+ int eobs[4];
+ int mvthresh;
+} BEST_SEG_INFO;
+
+static INLINE int mv_check_bounds(MACROBLOCK *x, int_mv *mv) {
+ int r = 0;
+ r |= (mv->as_mv.row >> 3) < x->mv_row_min;
+ r |= (mv->as_mv.row >> 3) > x->mv_row_max;
+ r |= (mv->as_mv.col >> 3) < x->mv_col_min;
+ r |= (mv->as_mv.col >> 3) > x->mv_col_max;
+ return r;
+}
+
+static enum BlockSize get_block_size(int bw, int bh) {
+ if (bw == 4 && bh == 4)
+ return BLOCK_4X4;
+
+ if (bw == 4 && bh == 8)
+ return BLOCK_4X8;
+
+ if (bw == 8 && bh == 4)
+ return BLOCK_8X4;
+
+ if (bw == 8 && bh == 8)
+ return BLOCK_8X8;
+
+ if (bw == 8 && bh == 16)
+ return BLOCK_8X16;
+
+ if (bw == 16 && bh == 8)
+ return BLOCK_16X8;
+
+ if (bw == 16 && bh == 16)
+ return BLOCK_16X16;
+
+ if (bw == 32 && bh == 32)
+ return BLOCK_32X32;
+
+ if (bw == 32 && bh == 16)
+ return BLOCK_32X16;
+
+ if (bw == 16 && bh == 32)
+ return BLOCK_16X32;
+
+ if (bw == 64 && bh == 32)
+ return BLOCK_64X32;
+
+ if (bw == 32 && bh == 64)
+ return BLOCK_32X64;
+
+ if (bw == 64 && bh == 64)
+ return BLOCK_64X64;
+
+ assert(0);
+ return -1;
+}
+
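+// Temporarily repoint the plane-0 source and prediction buffers at
+// sub-block i of the 8x8 so the block-level motion search code can be
+// reused unchanged; mi_buf_restore() undoes the shift.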
+static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
+ MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
+ x->plane[0].src.buf =
+ raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_SB8X8, 0, i,
+ x->plane[0].src.buf,
+ x->plane[0].src.stride);
+ assert(((intptr_t)x->e_mbd.plane[0].pre[0].buf & 0x7) == 0);
+ x->e_mbd.plane[0].pre[0].buf =
+ raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_SB8X8, 0, i,
+ x->e_mbd.plane[0].pre[0].buf,
+ x->e_mbd.plane[0].pre[0].stride);
+ if (mbmi->ref_frame[1])
+ x->e_mbd.plane[0].pre[1].buf =
+ raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_SB8X8, 0, i,
+ x->e_mbd.plane[0].pre[1].buf,
+ x->e_mbd.plane[0].pre[1].stride);
+}
+
+static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
+ struct buf_2d orig_pre[2]) {
+ MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
+ x->plane[0].src = orig_src;
+ x->e_mbd.plane[0].pre[0] = orig_pre[0];
+ if (mbmi->ref_frame[1])
+ x->e_mbd.plane[0].pre[1] = orig_pre[1];
+}
+
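+// Sub-8x8 segmentation search: for each label, try NEARESTMV, NEARMV,
+// ZEROMV and NEWMV (running a fresh motion search for NEWMV) and keep the
+// best mode per label by RD cost. Results are committed to 'bsi' only if
+// the whole segmentation beats the best RD seen so far.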
+static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
+ BEST_SEG_INFO *bsi,
+ int_mv seg_mvs[4][MAX_REF_FRAMES],
+ int mi_row, int mi_col) {
+ int i, j;
+ int br = 0, bd = 0;
+ MB_PREDICTION_MODE this_mode;
+ MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
+ const int label_count = 4;
+ int64_t this_segment_rd = 0, other_segment_rd;
+ int label_mv_thresh;
+ int rate = 0;
+ int sbr = 0, sbd = 0;
+ int segmentyrate = 0;
+ int best_eobs[4] = { 0 };
+ BLOCK_SIZE_TYPE bsize = mbmi->sb_type;
+ int bwl = b_width_log2(bsize), bw = 1 << bwl;
+ int bhl = b_height_log2(bsize), bh = 1 << bhl;
+ int idx, idy;
+ vp9_variance_fn_ptr_t *v_fn_ptr;
+ ENTROPY_CONTEXT t_above[4], t_left[4];
+ ENTROPY_CONTEXT t_above_b[4], t_left_b[4];
+
+ vpx_memcpy(t_above, x->e_mbd.plane[0].above_context, sizeof(t_above));
+ vpx_memcpy(t_left, x->e_mbd.plane[0].left_context, sizeof(t_left));
+
+ v_fn_ptr = &cpi->fn_ptr[get_block_size(4 << bwl, 4 << bhl)];
+
+  // The multiplier scales how aggressively we skip motion searches per
+  // segment: 64 would make this threshold so large that we very rarely
+  // check MVs on segments, while 1 (used below) makes the MV threshold
+  // roughly equal to what it is for macroblocks.
+ label_mv_thresh = 1 * bsi->mvthresh / label_count;
+
+ // Segmentation method overheads
+ other_segment_rd = this_segment_rd;
+
+ for (idy = 0; idy < 2; idy += bh) {
+ for (idx = 0; idx < 2; idx += bw) {
+      // TODO(jingning,rbultje): rewrite the rate-distortion optimization
+      // loop for 4x4/4x8/8x4 block coding; to be replaced with a new RD loop.
+ int_mv mode_mv[MB_MODE_COUNT], second_mode_mv[MB_MODE_COUNT];
+ int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
+ int64_t best_label_rd = INT64_MAX, best_other_rd = INT64_MAX;
+ MB_PREDICTION_MODE mode_selected = ZEROMV;
+ int bestlabelyrate = 0;
+ i = idy * 2 + idx;
+
+ frame_mv[ZEROMV][mbmi->ref_frame[0]].as_int = 0;
+ frame_mv[ZEROMV][mbmi->ref_frame[1]].as_int = 0;
+ vp9_append_sub8x8_mvs_for_idx(&cpi->common, &x->e_mbd,
+ &frame_mv[NEARESTMV][mbmi->ref_frame[0]],
+ &frame_mv[NEARMV][mbmi->ref_frame[0]],
+ i, 0);
+ if (mbmi->ref_frame[1] > 0)
+ vp9_append_sub8x8_mvs_for_idx(&cpi->common, &x->e_mbd,
+ &frame_mv[NEARESTMV][mbmi->ref_frame[1]],
+ &frame_mv[NEARMV][mbmi->ref_frame[1]],
+ i, 1);
+
+ // search for the best motion vector on this segment
+ for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
+ int64_t this_rd;
+ int distortion;
+ int labelyrate;
+ ENTROPY_CONTEXT t_above_s[4], t_left_s[4];
+ const struct buf_2d orig_src = x->plane[0].src;
+ struct buf_2d orig_pre[2];
+
+ vpx_memcpy(orig_pre, x->e_mbd.plane[0].pre, sizeof(orig_pre));
+
+ vpx_memcpy(t_above_s, t_above, sizeof(t_above_s));
+ vpx_memcpy(t_left_s, t_left, sizeof(t_left_s));
+
+ // motion search for newmv (single predictor case only)
+ if (mbmi->ref_frame[1] <= 0 && this_mode == NEWMV) {
+ int step_param = 0;
+ int further_steps;
+ int thissme, bestsme = INT_MAX;
+ int sadpb = x->sadperbit4;
+ int_mv mvp_full;
+
+          /* Is the best so far sufficiently good that we can't justify
+           * doing a new motion search? */
+ if (best_label_rd < label_mv_thresh)
+ break;
+
+ if (cpi->compressor_speed) {
+ // use previous block's result as next block's MV predictor.
+ if (i > 0) {
+ bsi->mvp.as_int =
+ x->e_mbd.mode_info_context->bmi[i - 1].as_mv[0].as_int;
+ if (i == 2)
+ bsi->mvp.as_int =
+ x->e_mbd.mode_info_context->bmi[i - 2].as_mv[0].as_int;
+ step_param = 2;
+ }
+ }
+
+ further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;
+
+ mvp_full.as_mv.row = bsi->mvp.as_mv.row >> 3;
+ mvp_full.as_mv.col = bsi->mvp.as_mv.col >> 3;
+
+ // adjust src pointer for this block
+ mi_buf_shift(x, i);
+ bestsme = vp9_full_pixel_diamond(cpi, x, &mvp_full, step_param,
+ sadpb, further_steps, 0, v_fn_ptr,
+ bsi->ref_mv, &mode_mv[NEWMV]);
+
+ // Should we do a full search (best quality only)
+ if (cpi->compressor_speed == 0) {
+ /* Check if mvp_full is within the range. */
+ clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max,
+ x->mv_row_min, x->mv_row_max);
+
+ thissme = cpi->full_search_sad(x, &mvp_full,
+ sadpb, 16, v_fn_ptr,
+ x->nmvjointcost, x->mvcost,
+ bsi->ref_mv, i);
+
+ if (thissme < bestsme) {
+ bestsme = thissme;
+ mode_mv[NEWMV].as_int =
+ x->e_mbd.mode_info_context->bmi[i].as_mv[0].as_int;
+ } else {
+              /* The full search result is actually worse so reinstate the
+               * previous best vector */
+ x->e_mbd.mode_info_context->bmi[i].as_mv[0].as_int =
+ mode_mv[NEWMV].as_int;
+ }
+ }
+
+ if (bestsme < INT_MAX) {
+ int distortion;
+ unsigned int sse;
+ cpi->find_fractional_mv_step(x, &mode_mv[NEWMV],
+ bsi->ref_mv, x->errorperbit, v_fn_ptr,
+ x->nmvjointcost, x->mvcost,
+ &distortion, &sse);
+
+            // save the motion search result for use in compound prediction
+ seg_mvs[i][mbmi->ref_frame[0]].as_int = mode_mv[NEWMV].as_int;
+ }
+
+ // restore src pointers
+ mi_buf_restore(x, orig_src, orig_pre);
+ } else if (mbmi->ref_frame[1] > 0 && this_mode == NEWMV) {
+ if (seg_mvs[i][mbmi->ref_frame[1]].as_int == INVALID_MV ||
+ seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV)
+ continue;
+
+ // adjust src pointers
+ mi_buf_shift(x, i);
+ if (cpi->sf.comp_inter_joint_search_thresh < bsize) {
+ int rate_mv;
+ joint_motion_search(cpi, x, bsize, frame_mv[this_mode],
+ mi_row, mi_col, seg_mvs[i],
+ &rate_mv);
+ seg_mvs[i][mbmi->ref_frame[0]].as_int =
+ frame_mv[this_mode][mbmi->ref_frame[0]].as_int;
+ seg_mvs[i][mbmi->ref_frame[1]].as_int =
+ frame_mv[this_mode][mbmi->ref_frame[1]].as_int;
+ }
+ // restore src pointers
+ mi_buf_restore(x, orig_src, orig_pre);
+ }
+
+ rate = labels2mode(x, i, this_mode, &mode_mv[this_mode],
+ &second_mode_mv[this_mode], frame_mv, seg_mvs[i],
+ bsi->ref_mv, bsi->second_ref_mv, x->nmvjointcost,
+ x->mvcost, cpi);
+
+ // Trap vectors that reach beyond the UMV borders
+ if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
+ ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
+ ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
+ ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
+ continue;
+ }
+ if (mbmi->ref_frame[1] > 0 &&
+ mv_check_bounds(x, &second_mode_mv[this_mode]))
+ continue;
+
+ this_rd = encode_inter_mb_segment(&cpi->common,
+ x, i, &labelyrate,
+ &distortion, t_above_s, t_left_s);
+ this_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
+ rate += labelyrate;
+
+ if (this_rd < best_label_rd) {
+ sbr = rate;
+ sbd = distortion;
+ bestlabelyrate = labelyrate;
+ mode_selected = this_mode;
+ best_label_rd = this_rd;
+ best_eobs[i] = x->e_mbd.plane[0].eobs[i];
+ vpx_memcpy(t_above_b, t_above_s, sizeof(t_above_s));
+ vpx_memcpy(t_left_b, t_left_s, sizeof(t_left_s));
+ }
+ } /*for each 4x4 mode*/
+
+ vpx_memcpy(t_above, t_above_b, sizeof(t_above));
+ vpx_memcpy(t_left, t_left_b, sizeof(t_left));
+
+ labels2mode(x, i, mode_selected, &mode_mv[mode_selected],
+ &second_mode_mv[mode_selected], frame_mv, seg_mvs[i],
+ bsi->ref_mv, bsi->second_ref_mv, x->nmvjointcost,
+ x->mvcost, cpi);
+
+ br += sbr;
+ bd += sbd;
+ segmentyrate += bestlabelyrate;
+ this_segment_rd += best_label_rd;
+ other_segment_rd += best_other_rd;
+
+ for (j = 1; j < bh; ++j)
+ vpx_memcpy(&x->partition_info->bmi[i + j * 2],
+ &x->partition_info->bmi[i],
+ sizeof(x->partition_info->bmi[i]));
+ for (j = 1; j < bw; ++j)
+ vpx_memcpy(&x->partition_info->bmi[i + j],
+ &x->partition_info->bmi[i],
+ sizeof(x->partition_info->bmi[i]));
+ }
+ } /* for each label */
+
+ if (this_segment_rd < bsi->segment_rd) {
+ bsi->r = br;
+ bsi->d = bd;
+ bsi->segment_yrate = segmentyrate;
+ bsi->segment_rd = this_segment_rd;
+
+    // Store everything needed to come back to this segmentation.
+ for (i = 0; i < 4; i++) {
+ bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv;
+ if (mbmi->ref_frame[1] > 0)
+ bsi->second_mvs[i].as_mv = x->partition_info->bmi[i].second_mv.as_mv;
+ bsi->modes[i] = x->partition_info->bmi[i].mode;
+ bsi->eobs[i] = best_eobs[i];
+ }
+ }
+}
+
+static int rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x,
+ int_mv *best_ref_mv,
+ int_mv *second_best_ref_mv,
+ int64_t best_rd,
+ int *returntotrate,
+ int *returnyrate,
+ int *returndistortion,
+ int *skippable, int mvthresh,
+ int_mv seg_mvs[4][MAX_REF_FRAMES],
+ int mi_row, int mi_col) {
+ int i;
+ BEST_SEG_INFO bsi;
+ MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
+
+ vpx_memset(&bsi, 0, sizeof(bsi));
+
+ bsi.segment_rd = best_rd;
+ bsi.ref_mv = best_ref_mv;
+ bsi.second_ref_mv = second_best_ref_mv;
+ bsi.mvp.as_int = best_ref_mv->as_int;
+ bsi.mvthresh = mvthresh;
+
+ for (i = 0; i < 4; i++)
+ bsi.modes[i] = ZEROMV;
+
+ rd_check_segment_txsize(cpi, x, &bsi, seg_mvs, mi_row, mi_col);
+
+ /* set it to the best */
+ for (i = 0; i < 4; i++) {
+ x->e_mbd.mode_info_context->bmi[i].as_mv[0].as_int = bsi.mvs[i].as_int;
+ if (mbmi->ref_frame[1] > 0)
+ x->e_mbd.mode_info_context->bmi[i].as_mv[1].as_int =
+ bsi.second_mvs[i].as_int;
+ x->e_mbd.plane[0].eobs[i] = bsi.eobs[i];
+ }
+
+ /* save partitions */
+ x->partition_info->count = 4;
+
+ for (i = 0; i < x->partition_info->count; i++) {
+ x->partition_info->bmi[i].mode = bsi.modes[i];
+ x->partition_info->bmi[i].mv.as_mv = bsi.mvs[i].as_mv;
+ if (mbmi->ref_frame[1] > 0)
+ x->partition_info->bmi[i].second_mv.as_mv = bsi.second_mvs[i].as_mv;
+ }
+ /*
+ * used to set mbmi->mv.as_int
+ */
+ x->partition_info->bmi[3].mv.as_int = bsi.mvs[3].as_int;
+ if (mbmi->ref_frame[1] > 0)
+ x->partition_info->bmi[3].second_mv.as_int = bsi.second_mvs[3].as_int;
+
+ *returntotrate = bsi.r;
+ *returndistortion = bsi.d;
+ *returnyrate = bsi.segment_yrate;
+ *skippable = vp9_sby_is_skippable(&x->e_mbd, BLOCK_SIZE_SB8X8);
+ mbmi->mode = bsi.modes[3];
+
+ return (int)(bsi.segment_rd);
+}
+
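+// Picks, per reference frame, the candidate reference MV whose full-pel
+// displacement gives the lowest SAD against the source block; the index
+// is saved as the starting point for later motion searches.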
+static void mv_pred(VP9_COMP *cpi, MACROBLOCK *x,
+ uint8_t *ref_y_buffer, int ref_y_stride,
+ int ref_frame, enum BlockSize block_size ) {
+ MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ int_mv this_mv;
+ int i;
+ int zero_seen = 0;
+ int best_index = 0;
+ int best_sad = INT_MAX;
+ int this_sad = INT_MAX;
+
+ uint8_t *src_y_ptr = x->plane[0].src.buf;
+ uint8_t *ref_y_ptr;
+ int row_offset, col_offset;
+
+ // Get the sad for each candidate reference mv
+ for (i = 0; i < MAX_MV_REF_CANDIDATES; i++) {
+ this_mv.as_int = mbmi->ref_mvs[ref_frame][i].as_int;
+
+ // The list is at an end if we see 0 for a second time.
+ if (!this_mv.as_int && zero_seen)
+ break;
+ zero_seen = zero_seen || !this_mv.as_int;
+
+ row_offset = this_mv.as_mv.row >> 3;
+ col_offset = this_mv.as_mv.col >> 3;
+ ref_y_ptr = ref_y_buffer + (ref_y_stride * row_offset) + col_offset;
+
+ // Find sad for current vector.
+ this_sad = cpi->fn_ptr[block_size].sdf(src_y_ptr, x->plane[0].src.stride,
+ ref_y_ptr, ref_y_stride,
+ 0x7fffffff);
+
+ // Note if it is the best so far.
+ if (this_sad < best_sad) {
+ best_sad = this_sad;
+ best_index = i;
+ }
+ }
+
+ // Note the index of the mv that worked best in the reference list.
+ x->mv_best_ref_index[ref_frame] = best_index;
+}
+
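+// Precomputes reference-frame signalling costs from the frame's
+// prediction probabilities. If the segment pins the reference frame, all
+// costs are zero. The 512 fallbacks for disabled prediction types appear
+// to be a nominal two-bit cost (rates are in 1/256-bit units).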
+static void estimate_ref_frame_costs(VP9_COMP *cpi, int segment_id,
+ unsigned int *ref_costs_single,
+ unsigned int *ref_costs_comp,
+ vp9_prob *comp_mode_p) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ int seg_ref_active = vp9_segfeature_active(xd, segment_id,
+ SEG_LVL_REF_FRAME);
+ if (seg_ref_active) {
+ vpx_memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single));
+ vpx_memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
+ *comp_mode_p = 128;
+ } else {
+ vp9_prob intra_inter_p = vp9_get_pred_prob(cm, xd, PRED_INTRA_INTER);
+ vp9_prob comp_inter_p = 128;
+
+ if (cm->comp_pred_mode == HYBRID_PREDICTION) {
+ comp_inter_p = vp9_get_pred_prob(cm, xd, PRED_COMP_INTER_INTER);
+ *comp_mode_p = comp_inter_p;
+ } else {
+ *comp_mode_p = 128;
+ }
+
+ ref_costs_single[INTRA_FRAME] = vp9_cost_bit(intra_inter_p, 0);
+
+ if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) {
+ vp9_prob ref_single_p1 = vp9_get_pred_prob(cm, xd, PRED_SINGLE_REF_P1);
+ vp9_prob ref_single_p2 = vp9_get_pred_prob(cm, xd, PRED_SINGLE_REF_P2);
+ unsigned int base_cost = vp9_cost_bit(intra_inter_p, 1);
+
+ if (cm->comp_pred_mode == HYBRID_PREDICTION)
+ base_cost += vp9_cost_bit(comp_inter_p, 0);
+
+ ref_costs_single[LAST_FRAME] = ref_costs_single[GOLDEN_FRAME] =
+ ref_costs_single[ALTREF_FRAME] = base_cost;
+ ref_costs_single[LAST_FRAME] += vp9_cost_bit(ref_single_p1, 0);
+ ref_costs_single[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p1, 1);
+ ref_costs_single[ALTREF_FRAME] += vp9_cost_bit(ref_single_p1, 1);
+ ref_costs_single[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p2, 0);
+ ref_costs_single[ALTREF_FRAME] += vp9_cost_bit(ref_single_p2, 1);
+ } else {
+ ref_costs_single[LAST_FRAME] = 512;
+ ref_costs_single[GOLDEN_FRAME] = 512;
+ ref_costs_single[ALTREF_FRAME] = 512;
+ }
+ if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY) {
+ vp9_prob ref_comp_p = vp9_get_pred_prob(cm, xd, PRED_COMP_REF_P);
+ unsigned int base_cost = vp9_cost_bit(intra_inter_p, 1);
+
+ if (cm->comp_pred_mode == HYBRID_PREDICTION)
+ base_cost += vp9_cost_bit(comp_inter_p, 1);
+
+ ref_costs_comp[LAST_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 0);
+ ref_costs_comp[GOLDEN_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 1);
+ } else {
+ ref_costs_comp[LAST_FRAME] = 512;
+ ref_costs_comp[GOLDEN_FRAME] = 512;
+ }
+ }
+}
+
+static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
+ int mode_index,
+ PARTITION_INFO *partition,
+ int_mv *ref_mv,
+ int_mv *second_ref_mv,
+ int64_t comp_pred_diff[NB_PREDICTION_TYPES],
+ int64_t txfm_size_diff[NB_TXFM_MODES]) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+
+ // Take a snapshot of the coding context so it can be
+ // restored if we decide to encode this way
+ ctx->skip = x->skip;
+ ctx->best_mode_index = mode_index;
+ ctx->mic = *xd->mode_info_context;
+
+ if (partition)
+ ctx->partition_info = *partition;
+
+ ctx->best_ref_mv.as_int = ref_mv->as_int;
+ ctx->second_best_ref_mv.as_int = second_ref_mv->as_int;
+
+ ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_PREDICTION_ONLY];
+ ctx->comp_pred_diff = (int)comp_pred_diff[COMP_PREDICTION_ONLY];
+ ctx->hybrid_pred_diff = (int)comp_pred_diff[HYBRID_PREDICTION];
+
+ memcpy(ctx->txfm_rd_diff, txfm_size_diff, sizeof(ctx->txfm_rd_diff));
+}
+
+static void setup_pred_block(const MACROBLOCKD *xd,
+ struct buf_2d dst[MAX_MB_PLANE],
+ const YV12_BUFFER_CONFIG *src,
+ int mi_row, int mi_col,
+ const struct scale_factors *scale,
+ const struct scale_factors *scale_uv) {
+ int i;
+
+ dst[0].buf = src->y_buffer;
+ dst[0].stride = src->y_stride;
+ dst[1].buf = src->u_buffer;
+ dst[2].buf = src->v_buffer;
+ dst[1].stride = dst[2].stride = src->uv_stride;
+#if CONFIG_ALPHA
+ dst[3].buf = src->alpha_buffer;
+ dst[3].stride = src->alpha_stride;
+#endif
+
+ // TODO(jkoleszar): Make scale factors per-plane data
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ setup_pred_plane(dst + i, dst[i].buf, dst[i].stride, mi_row, mi_col,
+ i ? scale_uv : scale,
+ xd->plane[i].subsampling_x, xd->plane[i].subsampling_y);
+ }
+}
+
+static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
+ int idx, MV_REFERENCE_FRAME frame_type,
+ enum BlockSize block_size,
+ int mi_row, int mi_col,
+ int_mv frame_nearest_mv[MAX_REF_FRAMES],
+ int_mv frame_near_mv[MAX_REF_FRAMES],
+ struct buf_2d yv12_mb[4][MAX_MB_PLANE],
+ struct scale_factors scale[MAX_REF_FRAMES]) {
+ VP9_COMMON *cm = &cpi->common;
+ YV12_BUFFER_CONFIG *yv12 = &cm->yv12_fb[cpi->common.ref_frame_map[idx]];
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+
+ // set up scaling factors
+ scale[frame_type] = cpi->common.active_ref_scale[frame_type - 1];
+
+ scale[frame_type].x_offset_q4 =
+ ROUND_POWER_OF_TWO(mi_col * MI_SIZE * scale[frame_type].x_scale_fp,
+ VP9_REF_SCALE_SHIFT) & 0xf;
+ scale[frame_type].y_offset_q4 =
+ ROUND_POWER_OF_TWO(mi_row * MI_SIZE * scale[frame_type].y_scale_fp,
+ VP9_REF_SCALE_SHIFT) & 0xf;
+
+ // TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
+ // use the UV scaling factors.
+ setup_pred_block(xd, yv12_mb[frame_type], yv12, mi_row, mi_col,
+ &scale[frame_type], &scale[frame_type]);
+
+ // Gets an initial list of candidate vectors from neighbours and orders them
+ vp9_find_mv_refs(&cpi->common, xd, xd->mode_info_context,
+ xd->prev_mode_info_context,
+ frame_type,
+ mbmi->ref_mvs[frame_type],
+ cpi->common.ref_frame_sign_bias);
+
+ // Candidate refinement carried out at encoder and decoder
+ vp9_find_best_ref_mvs(xd,
+ mbmi->ref_mvs[frame_type],
+ &frame_nearest_mv[frame_type],
+ &frame_near_mv[frame_type]);
+
+ // Further refinement that is encode side only to test the top few candidates
+ // in full and choose the best as the centre point for subsequent searches.
+ // The current implementation doesn't support scaling.
+ if (scale[frame_type].x_scale_fp == (1 << VP9_REF_SCALE_SHIFT) &&
+ scale[frame_type].y_scale_fp == (1 << VP9_REF_SCALE_SHIFT))
+ mv_pred(cpi, x, yv12_mb[frame_type][0].buf, yv12->y_stride,
+ frame_type, block_size);
+}
+
+static YV12_BUFFER_CONFIG *get_scaled_ref_frame(VP9_COMP *cpi, int ref_frame) {
+ YV12_BUFFER_CONFIG *scaled_ref_frame = NULL;
+ int fb = get_ref_frame_idx(cpi, ref_frame);
+ if (cpi->scaled_ref_idx[fb] != cpi->common.ref_frame_map[fb])
+ scaled_ref_frame = &cpi->common.yv12_fb[cpi->scaled_ref_idx[fb]];
+ return scaled_ref_frame;
+}
+
+static void model_rd_from_var_lapndz(int var, int n, int qstep,
+ int *rate, int *dist) {
+ // This function models the rate and distortion for a Laplacian
+ // source with given variance when quantized with a uniform quantizer
+ // with given stepsize. The closed form expressions are in:
+ // Hang and Chen, "Source Model for transform video coder and its
+ // application - Part I: Fundamental Theory", IEEE Trans. Circ.
+ // Sys. for Video Tech., April 1997.
+ // The function is implemented as piecewise approximation to the
+ // exact computation.
+ // TODO(debargha): Implement the functions by interpolating from a
+ // look-up table
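+  // In the approximations below, x = qstep / s is the quantizer step
+  // relative to the source standard deviation; D is per-sample distortion
+  // normalized by the variance s2 and R per-sample rate in bits, hence
+  // the final scaling by n * s2 and by n * 256 (rate appears to be in
+  // libvpx's 1/256-bit units).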
+ vp9_clear_system_state();
+ if (var == 0 || n == 0) {
+ *rate = 0;
+ *dist = 0;
+ } else {
+ double D, R;
+ double s2 = (double) var / n;
+ double s = sqrt(s2);
+ double x = qstep / s;
+ if (x > 1.0) {
+ double y = exp(-x / 2);
+ double y2 = y * y;
+ D = 2.069981728764738 * y2 - 2.764286806516079 * y + 1.003956960819275;
+ R = 0.924056758535089 * y2 + 2.738636469814024 * y - 0.005169662030017;
+ } else {
+ double x2 = x * x;
+ D = 0.075303187668830 * x2 + 0.004296954321112 * x - 0.000413209252807;
+ if (x > 0.125)
+ R = 1 / (-0.03459733614226 * x2 + 0.36561675733603 * x +
+ 0.1626989668625);
+ else
+ R = -1.442252874826093 * log(x) + 1.944647760719664;
+ }
+ if (R < 0) {
+ *rate = 0;
+ *dist = var;
+ } else {
+ *rate = (n * R * 256 + 0.5);
+ *dist = (n * D * s2 + 0.5);
+ }
+ }
+ vp9_clear_system_state();
+}
+
+static enum BlockSize get_plane_block_size(BLOCK_SIZE_TYPE bsize,
+ struct macroblockd_plane *pd) {
+ return get_block_size(plane_block_width(bsize, pd),
+ plane_block_height(bsize, pd));
+}
+
+static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE_TYPE bsize,
+ MACROBLOCK *x, MACROBLOCKD *xd,
+ int *out_rate_sum, int *out_dist_sum) {
+ // Note our transform coeffs are 8 times an orthogonal transform.
+ // Hence quantizer step is also 8 times. To get effective quantizer
+ // we need to divide by 8 before sending to modeling function.
+ unsigned int sse, var;
+ int i, rate_sum = 0, dist_sum = 0;
+
+ for (i = 0; i < MAX_MB_PLANE; ++i) {
+ struct macroblock_plane *const p = &x->plane[i];
+ struct macroblockd_plane *const pd = &xd->plane[i];
+
+ // TODO(dkovalev) the same code in get_plane_block_size
+ const int bw = plane_block_width(bsize, pd);
+ const int bh = plane_block_height(bsize, pd);
+ const enum BlockSize bs = get_block_size(bw, bh);
+ int rate, dist;
+ var = cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride,
+ pd->dst.buf, pd->dst.stride, &sse);
+ model_rd_from_var_lapndz(var, bw * bh, pd->dequant[1] >> 3, &rate, &dist);
+
+ rate_sum += rate;
+ dist_sum += dist;
+ }
+
+ *out_rate_sum = rate_sum;
+ *out_dist_sum = dist_sum;
+}
+
+static INLINE int get_switchable_rate(VP9_COMMON *cm, MACROBLOCK *x) {
+ MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+
+ const int c = vp9_get_pred_context(cm, xd, PRED_SWITCHABLE_INTERP);
+ const int m = vp9_switchable_interp_map[mbmi->interp_filter];
+ return SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs[c][m];
+}
+
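+// Single-reference NEWMV search: optionally swap in a pre-scaled version
+// of the reference frame, clamp the search range around the predicted MV,
+// run a full-pel diamond search, then refine to sub-pel precision and
+// report the MV signalling cost.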
+static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE_TYPE bsize,
+ int mi_row, int mi_col,
+ int_mv *tmp_mv, int *rate_mv) {
+ MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0}};
+ int bestsme = INT_MAX;
+ int further_steps, step_param = cpi->sf.first_step;
+ int sadpb = x->sadperbit16;
+ int_mv mvp_full;
+ int ref = mbmi->ref_frame[0];
+ int_mv ref_mv = mbmi->ref_mvs[ref][0];
+ int sr = 0;
+ const enum BlockSize block_size = get_plane_block_size(bsize, &xd->plane[0]);
+
+ int tmp_col_min = x->mv_col_min;
+ int tmp_col_max = x->mv_col_max;
+ int tmp_row_min = x->mv_row_min;
+ int tmp_row_max = x->mv_row_max;
+
+ YV12_BUFFER_CONFIG *scaled_ref_frame = get_scaled_ref_frame(cpi, ref);
+
+ if (scaled_ref_frame) {
+ int i;
+ // Swap out the reference frame for a version that's been scaled to
+ // match the resolution of the current frame, allowing the existing
+ // motion search code to be used without additional modifications.
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ backup_yv12[i] = xd->plane[i].pre[0];
+
+ setup_pre_planes(xd, scaled_ref_frame, NULL, mi_row, mi_col,
+ NULL, NULL);
+ }
+
+ vp9_clamp_mv_min_max(x, &ref_mv);
+
+ sr = vp9_init_search_range(cpi->common.width, cpi->common.height);
+
+ // mvp_full.as_int = ref_mv[0].as_int;
+ mvp_full.as_int =
+ mbmi->ref_mvs[ref][x->mv_best_ref_index[ref]].as_int;
+
+ mvp_full.as_mv.col >>= 3;
+ mvp_full.as_mv.row >>= 3;
+
+ // adjust search range according to sr from mv prediction
+ step_param = MAX(step_param, sr);
+
+ // Further step/diamond searches as necessary
+ further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
+
+ bestsme = vp9_full_pixel_diamond(cpi, x, &mvp_full, step_param,
+ sadpb, further_steps, 1,
+ &cpi->fn_ptr[block_size],
+ &ref_mv, tmp_mv);
+
+ x->mv_col_min = tmp_col_min;
+ x->mv_col_max = tmp_col_max;
+ x->mv_row_min = tmp_row_min;
+ x->mv_row_max = tmp_row_max;
+
+ if (bestsme < INT_MAX) {
+ int dis; /* TODO: use dis in distortion calculation later. */
+ unsigned int sse;
+ cpi->find_fractional_mv_step(x, tmp_mv, &ref_mv,
+ x->errorperbit,
+ &cpi->fn_ptr[block_size],
+ x->nmvjointcost, x->mvcost,
+ &dis, &sse);
+ }
+ *rate_mv = vp9_mv_bit_cost(tmp_mv, &ref_mv,
+ x->nmvjointcost, x->mvcost,
+ 96, xd->allow_high_precision_mv);
+ if (scaled_ref_frame) {
+ int i;
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ xd->plane[i].pre[0] = backup_yv12[i];
+ }
+}
+
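+// Compound-mode joint motion search: the two MVs are refined alternately.
+// Each iteration holds one reference's predictor fixed (built into
+// second_pred) while a small-range full-pel search plus sub-pel
+// refinement improves the other MV; iteration stops after four rounds or
+// when an iteration fails to reduce the error.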
+static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE_TYPE bsize,
+ int_mv *frame_mv,
+ int mi_row, int mi_col,
+ int_mv single_newmv[MAX_REF_FRAMES],
+ int *rate_mv) {
+ int pw = 4 << b_width_log2(bsize), ph = 4 << b_height_log2(bsize);
+ MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ int refs[2] = { mbmi->ref_frame[0],
+ (mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
+ int_mv ref_mv[2];
+ const enum BlockSize block_size = get_plane_block_size(bsize, &xd->plane[0]);
+ int ite;
+ // Prediction buffer from second frame.
+ uint8_t *second_pred = vpx_memalign(16, pw * ph * sizeof(uint8_t));
+
+ // Do joint motion search in compound mode to get more accurate mv.
+ struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0}};
+ struct buf_2d backup_second_yv12[MAX_MB_PLANE] = {{0}};
+ struct buf_2d scaled_first_yv12;
+ int last_besterr[2] = {INT_MAX, INT_MAX};
+ YV12_BUFFER_CONFIG *scaled_ref_frame[2] = {NULL, NULL};
+ scaled_ref_frame[0] = get_scaled_ref_frame(cpi, mbmi->ref_frame[0]);
+ scaled_ref_frame[1] = get_scaled_ref_frame(cpi, mbmi->ref_frame[1]);
+
+ ref_mv[0] = mbmi->ref_mvs[refs[0]][0];
+ ref_mv[1] = mbmi->ref_mvs[refs[1]][0];
+
+ if (scaled_ref_frame[0]) {
+ int i;
+ // Swap out the reference frame for a version that's been scaled to
+ // match the resolution of the current frame, allowing the existing
+ // motion search code to be used without additional modifications.
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ backup_yv12[i] = xd->plane[i].pre[0];
+ setup_pre_planes(xd, scaled_ref_frame[0], NULL, mi_row, mi_col,
+ NULL, NULL);
+ }
+
+ if (scaled_ref_frame[1]) {
+ int i;
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ backup_second_yv12[i] = xd->plane[i].pre[1];
+
+ setup_pre_planes(xd, scaled_ref_frame[1], NULL, mi_row, mi_col,
+ NULL, NULL);
+ }
+
+ xd->scale_factor[0].set_scaled_offsets(&xd->scale_factor[0],
+ mi_row, mi_col);
+ xd->scale_factor[1].set_scaled_offsets(&xd->scale_factor[1],
+ mi_row, mi_col);
+ scaled_first_yv12 = xd->plane[0].pre[0];
+
+ // Initialize mv using single prediction mode result.
+ frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
+ frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
+
+ // Allow joint search multiple times iteratively for each ref frame
+ // and break out the search loop if it couldn't find better mv.
+ for (ite = 0; ite < 4; ite++) {
+ struct buf_2d ref_yv12[2];
+ int bestsme = INT_MAX;
+ int sadpb = x->sadperbit16;
+ int_mv tmp_mv;
+ int search_range = 3;
+
+ int tmp_col_min = x->mv_col_min;
+ int tmp_col_max = x->mv_col_max;
+ int tmp_row_min = x->mv_row_min;
+ int tmp_row_max = x->mv_row_max;
+ int id = ite % 2;
+
+    // Initialized here because of a compiler problem in Visual Studio.
+ ref_yv12[0] = xd->plane[0].pre[0];
+ ref_yv12[1] = xd->plane[0].pre[1];
+
+ // Get pred block from second frame.
+ vp9_build_inter_predictor(ref_yv12[!id].buf,
+ ref_yv12[!id].stride,
+ second_pred, pw,
+ &frame_mv[refs[!id]],
+ &xd->scale_factor[!id],
+ pw, ph, 0,
+ &xd->subpix);
+
+ // Compound motion search on first ref frame.
+ if (id)
+ xd->plane[0].pre[0] = ref_yv12[id];
+ vp9_clamp_mv_min_max(x, &ref_mv[id]);
+
+ // Use mv result from single mode as mvp.
+ tmp_mv.as_int = frame_mv[refs[id]].as_int;
+
+ tmp_mv.as_mv.col >>= 3;
+ tmp_mv.as_mv.row >>= 3;
+
+ // Small-range full-pixel motion search
+ bestsme = vp9_refining_search_8p_c(x, &tmp_mv, sadpb,
+ search_range,
+ &cpi->fn_ptr[block_size],
+ x->nmvjointcost, x->mvcost,
+ &ref_mv[id], second_pred,
+ pw, ph);
+
+ x->mv_col_min = tmp_col_min;
+ x->mv_col_max = tmp_col_max;
+ x->mv_row_min = tmp_row_min;
+ x->mv_row_max = tmp_row_max;
+
+ if (bestsme < INT_MAX) {
+ int dis; /* TODO: use dis in distortion calculation later. */
+ unsigned int sse;
+
+ bestsme = vp9_find_best_sub_pixel_comp(x, &tmp_mv,
+ &ref_mv[id],
+ x->errorperbit,
+ &cpi->fn_ptr[block_size],
+ x->nmvjointcost, x->mvcost,
+ &dis, &sse, second_pred,
+ pw, ph);
+ }
+
+ if (id)
+ xd->plane[0].pre[0] = scaled_first_yv12;
+
+ if (bestsme < last_besterr[id]) {
+ frame_mv[refs[id]].as_int = tmp_mv.as_int;
+ last_besterr[id] = bestsme;
+ } else {
+ break;
+ }
+ }
+
+ // restore the predictor
+ if (scaled_ref_frame[0]) {
+ int i;
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ xd->plane[i].pre[0] = backup_yv12[i];
+ }
+
+ if (scaled_ref_frame[1]) {
+ int i;
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ xd->plane[i].pre[1] = backup_second_yv12[i];
+ }
+ *rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]],
+ &mbmi->ref_mvs[refs[0]][0],
+ x->nmvjointcost, x->mvcost, 96,
+ x->e_mbd.allow_high_precision_mv);
+ *rate_mv += vp9_mv_bit_cost(&frame_mv[refs[1]],
+ &mbmi->ref_mvs[refs[1]][0],
+ x->nmvjointcost, x->mvcost, 96,
+ x->e_mbd.allow_high_precision_mv);
+
+ vpx_free(second_pred);
+}
+
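+// RD evaluation of one inter mode: run the motion search for NEWMV,
+// clamp and bounds-check the MVs, pick the best switchable interpolation
+// filter by modelled RD cost, build the prediction, optionally take the
+// encode_breakout early skip, and otherwise accumulate the measured luma
+// and chroma rate/distortion.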
+static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE_TYPE bsize,
+ int64_t txfm_cache[],
+ int *rate2, int *distortion, int *skippable,
+ int *rate_y, int *distortion_y,
+ int *rate_uv, int *distortion_uv,
+ int *mode_excluded, int *disable_skip,
+ INTERPOLATIONFILTERTYPE *best_filter,
+ int_mv *frame_mv,
+ int mi_row, int mi_col,
+ int_mv single_newmv[MAX_REF_FRAMES]) {
+ const int bw = 1 << mi_width_log2(bsize), bh = 1 << mi_height_log2(bsize);
+
+ VP9_COMMON *cm = &cpi->common;
+ MACROBLOCKD *xd = &x->e_mbd;
+ const enum BlockSize block_size = get_plane_block_size(bsize, &xd->plane[0]);
+ const enum BlockSize uv_block_size = get_plane_block_size(bsize,
+ &xd->plane[1]);
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ const int is_comp_pred = (mbmi->ref_frame[1] > 0);
+ const int num_refs = is_comp_pred ? 2 : 1;
+ const int this_mode = mbmi->mode;
+ int i;
+ int refs[2] = { mbmi->ref_frame[0],
+ (mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
+ int_mv cur_mv[2];
+ int64_t this_rd = 0;
+ unsigned char tmp_buf[MAX_MB_PLANE][64 * 64];
+ int pred_exists = 0;
+ int interpolating_intpel_seen = 0;
+ int intpel_mv;
+ int64_t rd, best_rd = INT64_MAX;
+
+ switch (this_mode) {
+ int rate_mv;
+ case NEWMV:
+ if (is_comp_pred) {
+ // Initialize mv using single prediction mode result.
+ frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
+ frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
+
+ if (cpi->sf.comp_inter_joint_search_thresh < bsize) {
+ joint_motion_search(cpi, x, bsize, frame_mv,
+ mi_row, mi_col, single_newmv, &rate_mv);
+ } else {
+ rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]],
+ &mbmi->ref_mvs[refs[0]][0],
+ x->nmvjointcost, x->mvcost, 96,
+ x->e_mbd.allow_high_precision_mv);
+ rate_mv += vp9_mv_bit_cost(&frame_mv[refs[1]],
+ &mbmi->ref_mvs[refs[1]][0],
+ x->nmvjointcost, x->mvcost, 96,
+ x->e_mbd.allow_high_precision_mv);
+ }
+ if (frame_mv[refs[0]].as_int == INVALID_MV ||
+ frame_mv[refs[1]].as_int == INVALID_MV)
+ return INT64_MAX;
+ *rate2 += rate_mv;
+
+ } else {
+ int_mv tmp_mv;
+ single_motion_search(cpi, x, bsize, mi_row, mi_col,
+ &tmp_mv, &rate_mv);
+ *rate2 += rate_mv;
+ frame_mv[refs[0]].as_int =
+ xd->mode_info_context->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
+ single_newmv[refs[0]].as_int = tmp_mv.as_int;
+ }
+ break;
+ case NEARMV:
+ case NEARESTMV:
+ case ZEROMV:
+ default:
+ break;
+ }
+ for (i = 0; i < num_refs; ++i) {
+ cur_mv[i] = frame_mv[refs[i]];
+    // Clip "next_nearest" so that it does not extend too far out of the image
+ if (this_mode == NEWMV)
+ assert(!clamp_mv2(&cur_mv[i], xd));
+ else
+ clamp_mv2(&cur_mv[i], xd);
+
+ if (mv_check_bounds(x, &cur_mv[i]))
+ return INT64_MAX;
+ mbmi->mv[i].as_int = cur_mv[i].as_int;
+ }
+
+ /* We don't include the cost of the second reference here, because there
+ * are only three options: Last/Golden, ARF/Last or Golden/ARF, or in other
+ * words if you present them in that order, the second one is always known
+ * if the first is known */
+ *rate2 += vp9_cost_mv_ref(cpi, this_mode,
+ mbmi->mb_mode_context[mbmi->ref_frame[0]]);
+
+ pred_exists = 0;
+ interpolating_intpel_seen = 0;
+ // Are all MVs integer pel for Y and UV
+ intpel_mv = (mbmi->mv[0].as_mv.row & 15) == 0 &&
+ (mbmi->mv[0].as_mv.col & 15) == 0;
+ if (is_comp_pred)
+ intpel_mv &= (mbmi->mv[1].as_mv.row & 15) == 0 &&
+ (mbmi->mv[1].as_mv.col & 15) == 0;
+ // Search for best switchable filter by checking the variance of
+ // pred error irrespective of whether the filter will be used
+ if (cpi->speed > 4) {
+ *best_filter = EIGHTTAP;
+ } else {
+ int i, newbest;
+ int tmp_rate_sum = 0, tmp_dist_sum = 0;
+ for (i = 0; i < VP9_SWITCHABLE_FILTERS; ++i) {
+ int rs = 0;
+ const INTERPOLATIONFILTERTYPE filter = vp9_switchable_interp[i];
+ const int is_intpel_interp = intpel_mv &&
+ vp9_is_interpolating_filter[filter];
+ mbmi->interp_filter = filter;
+ vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);
+
+ if (cm->mcomp_filter_type == SWITCHABLE)
+ rs = get_switchable_rate(cm, x);
+
+ if (interpolating_intpel_seen && is_intpel_interp) {
+ rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate_sum, tmp_dist_sum);
+ } else {
+ int rate_sum = 0, dist_sum = 0;
+ vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum);
+ rd = RDCOST(x->rdmult, x->rddiv, rs + rate_sum, dist_sum);
+ if (!interpolating_intpel_seen && is_intpel_interp) {
+ tmp_rate_sum = rate_sum;
+ tmp_dist_sum = dist_sum;
+ }
+ }
+ newbest = i == 0 || rd < best_rd;
+
+ if (newbest) {
+ best_rd = rd;
+ *best_filter = mbmi->interp_filter;
+ }
+
+ if ((cm->mcomp_filter_type == SWITCHABLE && newbest) ||
+ (cm->mcomp_filter_type != SWITCHABLE &&
+ cm->mcomp_filter_type == mbmi->interp_filter)) {
+ int p;
+
+ for (p = 0; p < MAX_MB_PLANE; p++) {
+ const int y = (MI_SIZE * bh) >> xd->plane[p].subsampling_y;
+ const int x = (MI_SIZE * bw) >> xd->plane[p].subsampling_x;
+ int i;
+
+ for (i = 0; i < y; i++)
+ vpx_memcpy(&tmp_buf[p][64 * i],
+ xd->plane[p].dst.buf + i * xd->plane[p].dst.stride, x);
+ }
+ pred_exists = 1;
+ }
+ interpolating_intpel_seen |= is_intpel_interp;
+ }
+ }
+
+  // Set the appropriate filter
+ mbmi->interp_filter = cm->mcomp_filter_type != SWITCHABLE ?
+ cm->mcomp_filter_type : *best_filter;
+ vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);
+
+
+ if (pred_exists) {
+ int p;
+
+ for (p = 0; p < MAX_MB_PLANE; p++) {
+ const int y = (MI_SIZE * bh) >> xd->plane[p].subsampling_y;
+ const int x = (MI_SIZE * bw) >> xd->plane[p].subsampling_x;
+ int i;
+
+ for (i = 0; i < y; i++)
+ vpx_memcpy(xd->plane[p].dst.buf + i * xd->plane[p].dst.stride,
+ &tmp_buf[p][64 * i], x);
+ }
+ } else {
+    // Handles the special case where a filter that is not in the
+    // switchable list (e.g. bilinear, 6-tap) is indicated at the frame level
+ vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ }
+
+ if (cpi->common.mcomp_filter_type == SWITCHABLE)
+ *rate2 += get_switchable_rate(cm, x);
+
+ if (cpi->active_map_enabled && x->active_ptr[0] == 0)
+ x->skip = 1;
+ else if (x->encode_breakout) {
+ unsigned int var, sse;
+ int threshold = (xd->plane[0].dequant[1]
+ * xd->plane[0].dequant[1] >> 4);
+
+ if (threshold < x->encode_breakout)
+ threshold = x->encode_breakout;
+
+ var = cpi->fn_ptr[block_size].vf(x->plane[0].src.buf,
+ x->plane[0].src.stride,
+ xd->plane[0].dst.buf,
+ xd->plane[0].dst.stride,
+ &sse);
+
+ if ((int)sse < threshold) {
+ unsigned int q2dc = xd->plane[0].dequant[0];
+      /* If there is no codeable 2nd-order DC
+         or a very small uniform pixel change */
+ if ((sse - var < q2dc * q2dc >> 4) ||
+ (sse / 2 > var && sse - var < 64)) {
+ // Check u and v to make sure skip is ok
+ int sse2;
+ unsigned int sse2u, sse2v;
+ var = cpi->fn_ptr[uv_block_size].vf(x->plane[1].src.buf,
+ x->plane[1].src.stride,
+ xd->plane[1].dst.buf,
+ xd->plane[1].dst.stride, &sse2u);
+ var = cpi->fn_ptr[uv_block_size].vf(x->plane[2].src.buf,
+ x->plane[1].src.stride,
+ xd->plane[2].dst.buf,
+ xd->plane[1].dst.stride, &sse2v);
+ sse2 = sse2u + sse2v;
+
+ if (sse2 * 2 < threshold) {
+ x->skip = 1;
+ *distortion = sse + sse2;
+ *rate2 = 500;
+
+ /* for best_yrd calculation */
+ *rate_uv = 0;
+ *distortion_uv = sse2;
+
+ *disable_skip = 1;
+ this_rd = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
+ }
+ }
+ }
+ }
+
+ if (!x->skip) {
+ int skippable_y, skippable_uv;
+
+ // Y cost and distortion
+ super_block_yrd(cpi, x, rate_y, distortion_y, &skippable_y,
+ bsize, txfm_cache);
+
+ *rate2 += *rate_y;
+ *distortion += *distortion_y;
+
+ super_block_uvrd(cm, x, rate_uv, distortion_uv,
+ &skippable_uv, bsize);
+
+ *rate2 += *rate_uv;
+ *distortion += *distortion_uv;
+ *skippable = skippable_y && skippable_uv;
+ }
+
+ if (!(*mode_excluded)) {
+ if (is_comp_pred) {
+ *mode_excluded = (cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY);
+ } else {
+ *mode_excluded = (cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY);
+ }
+ }
+
+ return this_rd; // if 0, this will be re-calculated by caller
+}
+
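+// Top-level intra mode selection for a block: choose the best luma and
+// chroma modes (also trying per-4x4 luma modes for sub-8x8 blocks) and
+// store the rate, distortion and transform-mode RD differences in the
+// coding context.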
+void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
+ int *returnrate, int *returndist,
+ BLOCK_SIZE_TYPE bsize,
+ PICK_MODE_CONTEXT *ctx) {
+ VP9_COMMON *cm = &cpi->common;
+ MACROBLOCKD *xd = &x->e_mbd;
+ int rate_y = 0, rate_uv;
+ int rate_y_tokenonly = 0, rate_uv_tokenonly;
+ int dist_y = 0, dist_uv;
+ int y_skip = 0, uv_skip;
+ int64_t txfm_cache[NB_TXFM_MODES], err;
+ MB_PREDICTION_MODE mode;
+ TX_SIZE txfm_size;
+ int rate4x4_y, rate4x4_y_tokenonly, dist4x4_y;
+ int64_t err4x4 = INT64_MAX;
+ int i;
+
+  vpx_memset(&txfm_cache, 0, sizeof(txfm_cache));
+ ctx->skip = 0;
+ xd->mode_info_context->mbmi.mode = DC_PRED;
+ xd->mode_info_context->mbmi.ref_frame[0] = INTRA_FRAME;
+ err = rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly,
+ &dist_y, &y_skip, bsize, txfm_cache);
+ mode = xd->mode_info_context->mbmi.mode;
+ txfm_size = xd->mode_info_context->mbmi.txfm_size;
+ rd_pick_intra_sbuv_mode(cpi, x, &rate_uv, &rate_uv_tokenonly,
+ &dist_uv, &uv_skip,
+ (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 :
+ bsize);
+ if (bsize < BLOCK_SIZE_SB8X8)
+ err4x4 = rd_pick_intra4x4mby_modes(cpi, x, &rate4x4_y,
+ &rate4x4_y_tokenonly,
+ &dist4x4_y, err);
+
+ if (y_skip && uv_skip) {
+ *returnrate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
+ vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 1);
+ *returndist = dist_y + (dist_uv >> 2);
+ memset(ctx->txfm_rd_diff, 0, sizeof(ctx->txfm_rd_diff));
+ xd->mode_info_context->mbmi.mode = mode;
+ xd->mode_info_context->mbmi.txfm_size = txfm_size;
+ } else if (bsize < BLOCK_SIZE_SB8X8 && err4x4 < err) {
+ *returnrate = rate4x4_y + rate_uv +
+ vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 0);
+ *returndist = dist4x4_y + (dist_uv >> 2);
+ vpx_memset(ctx->txfm_rd_diff, 0, sizeof(ctx->txfm_rd_diff));
+ xd->mode_info_context->mbmi.txfm_size = TX_4X4;
+ } else {
+ *returnrate = rate_y + rate_uv +
+ vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 0);
+ *returndist = dist_y + (dist_uv >> 2);
+ for (i = 0; i < NB_TXFM_MODES; i++) {
+ ctx->txfm_rd_diff[i] = txfm_cache[i] - txfm_cache[cm->txfm_mode];
+ }
+ xd->mode_info_context->mbmi.txfm_size = txfm_size;
+ xd->mode_info_context->mbmi.mode = mode;
+ }
+
+ ctx->mic = *xd->mode_info_context;
+}
+
+int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
+ int mi_row, int mi_col,
+ int *returnrate,
+ int *returndistortion,
+ BLOCK_SIZE_TYPE bsize,
+ PICK_MODE_CONTEXT *ctx) {
+ VP9_COMMON *cm = &cpi->common;
+ MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ const enum BlockSize block_size = get_plane_block_size(bsize, &xd->plane[0]);
+ MB_PREDICTION_MODE this_mode;
+ MB_PREDICTION_MODE best_mode = DC_PRED;
+ MV_REFERENCE_FRAME ref_frame;
+ unsigned char segment_id = xd->mode_info_context->mbmi.segment_id;
+ int comp_pred, i;
+ int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
+ struct buf_2d yv12_mb[4][MAX_MB_PLANE];
+ int_mv single_newmv[MAX_REF_FRAMES];
+ static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
+ VP9_ALT_FLAG };
+ int idx_list[4] = {0,
+ cpi->lst_fb_idx,
+ cpi->gld_fb_idx,
+ cpi->alt_fb_idx};
+ int64_t best_rd = INT64_MAX;
+ int64_t best_txfm_rd[NB_TXFM_MODES];
+ int64_t best_txfm_diff[NB_TXFM_MODES];
+ int64_t best_pred_diff[NB_PREDICTION_TYPES];
+ int64_t best_pred_rd[NB_PREDICTION_TYPES];
+ MB_MODE_INFO best_mbmode;
+ int j;
+ int mode_index, best_mode_index = 0;
+ unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
+ vp9_prob comp_mode_p;
+ int64_t best_overall_rd = INT64_MAX;
+ INTERPOLATIONFILTERTYPE best_filter = SWITCHABLE;
+ INTERPOLATIONFILTERTYPE tmp_best_filter = SWITCHABLE;
+ int rate_uv_intra[TX_SIZE_MAX_SB], rate_uv_tokenonly[TX_SIZE_MAX_SB];
+ int dist_uv[TX_SIZE_MAX_SB], skip_uv[TX_SIZE_MAX_SB];
+ MB_PREDICTION_MODE mode_uv[TX_SIZE_MAX_SB];
+ struct scale_factors scale_factor[4];
+ unsigned int ref_frame_mask = 0;
+ unsigned int mode_mask = 0;
+  int64_t mode_distortions[MB_MODE_COUNT];
+  int64_t frame_distortions[MAX_REF_FRAMES];
+ int intra_cost_penalty = 20 * vp9_dc_quant(cpi->common.base_qindex,
+ cpi->common.y_dc_delta_q);
+ int_mv seg_mvs[4][MAX_REF_FRAMES];
+ union b_mode_info best_bmodes[4];
+ PARTITION_INFO best_partition;
+ int bwsl = b_width_log2(bsize);
+ int bws = (1 << bwsl) / 4; // mode_info step for subsize
+ int bhsl = b_height_log2(bsize);
+ int bhs = (1 << bhsl) / 4; // mode_info step for subsize
+
+ for (i = 0; i < 4; i++) {
+ int j;
+
+ for (j = 0; j < MAX_REF_FRAMES; j++)
+ seg_mvs[i][j].as_int = INVALID_MV;
+ }
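+  // An "= {-1}" initializer only sets the first array element, so explicitly
+  // mark every mode/frame distortion as unset.
+  for (i = 0; i < MB_MODE_COUNT; i++)
+    mode_distortions[i] = -1;
+  for (i = 0; i < MAX_REF_FRAMES; i++)
+    frame_distortions[i] = -1;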
+  // A set bit in these masks marks a frame/mode whose error was much
+  // higher than that of its neighbors.
+ ctx->frames_with_high_error = 0;
+ ctx->modes_with_high_error = 0;
+
+ xd->mode_info_context->mbmi.segment_id = segment_id;
+ estimate_ref_frame_costs(cpi, segment_id, ref_costs_single, ref_costs_comp,
+ &comp_mode_p);
+ vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
+ vpx_memset(&single_newmv, 0, sizeof(single_newmv));
+
+ for (i = 0; i < NB_PREDICTION_TYPES; ++i)
+ best_pred_rd[i] = INT64_MAX;
+ for (i = 0; i < NB_TXFM_MODES; i++)
+ best_txfm_rd[i] = INT64_MAX;
+
+  // Build masks of the reference frames and modes flagged as high-error
+  // by the smaller partition sizes.
+ if (cpi->speed > 0) {
+ switch (block_size) {
+ case BLOCK_64X64:
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 4; j++) {
+ ref_frame_mask |= x->mb_context[i][j].frames_with_high_error;
+ mode_mask |= x->mb_context[i][j].modes_with_high_error;
+ }
+ }
+ for (i = 0; i < 4; i++) {
+ ref_frame_mask |= x->sb32_context[i].frames_with_high_error;
+ mode_mask |= x->sb32_context[i].modes_with_high_error;
+ }
+ break;
+ case BLOCK_32X32:
+ for (i = 0; i < 4; i++) {
+ ref_frame_mask |=
+ x->mb_context[xd->sb_index][i].frames_with_high_error;
+ mode_mask |= x->mb_context[xd->sb_index][i].modes_with_high_error;
+ }
+ break;
+ default:
+        // Until all block sizes are handled, leave everything enabled.
+ ref_frame_mask = 0;
+ mode_mask = 0;
+ break;
+ }
+ ref_frame_mask = ~ref_frame_mask;
+ mode_mask = ~mode_mask;
+ }
+
+ for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
+ if (cpi->ref_frame_flags & flag_list[ref_frame]) {
+ setup_buffer_inter(cpi, x, idx_list[ref_frame], ref_frame, block_size,
+ mi_row, mi_col, frame_mv[NEARESTMV], frame_mv[NEARMV],
+ yv12_mb, scale_factor);
+ }
+ frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
+ frame_mv[ZEROMV][ref_frame].as_int = 0;
+ }
+ if (cpi->speed == 0
+ || (cpi->speed > 0 && (ref_frame_mask & (1 << INTRA_FRAME)))) {
+ mbmi->mode = DC_PRED;
+ mbmi->ref_frame[0] = INTRA_FRAME;
+ for (i = 0; i <= (bsize < BLOCK_SIZE_MB16X16 ? TX_4X4 :
+ (bsize < BLOCK_SIZE_SB32X32 ? TX_8X8 :
+ (bsize < BLOCK_SIZE_SB64X64 ? TX_16X16 : TX_32X32)));
+ i++) {
+ mbmi->txfm_size = i;
+ rd_pick_intra_sbuv_mode(cpi, x, &rate_uv_intra[i], &rate_uv_tokenonly[i],
+ &dist_uv[i], &skip_uv[i],
+ (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 :
+ bsize);
+ mode_uv[i] = mbmi->uv_mode;
+ }
+ }
+
+ for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
+ int mode_excluded = 0;
+ int64_t this_rd = INT64_MAX;
+ int disable_skip = 0;
+ int compmode_cost = 0;
+ int rate2 = 0, rate_y = 0, rate_uv = 0;
+ int distortion2 = 0, distortion_y = 0, distortion_uv = 0;
+ int skippable;
+ int64_t txfm_cache[NB_TXFM_MODES];
+ int i;
+
+ for (i = 0; i < NB_TXFM_MODES; ++i)
+ txfm_cache[i] = INT64_MAX;
+
+ // Test best rd so far against threshold for trying this mode.
+ if ((best_rd < ((cpi->rd_threshes[bsize][mode_index] *
+ cpi->rd_thresh_freq_fact[bsize][mode_index]) >> 4)) ||
+ cpi->rd_threshes[bsize][mode_index] == INT_MAX)
+ continue;
+
+ // Do not allow compound prediction if the segment level reference
+ // frame feature is in use as in this case there can only be one reference.
+ if ((vp9_mode_order[mode_index].second_ref_frame > INTRA_FRAME) &&
+ vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME))
+ continue;
+
+ x->skip = 0;
+ this_mode = vp9_mode_order[mode_index].mode;
+ ref_frame = vp9_mode_order[mode_index].ref_frame;
+
+ if (cpi->speed > 0 && bsize >= BLOCK_SIZE_SB8X8) {
+ if (!(ref_frame_mask & (1 << ref_frame))) {
+ continue;
+ }
+ if (!(mode_mask & (1 << this_mode))) {
+ continue;
+ }
+ if (vp9_mode_order[mode_index].second_ref_frame != NONE
+ && !(ref_frame_mask
+ & (1 << vp9_mode_order[mode_index].second_ref_frame))) {
+ continue;
+ }
+ }
+
+ mbmi->ref_frame[0] = ref_frame;
+ mbmi->ref_frame[1] = vp9_mode_order[mode_index].second_ref_frame;
+
+ if (!(ref_frame == INTRA_FRAME
+ || (cpi->ref_frame_flags & flag_list[ref_frame]))) {
+ continue;
+ }
+ if (!(mbmi->ref_frame[1] == NONE
+ || (cpi->ref_frame_flags & flag_list[mbmi->ref_frame[1]]))) {
+ continue;
+ }
+
+ // TODO(jingning, jkoleszar): scaling reference frame not supported for
+ // SPLITMV.
+ if (mbmi->ref_frame[0] > 0 &&
+ (scale_factor[mbmi->ref_frame[0]].x_scale_fp !=
+ (1 << VP9_REF_SCALE_SHIFT) ||
+ scale_factor[mbmi->ref_frame[0]].y_scale_fp !=
+ (1 << VP9_REF_SCALE_SHIFT)) &&
+ this_mode == SPLITMV)
+ continue;
+
+ if (mbmi->ref_frame[1] > 0 &&
+ (scale_factor[mbmi->ref_frame[1]].x_scale_fp !=
+ (1 << VP9_REF_SCALE_SHIFT) ||
+ scale_factor[mbmi->ref_frame[1]].y_scale_fp !=
+ (1 << VP9_REF_SCALE_SHIFT)) &&
+ this_mode == SPLITMV)
+ continue;
+
+ set_scale_factors(xd, mbmi->ref_frame[0], mbmi->ref_frame[1],
+ scale_factor);
+ comp_pred = mbmi->ref_frame[1] > INTRA_FRAME;
+ mbmi->mode = this_mode;
+ mbmi->uv_mode = DC_PRED;
+
+ // Evaluate all sub-pel filters irrespective of whether we can use
+ // them for this frame.
+ mbmi->interp_filter = cm->mcomp_filter_type;
+ vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
+
+ if (bsize >= BLOCK_SIZE_SB8X8 &&
+ (this_mode == I4X4_PRED || this_mode == SPLITMV))
+ continue;
+ if (bsize < BLOCK_SIZE_SB8X8 &&
+ !(this_mode == I4X4_PRED || this_mode == SPLITMV))
+ continue;
+
+ if (comp_pred) {
+ if (!(cpi->ref_frame_flags & flag_list[mbmi->ref_frame[1]]))
+ continue;
+ set_scale_factors(xd, mbmi->ref_frame[0], mbmi->ref_frame[1],
+ scale_factor);
+
+ mode_excluded =
+ mode_excluded ?
+ mode_excluded : cm->comp_pred_mode == SINGLE_PREDICTION_ONLY;
+ } else {
+ // mbmi->ref_frame[1] = vp9_mode_order[mode_index].ref_frame[1];
+ if (ref_frame != INTRA_FRAME) {
+ if (mbmi->ref_frame[1] != INTRA_FRAME)
+ mode_excluded =
+ mode_excluded ?
+ mode_excluded : cm->comp_pred_mode == COMP_PREDICTION_ONLY;
+ }
+ }
+
+ // Select predictors
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
+ if (comp_pred)
+ xd->plane[i].pre[1] = yv12_mb[mbmi->ref_frame[1]][i];
+ }
+
+    // If the segment-level reference frame feature is enabled,
+    // skip this mode when the current ref frame is not allowed.
+ if (vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME) &&
+ vp9_get_segdata(xd, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
+ continue;
+      // If the segment-level skip feature is enabled,
+      // skip this mode when it is not allowed.
+ } else if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP) &&
+ (this_mode != ZEROMV && ref_frame != INTRA_FRAME)) {
+ continue;
+ // Disable this drop out case if the ref frame
+ // segment level feature is enabled for this segment. This is to
+ // prevent the possibility that we end up unable to pick any mode.
+ } else if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME)) {
+ // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
+ // unless ARNR filtering is enabled in which case we want
+ // an unfiltered alternative
+ if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
+ if (this_mode != ZEROMV || ref_frame != ALTREF_FRAME) {
+ continue;
+ }
+ }
+ }
+    // TODO(JBB): This works around the lack of SAD functions that can read
+    // outside the UMV border. Fix this either by making the motion search
+    // operate on a representative block at the boundary first, or by
+    // implementing SAD functions that work inside the border.
+ if (((mi_row + bhs) > cm->mi_rows || (mi_col + bws) > cm->mi_cols) &&
+ this_mode == NEWMV) {
+ continue;
+ }
+
+ if (this_mode == I4X4_PRED) {
+ int rate;
+
+ mbmi->txfm_size = TX_4X4;
+ rd_pick_intra4x4mby_modes(cpi, x, &rate, &rate_y,
+ &distortion_y, INT64_MAX);
+ rate2 += rate;
+ rate2 += intra_cost_penalty;
+ distortion2 += distortion_y;
+
+ rate2 += rate_uv_intra[TX_4X4];
+ rate_uv = rate_uv_intra[TX_4X4];
+ distortion2 += dist_uv[TX_4X4];
+ distortion_uv = dist_uv[TX_4X4];
+ mbmi->uv_mode = mode_uv[TX_4X4];
+ txfm_cache[ONLY_4X4] = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+ for (i = 0; i < NB_TXFM_MODES; ++i)
+ txfm_cache[i] = txfm_cache[ONLY_4X4];
+ } else if (ref_frame == INTRA_FRAME) {
+ TX_SIZE uv_tx;
+ super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable,
+ bsize, txfm_cache);
+
+ uv_tx = mbmi->txfm_size;
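+      // Clamp the chroma transform size to what the subsampled block allows.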
+ if (bsize < BLOCK_SIZE_MB16X16 && uv_tx == TX_8X8)
+ uv_tx = TX_4X4;
+ if (bsize < BLOCK_SIZE_SB32X32 && uv_tx == TX_16X16)
+ uv_tx = TX_8X8;
+ else if (bsize < BLOCK_SIZE_SB64X64 && uv_tx == TX_32X32)
+ uv_tx = TX_16X16;
+
+ rate_uv = rate_uv_intra[uv_tx];
+ distortion_uv = dist_uv[uv_tx];
+ skippable = skippable && skip_uv[uv_tx];
+ mbmi->uv_mode = mode_uv[uv_tx];
+
+ rate2 = rate_y + x->mbmode_cost[mbmi->mode] + rate_uv;
+ if (mbmi->mode != DC_PRED && mbmi->mode != TM_PRED)
+ rate2 += intra_cost_penalty;
+ distortion2 = distortion_y + distortion_uv;
+ } else if (this_mode == SPLITMV) {
+ const int is_comp_pred = mbmi->ref_frame[1] > 0;
+ int rate, distortion;
+ int64_t this_rd_thresh;
+ int64_t tmp_rd, tmp_best_rd = INT64_MAX, tmp_best_rdu = INT64_MAX;
+ int tmp_best_rate = INT_MAX, tmp_best_ratey = INT_MAX;
+ int tmp_best_distortion = INT_MAX, tmp_best_skippable = 0;
+ int switchable_filter_index;
+ int_mv *second_ref = is_comp_pred ?
+ &mbmi->ref_mvs[mbmi->ref_frame[1]][0] : NULL;
+ union b_mode_info tmp_best_bmodes[16];
+ MB_MODE_INFO tmp_best_mbmode;
+ PARTITION_INFO tmp_best_partition;
+ int pred_exists = 0;
+ int uv_skippable;
+
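+      // Use the NEWMV rd threshold that matches the primary reference frame.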
+ this_rd_thresh = (mbmi->ref_frame[0] == LAST_FRAME) ?
+ cpi->rd_threshes[bsize][THR_NEWMV] :
+ cpi->rd_threshes[bsize][THR_NEWA];
+ this_rd_thresh = (mbmi->ref_frame[0] == GOLDEN_FRAME) ?
+ cpi->rd_threshes[bsize][THR_NEWG] : this_rd_thresh;
+ xd->mode_info_context->mbmi.txfm_size = TX_4X4;
+
+ for (switchable_filter_index = 0;
+ switchable_filter_index < VP9_SWITCHABLE_FILTERS;
+ ++switchable_filter_index) {
+ int newbest;
+ mbmi->interp_filter =
+ vp9_switchable_interp[switchable_filter_index];
+ vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
+
+ tmp_rd = rd_pick_best_mbsegmentation(cpi, x,
+ &mbmi->ref_mvs[mbmi->ref_frame[0]][0],
+ second_ref, INT64_MAX,
+ &rate, &rate_y, &distortion,
+ &skippable,
+ (int)this_rd_thresh, seg_mvs,
+ mi_row, mi_col);
+ if (cpi->common.mcomp_filter_type == SWITCHABLE) {
+ const int rs = get_switchable_rate(cm, x);
+ tmp_rd += RDCOST(x->rdmult, x->rddiv, rs, 0);
+ }
+ newbest = (tmp_rd < tmp_best_rd);
+ if (newbest) {
+ tmp_best_filter = mbmi->interp_filter;
+ tmp_best_rd = tmp_rd;
+ }
+ if ((newbest && cm->mcomp_filter_type == SWITCHABLE) ||
+ (mbmi->interp_filter == cm->mcomp_filter_type &&
+ cm->mcomp_filter_type != SWITCHABLE)) {
+ tmp_best_rdu = tmp_rd;
+ tmp_best_rate = rate;
+ tmp_best_ratey = rate_y;
+ tmp_best_distortion = distortion;
+ tmp_best_skippable = skippable;
+ tmp_best_mbmode = *mbmi;
+ tmp_best_partition = *x->partition_info;
+ for (i = 0; i < 4; i++)
+ tmp_best_bmodes[i] = xd->mode_info_context->bmi[i];
+ pred_exists = 1;
+ }
+ } // switchable_filter_index loop
+
+ mbmi->interp_filter = (cm->mcomp_filter_type == SWITCHABLE ?
+ tmp_best_filter : cm->mcomp_filter_type);
+ vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
+ if (!pred_exists) {
+ // Handles the special case when a filter that is not in the
+ // switchable list (bilinear, 6-tap) is indicated at the frame level
+ tmp_rd = rd_pick_best_mbsegmentation(cpi, x,
+ &mbmi->ref_mvs[mbmi->ref_frame[0]][0],
+ second_ref, INT64_MAX,
+ &rate, &rate_y, &distortion,
+ &skippable,
+ (int)this_rd_thresh, seg_mvs,
+ mi_row, mi_col);
+ } else {
+ if (cpi->common.mcomp_filter_type == SWITCHABLE) {
+ int rs = get_switchable_rate(cm, x);
+ tmp_best_rdu -= RDCOST(x->rdmult, x->rddiv, rs, 0);
+ }
+ tmp_rd = tmp_best_rdu;
+ rate = tmp_best_rate;
+ rate_y = tmp_best_ratey;
+ distortion = tmp_best_distortion;
+ skippable = tmp_best_skippable;
+ *mbmi = tmp_best_mbmode;
+ *x->partition_info = tmp_best_partition;
+ for (i = 0; i < 4; i++)
+ xd->mode_info_context->bmi[i] = tmp_best_bmodes[i];
+ }
+
+ rate2 += rate;
+ distortion2 += distortion;
+
+ if (cpi->common.mcomp_filter_type == SWITCHABLE)
+ rate2 += get_switchable_rate(cm, x);
+
+      // If even the 'Y' rd value of split is higher than best so far
+      // then don't bother looking at UV
+ vp9_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col,
+ BLOCK_SIZE_SB8X8);
+ vp9_subtract_sbuv(x, BLOCK_SIZE_SB8X8);
+ super_block_uvrd_for_txfm(cm, x, &rate_uv, &distortion_uv,
+ &uv_skippable, BLOCK_SIZE_SB8X8, TX_4X4);
+ rate2 += rate_uv;
+ distortion2 += distortion_uv;
+ skippable = skippable && uv_skippable;
+
+ txfm_cache[ONLY_4X4] = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+ for (i = 0; i < NB_TXFM_MODES; ++i)
+ txfm_cache[i] = txfm_cache[ONLY_4X4];
+
+ if (!mode_excluded) {
+ if (is_comp_pred)
+ mode_excluded = cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY;
+ else
+ mode_excluded = cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY;
+ }
+
+ compmode_cost = vp9_cost_bit(comp_mode_p, is_comp_pred);
+ } else {
+ compmode_cost = vp9_cost_bit(comp_mode_p,
+ mbmi->ref_frame[1] > INTRA_FRAME);
+ this_rd = handle_inter_mode(cpi, x, bsize,
+ txfm_cache,
+ &rate2, &distortion2, &skippable,
+ &rate_y, &distortion_y,
+ &rate_uv, &distortion_uv,
+ &mode_excluded, &disable_skip,
+ &tmp_best_filter, frame_mv[this_mode],
+ mi_row, mi_col,
+ single_newmv);
+ if (this_rd == INT64_MAX)
+ continue;
+ }
+
+ if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
+ rate2 += compmode_cost;
+ }
+
+ // Estimate the reference frame signaling cost and add it
+ // to the rolling cost variable.
+ if (mbmi->ref_frame[1] > INTRA_FRAME) {
+ rate2 += ref_costs_comp[mbmi->ref_frame[0]];
+ } else {
+ rate2 += ref_costs_single[mbmi->ref_frame[0]];
+ }
+
+ if (!disable_skip) {
+      // Test for the condition where skip block will be activated
+      // because there are no non-zero coefficients and make any
+      // necessary adjustment for rate. Ignore if skip is coded at
+      // segment level as the cost won't have been added in.
+ int mb_skip_allowed;
+
+ // Is Mb level skip allowed (i.e. not coded at segment level).
+ mb_skip_allowed = !vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP);
+
+ if (skippable && bsize >= BLOCK_SIZE_SB8X8) {
+ // Back out the coefficient coding costs
+ rate2 -= (rate_y + rate_uv);
+ // for best_yrd calculation
+ rate_uv = 0;
+
+ if (mb_skip_allowed) {
+ int prob_skip_cost;
+
+ // Cost the skip mb case
+ vp9_prob skip_prob =
+ vp9_get_pred_prob(cm, xd, PRED_MBSKIP);
+
+ if (skip_prob) {
+ prob_skip_cost = vp9_cost_bit(skip_prob, 1);
+ rate2 += prob_skip_cost;
+ }
+ }
+ } else if (mb_skip_allowed) {
+ // Add in the cost of the no skip flag.
+ int prob_skip_cost = vp9_cost_bit(vp9_get_pred_prob(cm, xd,
+ PRED_MBSKIP), 0);
+ rate2 += prob_skip_cost;
+ }
+
+ // Calculate the final RD estimate for this mode.
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+ }
+
+#if 0
+ // Keep record of best intra distortion
+ if ((xd->mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME) &&
+ (this_rd < best_intra_rd)) {
+ best_intra_rd = this_rd;
+ *returnintra = distortion2;
+ }
+#endif
+
+ if (!disable_skip && mbmi->ref_frame[0] == INTRA_FRAME)
+ for (i = 0; i < NB_PREDICTION_TYPES; ++i)
+ best_pred_rd[i] = MIN(best_pred_rd[i], this_rd);
+
+ if (this_rd < best_overall_rd) {
+ best_overall_rd = this_rd;
+ best_filter = tmp_best_filter;
+ best_mode = this_mode;
+ }
+
+ if (this_mode != I4X4_PRED && this_mode != SPLITMV) {
+ // Store the respective mode distortions for later use.
+ if (mode_distortions[this_mode] == -1
+ || distortion2 < mode_distortions[this_mode]) {
+ mode_distortions[this_mode] = distortion2;
+ }
+ if (frame_distortions[mbmi->ref_frame[0]] == -1
+ || distortion2 < frame_distortions[mbmi->ref_frame[0]]) {
+ frame_distortions[mbmi->ref_frame[0]] = distortion2;
+ }
+ }
+
+    // Did this mode help, i.e. is it the new best mode?
+ if (this_rd < best_rd || x->skip) {
+ if (!mode_excluded) {
+ // Note index of best mode so far
+ best_mode_index = mode_index;
+
+ if (ref_frame == INTRA_FRAME) {
+ /* required for left and above block mv */
+ mbmi->mv[0].as_int = 0;
+ }
+
+ *returnrate = rate2;
+ *returndistortion = distortion2;
+ best_rd = this_rd;
+ best_mbmode = *mbmi;
+ best_partition = *x->partition_info;
+
+ if (this_mode == I4X4_PRED || this_mode == SPLITMV)
+ for (i = 0; i < 4; i++)
+ best_bmodes[i] = xd->mode_info_context->bmi[i];
+ }
+#if 0
+ // Testing this mode gave rise to an improvement in best error score.
+ // Lower threshold a bit for next time
+ cpi->rd_thresh_mult[mode_index] =
+ (cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ?
+ cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
+ cpi->rd_threshes[mode_index] =
+ (cpi->rd_baseline_thresh[mode_index] >> 7)
+ * cpi->rd_thresh_mult[mode_index];
+#endif
+ } else {
+ // If the mode did not help improve the best error case then
+ // raise the threshold for testing that mode next time around.
+#if 0
+ cpi->rd_thresh_mult[mode_index] += 4;
+
+ if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
+ cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
+
+ cpi->rd_threshes[mode_index] =
+ (cpi->rd_baseline_thresh[mode_index] >> 7)
+ * cpi->rd_thresh_mult[mode_index];
+#endif
+ }
+
+ /* keep record of best compound/single-only prediction */
+ if (!disable_skip && mbmi->ref_frame[0] != INTRA_FRAME) {
+ int single_rd, hybrid_rd, single_rate, hybrid_rate;
+
+ if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
+ single_rate = rate2 - compmode_cost;
+ hybrid_rate = rate2;
+ } else {
+ single_rate = rate2;
+ hybrid_rate = rate2 + compmode_cost;
+ }
+
+ single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
+ hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
+
+ if (mbmi->ref_frame[1] <= INTRA_FRAME &&
+ single_rd < best_pred_rd[SINGLE_PREDICTION_ONLY]) {
+ best_pred_rd[SINGLE_PREDICTION_ONLY] = single_rd;
+ } else if (mbmi->ref_frame[1] > INTRA_FRAME &&
+ single_rd < best_pred_rd[COMP_PREDICTION_ONLY]) {
+ best_pred_rd[COMP_PREDICTION_ONLY] = single_rd;
+ }
+ if (hybrid_rd < best_pred_rd[HYBRID_PREDICTION])
+ best_pred_rd[HYBRID_PREDICTION] = hybrid_rd;
+ }
+
+ /* keep record of best txfm size */
+ if (bsize < BLOCK_SIZE_SB32X32) {
+ if (bsize < BLOCK_SIZE_MB16X16) {
+ if (this_mode == SPLITMV || this_mode == I4X4_PRED)
+ txfm_cache[ALLOW_8X8] = txfm_cache[ONLY_4X4];
+ txfm_cache[ALLOW_16X16] = txfm_cache[ALLOW_8X8];
+ }
+ txfm_cache[ALLOW_32X32] = txfm_cache[ALLOW_16X16];
+ }
+ if (!mode_excluded && this_rd != INT64_MAX) {
+ for (i = 0; i < NB_TXFM_MODES; i++) {
+ int64_t adj_rd = INT64_MAX;
+ if (this_mode != I4X4_PRED) {
+ adj_rd = this_rd + txfm_cache[i] - txfm_cache[cm->txfm_mode];
+ } else {
+ adj_rd = this_rd;
+ }
+
+ if (adj_rd < best_txfm_rd[i])
+ best_txfm_rd[i] = adj_rd;
+ }
+ }
+
+ if (x->skip && !mode_excluded)
+ break;
+ }
+  // Flag all modes whose distortion is more than twice the best found at
+  // this level.
+ for (mode_index = 0; mode_index < MB_MODE_COUNT; ++mode_index) {
+ if (mode_index == NEARESTMV || mode_index == NEARMV || mode_index == NEWMV)
+ continue;
+
+ if (mode_distortions[mode_index] > 2 * *returndistortion) {
+ ctx->modes_with_high_error |= (1 << mode_index);
+ }
+ }
+
+  // Flag all ref frames whose distortion is more than twice the best found
+  // at this level.
+ for (ref_frame = INTRA_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
+ if (frame_distortions[ref_frame] > 2 * *returndistortion) {
+ ctx->frames_with_high_error |= (1 << ref_frame);
+ }
+ }
+
+ if (best_rd == INT64_MAX && bsize < BLOCK_SIZE_SB8X8) {
+ *returnrate = INT_MAX;
+ *returndistortion = INT_MAX;
+ return best_rd;
+ }
+
+ assert((cm->mcomp_filter_type == SWITCHABLE) ||
+ (cm->mcomp_filter_type == best_mbmode.interp_filter) ||
+ (best_mbmode.ref_frame[0] == INTRA_FRAME));
+
+ // Accumulate filter usage stats
+ // TODO(agrange): Use RD criteria to select interpolation filter mode.
+ if (is_inter_mode(best_mode))
+ ++cpi->best_switchable_interp_count[vp9_switchable_interp_map[best_filter]];
+
+  // Updating rd_thresh_freq_fact[] here means that the different
+ // partition/block sizes are handled independently based on the best
+ // choice for the current partition. It may well be better to keep a scaled
+ // best rd so far value and update rd_thresh_freq_fact based on the mode/size
+ // combination that wins out.
+ if (cpi->sf.adpative_rd_thresh) {
+ for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
+ if (mode_index == best_mode_index) {
+ cpi->rd_thresh_freq_fact[bsize][mode_index] = BASE_RD_THRESH_FREQ_FACT;
+ } else {
+ cpi->rd_thresh_freq_fact[bsize][mode_index] += MAX_RD_THRESH_FREQ_INC;
+ if (cpi->rd_thresh_freq_fact[bsize][mode_index] >
+ (cpi->sf.adpative_rd_thresh * MAX_RD_THRESH_FREQ_FACT)) {
+ cpi->rd_thresh_freq_fact[bsize][mode_index] =
+ cpi->sf.adpative_rd_thresh * MAX_RD_THRESH_FREQ_FACT;
+ }
+ }
+ }
+ }
+
+  // TODO(rbultje): integrate with rd_thresh_freq_fact RD thresholding
+#if 0
+ // Reduce the activation RD thresholds for the best choice mode
+ if ((cpi->rd_baseline_thresh[best_mode_index] > 0) &&
+ (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2))) {
+ int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 2);
+
+ cpi->rd_thresh_mult[best_mode_index] =
+ (cpi->rd_thresh_mult[best_mode_index] >= (MIN_THRESHMULT + best_adjustment)) ?
+ cpi->rd_thresh_mult[best_mode_index] - best_adjustment : MIN_THRESHMULT;
+ cpi->rd_threshes[best_mode_index] =
+ (cpi->rd_baseline_thresh[best_mode_index] >> 7) * cpi->rd_thresh_mult[best_mode_index];
+ }
+#endif
+
+  // This code forces Altref,0,0 and skip for the frame that overlays
+  // an altref, unless the altref is filtered. However, this is unsafe if
+  // segment-level coding of the ref frame is enabled for this segment.
+ if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME) &&
+ cpi->is_src_frame_alt_ref &&
+ (cpi->oxcf.arnr_max_frames == 0) &&
+ (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame[0] != ALTREF_FRAME)
+ && bsize >= BLOCK_SIZE_SB8X8) {
+ mbmi->mode = ZEROMV;
+ mbmi->ref_frame[0] = ALTREF_FRAME;
+ mbmi->ref_frame[1] = NONE;
+ mbmi->mv[0].as_int = 0;
+ mbmi->uv_mode = DC_PRED;
+ mbmi->mb_skip_coeff = 1;
+ if (cm->txfm_mode == TX_MODE_SELECT) {
+ if (bsize >= BLOCK_SIZE_SB32X32)
+ mbmi->txfm_size = TX_32X32;
+ else if (bsize >= BLOCK_SIZE_MB16X16)
+ mbmi->txfm_size = TX_16X16;
+ else
+ mbmi->txfm_size = TX_8X8;
+ }
+
+ vpx_memset(best_txfm_diff, 0, sizeof(best_txfm_diff));
+ vpx_memset(best_pred_diff, 0, sizeof(best_pred_diff));
+ goto end;
+ }
+
+ // macroblock modes
+ *mbmi = best_mbmode;
+ if (best_mbmode.ref_frame[0] == INTRA_FRAME &&
+ best_mbmode.sb_type < BLOCK_SIZE_SB8X8) {
+ for (i = 0; i < 4; i++)
+ xd->mode_info_context->bmi[i].as_mode = best_bmodes[i].as_mode;
+ }
+
+ if (best_mbmode.ref_frame[0] != INTRA_FRAME &&
+ best_mbmode.sb_type < BLOCK_SIZE_SB8X8) {
+ for (i = 0; i < 4; i++)
+ xd->mode_info_context->bmi[i].as_mv[0].as_int =
+ best_bmodes[i].as_mv[0].as_int;
+
+ if (mbmi->ref_frame[1] > 0)
+ for (i = 0; i < 4; i++)
+ xd->mode_info_context->bmi[i].as_mv[1].as_int =
+ best_bmodes[i].as_mv[1].as_int;
+
+ *x->partition_info = best_partition;
+
+ mbmi->mv[0].as_int = x->partition_info->bmi[3].mv.as_int;
+ mbmi->mv[1].as_int = x->partition_info->bmi[3].second_mv.as_int;
+ }
+
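+  // Record, for each prediction type, the rd difference from the overall
+  // best (INT_MIN when that type was never evaluated).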
+ for (i = 0; i < NB_PREDICTION_TYPES; ++i) {
+ if (best_pred_rd[i] == INT64_MAX)
+ best_pred_diff[i] = INT_MIN;
+ else
+ best_pred_diff[i] = best_rd - best_pred_rd[i];
+ }
+
+ if (!x->skip) {
+ for (i = 0; i < NB_TXFM_MODES; i++) {
+ if (best_txfm_rd[i] == INT64_MAX)
+ best_txfm_diff[i] = 0;
+ else
+ best_txfm_diff[i] = best_rd - best_txfm_rd[i];
+ }
+ } else {
+ vpx_memset(best_txfm_diff, 0, sizeof(best_txfm_diff));
+ }
+
+ end:
+ set_scale_factors(xd, mbmi->ref_frame[0], mbmi->ref_frame[1],
+ scale_factor);
+ store_coding_context(x, ctx, best_mode_index,
+ &best_partition,
+ &mbmi->ref_mvs[mbmi->ref_frame[0]][0],
+ &mbmi->ref_mvs[mbmi->ref_frame[1] < 0 ? 0 :
+ mbmi->ref_frame[1]][0],
+ best_pred_diff, best_txfm_diff);
+
+ return best_rd;
+}
diff --git a/libvpx/vp9/encoder/vp9_rdopt.h b/libvpx/vp9/encoder/vp9_rdopt.h
new file mode 100644
index 0000000..dcf5d00
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_rdopt.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_RDOPT_H_
+#define VP9_ENCODER_VP9_RDOPT_H_
+
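+// RD cost: the rate R scaled by the rate multiplier RM (rounded, in 1/256
+// units) plus the distortion D scaled by the distortion multiplier DM.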
+#define RDCOST(RM,DM,R,D) ( ((128+((int64_t)R)*(RM)) >> 8) + ((int64_t)DM)*(D) )
+#define RDCOST_8x8(RM,DM,R,D) ( ((128+((int64_t)R)*(RM)) >> 8) + ((int64_t)DM)*(D) )
+
+void vp9_initialize_rd_consts(VP9_COMP *cpi, int qindex);
+
+void vp9_initialize_me_consts(VP9_COMP *cpi, int qindex);
+
+void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
+ int *r, int *d, BLOCK_SIZE_TYPE bsize,
+ PICK_MODE_CONTEXT *ctx);
+
+int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
+ int mi_row, int mi_col,
+ int *r, int *d, BLOCK_SIZE_TYPE bsize,
+ PICK_MODE_CONTEXT *ctx);
+
+void vp9_init_me_luts(void);
+
+void vp9_set_mbmode_and_mvs(MACROBLOCK *x,
+ MB_PREDICTION_MODE mb, int_mv *mv);
+
+#endif // VP9_ENCODER_VP9_RDOPT_H_
diff --git a/libvpx/vp9/encoder/vp9_sad_c.c b/libvpx/vp9/encoder/vp9_sad_c.c
new file mode 100644
index 0000000..6b1ba49
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_sad_c.c
@@ -0,0 +1,686 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <stdlib.h>
+#include "vp9/common/vp9_sadmxn.h"
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+#include "./vp9_rtcd.h"
+
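+// Each vp9_sadWxH_c computes the sum of absolute differences over a WxH
+// block via sad_mx_n_c. The x3/x8 variants evaluate 3 or 8 horizontally
+// adjacent reference candidates, and the x4d variants evaluate 4 independent
+// reference pointers. max_sad is accepted for interface compatibility but is
+// not used for early termination in these C versions.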
+unsigned int vp9_sad64x64_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int max_sad) {
+ return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 64, 64);
+}
+
+unsigned int vp9_sad64x32_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int max_sad) {
+ return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 64, 32);
+}
+
+void vp9_sad64x32x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad64x32(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad64x32(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad64x32(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad64x32(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+unsigned int vp9_sad32x64_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int max_sad) {
+ return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 32, 64);
+}
+
+void vp9_sad32x64x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad32x64(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad32x64(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad32x64(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad32x64(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+unsigned int vp9_sad32x32_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int max_sad) {
+ return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 32, 32);
+}
+
+unsigned int vp9_sad32x16_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int max_sad) {
+ return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 32, 16);
+}
+
+void vp9_sad32x16x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad32x16(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad32x16(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad32x16(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad32x16(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+unsigned int vp9_sad16x32_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int max_sad) {
+ return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 16, 32);
+}
+
+void vp9_sad16x32x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad16x32(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad16x32(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad16x32(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad16x32(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+unsigned int vp9_sad16x16_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int max_sad) {
+ return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 16, 16);
+}
+
+unsigned int vp9_sad8x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int max_sad) {
+ return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 8, 8);
+}
+
+
+unsigned int vp9_sad16x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int max_sad) {
+ return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 16, 8);
+}
+
+unsigned int vp9_sad8x16_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int max_sad) {
+ return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 8, 16);
+}
+
+unsigned int vp9_sad8x4_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int max_sad) {
+ return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 8, 4);
+}
+
+unsigned int vp9_sad4x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int max_sad) {
+ return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 4, 8);
+}
+
+unsigned int vp9_sad4x4_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int max_sad) {
+ return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 4, 4);
+}
+
+void vp9_sad64x64x3_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad64x64(src_ptr, src_stride, ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad64x64(src_ptr, src_stride, ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad64x64(src_ptr, src_stride, ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad32x32x3_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride, 0x7fffffff);
+}
+
+void vp9_sad64x64x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+ sad_array[3] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr + 3, ref_stride,
+ 0x7fffffff);
+ sad_array[4] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr + 4, ref_stride,
+ 0x7fffffff);
+ sad_array[5] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr + 5, ref_stride,
+ 0x7fffffff);
+ sad_array[6] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr + 6, ref_stride,
+ 0x7fffffff);
+ sad_array[7] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr + 7, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad32x32x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+ sad_array[3] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr + 3, ref_stride,
+ 0x7fffffff);
+ sad_array[4] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr + 4, ref_stride,
+ 0x7fffffff);
+ sad_array[5] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr + 5, ref_stride,
+ 0x7fffffff);
+ sad_array[6] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr + 6, ref_stride,
+ 0x7fffffff);
+ sad_array[7] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr + 7, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad16x16x3_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride, 0x7fffffff);
+}
+
+void vp9_sad16x16x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ uint32_t *sad_array) {
+ sad_array[0] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+ sad_array[3] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr + 3, ref_stride,
+ 0x7fffffff);
+ sad_array[4] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr + 4, ref_stride,
+ 0x7fffffff);
+ sad_array[5] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr + 5, ref_stride,
+ 0x7fffffff);
+ sad_array[6] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr + 6, ref_stride,
+ 0x7fffffff);
+ sad_array[7] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr + 7, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad16x8x3_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride, 0x7fffffff);
+}
+
+void vp9_sad16x8x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ uint32_t *sad_array) {
+ sad_array[0] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+ sad_array[3] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr + 3, ref_stride,
+ 0x7fffffff);
+ sad_array[4] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr + 4, ref_stride,
+ 0x7fffffff);
+ sad_array[5] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr + 5, ref_stride,
+ 0x7fffffff);
+ sad_array[6] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr + 6, ref_stride,
+ 0x7fffffff);
+ sad_array[7] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr + 7, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad8x8x3_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride, 0x7fffffff);
+}
+
+void vp9_sad8x8x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ uint32_t *sad_array) {
+ sad_array[0] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+ sad_array[3] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr + 3, ref_stride,
+ 0x7fffffff);
+ sad_array[4] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr + 4, ref_stride,
+ 0x7fffffff);
+ sad_array[5] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr + 5, ref_stride,
+ 0x7fffffff);
+ sad_array[6] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr + 6, ref_stride,
+ 0x7fffffff);
+ sad_array[7] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr + 7, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad8x16x3_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride, 0x7fffffff);
+}
+
+void vp9_sad8x16x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ uint32_t *sad_array) {
+ sad_array[0] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+ sad_array[3] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr + 3, ref_stride,
+ 0x7fffffff);
+ sad_array[4] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr + 4, ref_stride,
+ 0x7fffffff);
+ sad_array[5] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr + 5, ref_stride,
+ 0x7fffffff);
+ sad_array[6] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr + 6, ref_stride,
+ 0x7fffffff);
+ sad_array[7] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr + 7, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad4x4x3_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr, ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride, 0x7fffffff);
+}
+
+void vp9_sad4x4x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ uint32_t *sad_array) {
+ sad_array[0] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+ sad_array[3] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr + 3, ref_stride,
+ 0x7fffffff);
+ sad_array[4] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr + 4, ref_stride,
+ 0x7fffffff);
+ sad_array[5] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr + 5, ref_stride,
+ 0x7fffffff);
+ sad_array[6] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr + 6, ref_stride,
+ 0x7fffffff);
+ sad_array[7] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr + 7, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad64x64x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad64x64(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad32x32x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad32x32(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad16x16x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad16x16(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad16x8x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad16x8(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad8x8x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad8x8(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad8x16x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad8x16(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad8x4x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad8x4x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ uint32_t *sad_array) {
+ sad_array[0] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+ sad_array[3] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr + 3, ref_stride,
+ 0x7fffffff);
+ sad_array[4] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr + 4, ref_stride,
+ 0x7fffffff);
+ sad_array[5] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr + 5, ref_stride,
+ 0x7fffffff);
+ sad_array[6] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr + 6, ref_stride,
+ 0x7fffffff);
+ sad_array[7] = vp9_sad8x4(src_ptr, src_stride,
+ ref_ptr + 7, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad4x8x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
+
+void vp9_sad4x8x8_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ uint32_t *sad_array) {
+ sad_array[0] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr, ref_stride,
+ 0x7fffffff);
+ sad_array[1] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr + 1, ref_stride,
+ 0x7fffffff);
+ sad_array[2] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr + 2, ref_stride,
+ 0x7fffffff);
+ sad_array[3] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr + 3, ref_stride,
+ 0x7fffffff);
+ sad_array[4] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr + 4, ref_stride,
+ 0x7fffffff);
+ sad_array[5] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr + 5, ref_stride,
+ 0x7fffffff);
+ sad_array[6] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr + 6, ref_stride,
+ 0x7fffffff);
+ sad_array[7] = vp9_sad4x8(src_ptr, src_stride,
+ ref_ptr + 7, ref_stride,
+ 0x7fffffff);
+}
+
+void vp9_sad4x4x4d_c(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride,
+ unsigned int *sad_array) {
+ sad_array[0] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr[0], ref_stride, 0x7fffffff);
+ sad_array[1] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr[1], ref_stride, 0x7fffffff);
+ sad_array[2] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr[2], ref_stride, 0x7fffffff);
+ sad_array[3] = vp9_sad4x4(src_ptr, src_stride,
+ ref_ptr[3], ref_stride, 0x7fffffff);
+}
diff --git a/libvpx/vp9/encoder/vp9_segmentation.c b/libvpx/vp9/encoder/vp9_segmentation.c
new file mode 100644
index 0000000..fe995ad
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_segmentation.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <limits.h>
+#include "vpx_mem/vpx_mem.h"
+#include "vp9/encoder/vp9_segmentation.h"
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/common/vp9_tile_common.h"
+
+void vp9_enable_segmentation(VP9_PTR ptr) {
+ VP9_COMP *cpi = (VP9_COMP *)ptr;
+
+ cpi->mb.e_mbd.segmentation_enabled = 1;
+ cpi->mb.e_mbd.update_mb_segmentation_map = 1;
+ cpi->mb.e_mbd.update_mb_segmentation_data = 1;
+}
+
+void vp9_disable_segmentation(VP9_PTR ptr) {
+ VP9_COMP *cpi = (VP9_COMP *)ptr;
+ cpi->mb.e_mbd.segmentation_enabled = 0;
+}
+
+void vp9_set_segmentation_map(VP9_PTR ptr,
+ unsigned char *segmentation_map) {
+ VP9_COMP *cpi = (VP9_COMP *)(ptr);
+
+ // Copy in the new segmentation map
+ vpx_memcpy(cpi->segmentation_map, segmentation_map,
+ (cpi->common.mi_rows * cpi->common.mi_cols));
+
+ // Signal that the map should be updated.
+ cpi->mb.e_mbd.update_mb_segmentation_map = 1;
+ cpi->mb.e_mbd.update_mb_segmentation_data = 1;
+}
+
+void vp9_set_segment_data(VP9_PTR ptr,
+ signed char *feature_data,
+ unsigned char abs_delta) {
+ VP9_COMP *cpi = (VP9_COMP *)(ptr);
+
+ cpi->mb.e_mbd.mb_segment_abs_delta = abs_delta;
+
+ vpx_memcpy(cpi->mb.e_mbd.segment_feature_data, feature_data,
+ sizeof(cpi->mb.e_mbd.segment_feature_data));
+
+ // TBD ?? Set the feature mask
+ // vpx_memcpy(cpi->mb.e_mbd.segment_feature_mask, 0,
+ // sizeof(cpi->mb.e_mbd.segment_feature_mask));
+}
+
+// Based on set of segment counts calculate a probability tree
+static void calc_segtree_probs(MACROBLOCKD *xd, int *segcounts,
+ vp9_prob *segment_tree_probs) {
+ // Work out probabilities of each segment
+ const int c01 = segcounts[0] + segcounts[1];
+ const int c23 = segcounts[2] + segcounts[3];
+ const int c45 = segcounts[4] + segcounts[5];
+ const int c67 = segcounts[6] + segcounts[7];
+
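+  // The segment map is coded as a balanced binary tree: probs[0] splits
+  // segments {0..3} from {4..7}, probs[1] and probs[2] split each half in
+  // two, and probs[3..6] pick the individual segment within each pair.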
+ segment_tree_probs[0] = get_binary_prob(c01 + c23, c45 + c67);
+ segment_tree_probs[1] = get_binary_prob(c01, c23);
+ segment_tree_probs[2] = get_binary_prob(c45, c67);
+ segment_tree_probs[3] = get_binary_prob(segcounts[0], segcounts[1]);
+ segment_tree_probs[4] = get_binary_prob(segcounts[2], segcounts[3]);
+ segment_tree_probs[5] = get_binary_prob(segcounts[4], segcounts[5]);
+ segment_tree_probs[6] = get_binary_prob(segcounts[6], segcounts[7]);
+}
+
+// Based on set of segment counts and probabilities calculate a cost estimate
+static int cost_segmap(MACROBLOCKD *xd, int *segcounts, vp9_prob *probs) {
+ const int c01 = segcounts[0] + segcounts[1];
+ const int c23 = segcounts[2] + segcounts[3];
+ const int c45 = segcounts[4] + segcounts[5];
+ const int c67 = segcounts[6] + segcounts[7];
+ const int c0123 = c01 + c23;
+ const int c4567 = c45 + c67;
+
+ // Cost the top node of the tree
+ int cost = c0123 * vp9_cost_zero(probs[0]) +
+ c4567 * vp9_cost_one(probs[0]);
+
+ // Cost subsequent levels
+ if (c0123 > 0) {
+ cost += c01 * vp9_cost_zero(probs[1]) +
+ c23 * vp9_cost_one(probs[1]);
+
+ if (c01 > 0)
+ cost += segcounts[0] * vp9_cost_zero(probs[3]) +
+ segcounts[1] * vp9_cost_one(probs[3]);
+ if (c23 > 0)
+ cost += segcounts[2] * vp9_cost_zero(probs[4]) +
+ segcounts[3] * vp9_cost_one(probs[4]);
+ }
+
+ if (c4567 > 0) {
+ cost += c45 * vp9_cost_zero(probs[2]) +
+ c67 * vp9_cost_one(probs[2]);
+
+ if (c45 > 0)
+ cost += segcounts[4] * vp9_cost_zero(probs[5]) +
+ segcounts[5] * vp9_cost_one(probs[5]);
+ if (c67 > 0)
+ cost += segcounts[6] * vp9_cost_zero(probs[6]) +
+ segcounts[7] * vp9_cost_one(probs[6]);
+ }
+
+ return cost;
+}
+
+static void count_segs(VP9_COMP *cpi,
+ MODE_INFO *mi,
+ int *no_pred_segcounts,
+ int (*temporal_predictor_count)[2],
+ int *t_unpred_seg_counts,
+ int bw, int bh, int mi_row, int mi_col) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ int segment_id;
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ segment_id = mi->mbmi.segment_id;
+ xd->mode_info_context = mi;
+ set_mi_row_col(cm, xd, mi_row, bh, mi_col, bw);
+
+ // Count the number of hits on each segment with no prediction
+ no_pred_segcounts[segment_id]++;
+
+ // Temporal prediction not allowed on key frames
+ if (cm->frame_type != KEY_FRAME) {
+ // Test to see if the segment id matches the predicted value.
+ const int pred_seg_id = vp9_get_pred_mi_segid(cm, mi->mbmi.sb_type,
+ mi_row, mi_col);
+ const int seg_predicted = (segment_id == pred_seg_id);
+
+ // Get the segment id prediction context
+ const int pred_context = vp9_get_pred_context(cm, xd, PRED_SEG_ID);
+
+ // Store the prediction status for this mb and update counts
+ // as appropriate
+ vp9_set_pred_flag(xd, PRED_SEG_ID, seg_predicted);
+ temporal_predictor_count[pred_context][seg_predicted]++;
+
+ if (!seg_predicted)
+ // Update the "unpredicted" segment count
+ t_unpred_seg_counts[segment_id]++;
+ }
+}
+
+static void count_segs_sb(VP9_COMP *cpi, MODE_INFO *mi,
+ int *no_pred_segcounts,
+ int (*temporal_predictor_count)[2],
+ int *t_unpred_seg_counts,
+ int mi_row, int mi_col,
+ BLOCK_SIZE_TYPE bsize) {
+ VP9_COMMON *const cm = &cpi->common;
+ const int mis = cm->mode_info_stride;
+ int bwl, bhl;
+ const int bsl = mi_width_log2(bsize), bs = 1 << (bsl - 1);
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ bwl = mi_width_log2(mi->mbmi.sb_type);
+ bhl = mi_height_log2(mi->mbmi.sb_type);
+
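+  // If the coded block fills this bsize, count it directly; if it fills only
+  // one dimension, it was split in the other, so count the two halves;
+  // otherwise recurse into the four quadrants at the next smaller size.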
+ if (bwl == bsl && bhl == bsl) {
+ count_segs(cpi, mi, no_pred_segcounts, temporal_predictor_count,
+ t_unpred_seg_counts, 1 << bsl, 1 << bsl, mi_row, mi_col);
+ } else if (bwl == bsl && bhl < bsl) {
+ count_segs(cpi, mi, no_pred_segcounts, temporal_predictor_count,
+ t_unpred_seg_counts, 1 << bsl, bs, mi_row, mi_col);
+ count_segs(cpi, mi + bs * mis, no_pred_segcounts, temporal_predictor_count,
+ t_unpred_seg_counts, 1 << bsl, bs, mi_row + bs, mi_col);
+ } else if (bwl < bsl && bhl == bsl) {
+ count_segs(cpi, mi, no_pred_segcounts, temporal_predictor_count,
+ t_unpred_seg_counts, bs, 1 << bsl, mi_row, mi_col);
+ count_segs(cpi, mi + bs, no_pred_segcounts, temporal_predictor_count,
+ t_unpred_seg_counts, bs, 1 << bsl, mi_row, mi_col + bs);
+ } else {
+ BLOCK_SIZE_TYPE subsize;
+ int n;
+
+ assert(bwl < bsl && bhl < bsl);
+ if (bsize == BLOCK_SIZE_SB64X64) {
+ subsize = BLOCK_SIZE_SB32X32;
+ } else if (bsize == BLOCK_SIZE_SB32X32) {
+ subsize = BLOCK_SIZE_MB16X16;
+ } else {
+ assert(bsize == BLOCK_SIZE_MB16X16);
+ subsize = BLOCK_SIZE_SB8X8;
+ }
+
+ for (n = 0; n < 4; n++) {
+ const int y_idx = n >> 1, x_idx = n & 0x01;
+
+ count_segs_sb(cpi, mi + y_idx * bs * mis + x_idx * bs,
+ no_pred_segcounts, temporal_predictor_count,
+ t_unpred_seg_counts,
+ mi_row + y_idx * bs, mi_col + x_idx * bs, subsize);
+ }
+ }
+}
+
+void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+
+ int no_pred_cost;
+ int t_pred_cost = INT_MAX;
+
+ int i;
+ int tile_col, mi_row, mi_col;
+
+ int temporal_predictor_count[PREDICTION_PROBS][2];
+ int no_pred_segcounts[MAX_MB_SEGMENTS];
+ int t_unpred_seg_counts[MAX_MB_SEGMENTS];
+
+ vp9_prob no_pred_tree[MB_SEG_TREE_PROBS];
+ vp9_prob t_pred_tree[MB_SEG_TREE_PROBS];
+ vp9_prob t_nopred_prob[PREDICTION_PROBS];
+
+ const int mis = cm->mode_info_stride;
+ MODE_INFO *mi_ptr, *mi;
+
+ // Set default state for the segment tree probabilities and the
+ // temporal coding probabilities
+ vpx_memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
+ vpx_memset(cm->segment_pred_probs, 255, sizeof(cm->segment_pred_probs));
+
+ vpx_memset(no_pred_segcounts, 0, sizeof(no_pred_segcounts));
+ vpx_memset(t_unpred_seg_counts, 0, sizeof(t_unpred_seg_counts));
+ vpx_memset(temporal_predictor_count, 0, sizeof(temporal_predictor_count));
+
+  // First, generate stats on how well the previous frame's segment map
+  // predicts the current one.
+ for (tile_col = 0; tile_col < cm->tile_columns; tile_col++) {
+ vp9_get_tile_col_offsets(cm, tile_col);
+ mi_ptr = cm->mi + cm->cur_tile_mi_col_start;
+ for (mi_row = 0; mi_row < cm->mi_rows;
+ mi_row += 8, mi_ptr += 8 * mis) {
+ mi = mi_ptr;
+ for (mi_col = cm->cur_tile_mi_col_start;
+ mi_col < cm->cur_tile_mi_col_end;
+ mi_col += 8, mi += 8) {
+ count_segs_sb(cpi, mi, no_pred_segcounts, temporal_predictor_count,
+ t_unpred_seg_counts, mi_row, mi_col, BLOCK_SIZE_SB64X64);
+ }
+ }
+ }
+
+ // Work out probability tree for coding segments without prediction
+ // and the cost.
+ calc_segtree_probs(xd, no_pred_segcounts, no_pred_tree);
+ no_pred_cost = cost_segmap(xd, no_pred_segcounts, no_pred_tree);
+
+ // Key frames cannot use temporal prediction
+ if (cm->frame_type != KEY_FRAME) {
+ // Work out probability tree for coding those segments not
+ // predicted using the temporal method and the cost.
+ calc_segtree_probs(xd, t_unpred_seg_counts, t_pred_tree);
+ t_pred_cost = cost_segmap(xd, t_unpred_seg_counts, t_pred_tree);
+
+ // Add in the cost of the signalling for each prediction context
+ for (i = 0; i < PREDICTION_PROBS; i++) {
+ const int count0 = temporal_predictor_count[i][0];
+ const int count1 = temporal_predictor_count[i][1];
+
+ t_nopred_prob[i] = get_binary_prob(count0, count1);
+
+ // Add in the predictor signaling cost
+ t_pred_cost += count0 * vp9_cost_zero(t_nopred_prob[i]) +
+ count1 * vp9_cost_one(t_nopred_prob[i]);
+ }
+ }
+
+ // Now choose which coding method to use.
+ if (t_pred_cost < no_pred_cost) {
+ cm->temporal_update = 1;
+ vpx_memcpy(xd->mb_segment_tree_probs, t_pred_tree, sizeof(t_pred_tree));
+ vpx_memcpy(cm->segment_pred_probs, t_nopred_prob, sizeof(t_nopred_prob));
+ } else {
+ cm->temporal_update = 0;
+ vpx_memcpy(xd->mb_segment_tree_probs, no_pred_tree, sizeof(no_pred_tree));
+ }
+}
diff --git a/libvpx/vp9/encoder/vp9_segmentation.h b/libvpx/vp9/encoder/vp9_segmentation.h
new file mode 100644
index 0000000..2183771
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_segmentation.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_SEGMENTATION_H_
+#define VP9_ENCODER_VP9_SEGMENTATION_H_
+
+#include "vp9/common/vp9_blockd.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+
+void vp9_enable_segmentation(VP9_PTR ptr);
+void vp9_disable_segmentation(VP9_PTR ptr);
+
+// Valid values for a segment are 0 to 7
+// Segmentation map is arranged as [Rows][Columns]
+void vp9_set_segmentation_map(VP9_PTR ptr, unsigned char *segmentation_map);
+
+// The values given for each segment can be either deltas (from the default
+// value chosen for the frame) or absolute values.
+//
+// Valid range for abs values is (0-127 for MB_LVL_ALT_Q), (0-63 for
+// SEGMENT_ALT_LF)
+// Valid range for delta values is (+/-127 for MB_LVL_ALT_Q), (+/-63 for
+// SEGMENT_ALT_LF)
+//
+// abs_delta = SEGMENT_DELTADATA (deltas) abs_delta = SEGMENT_ABSDATA (use
+// the absolute values given).
+void vp9_set_segment_data(VP9_PTR ptr, signed char *feature_data,
+ unsigned char abs_delta);
+
+void vp9_choose_segmap_coding_method(VP9_COMP *cpi);
+
+#endif // VP9_ENCODER_VP9_SEGMENTATION_H_
diff --git a/libvpx/vp9/encoder/vp9_ssim.c b/libvpx/vp9/encoder/vp9_ssim.c
new file mode 100644
index 0000000..363ed84
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_ssim.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vp9/encoder/vp9_onyx_int.h"
+
+void vp9_ssim_parms_16x16_c(uint8_t *s, int sp, uint8_t *r,
+ int rp, unsigned long *sum_s, unsigned long *sum_r,
+ unsigned long *sum_sq_s, unsigned long *sum_sq_r,
+ unsigned long *sum_sxr) {
+ int i, j;
+ for (i = 0; i < 16; i++, s += sp, r += rp) {
+ for (j = 0; j < 16; j++) {
+ *sum_s += s[j];
+ *sum_r += r[j];
+ *sum_sq_s += s[j] * s[j];
+ *sum_sq_r += r[j] * r[j];
+ *sum_sxr += s[j] * r[j];
+ }
+ }
+}
+void vp9_ssim_parms_8x8_c(uint8_t *s, int sp, uint8_t *r, int rp,
+ unsigned long *sum_s, unsigned long *sum_r,
+ unsigned long *sum_sq_s, unsigned long *sum_sq_r,
+ unsigned long *sum_sxr) {
+ int i, j;
+ for (i = 0; i < 8; i++, s += sp, r += rp) {
+ for (j = 0; j < 8; j++) {
+ *sum_s += s[j];
+ *sum_r += r[j];
+ *sum_sq_s += s[j] * s[j];
+ *sum_sq_r += r[j] * r[j];
+ *sum_sxr += s[j] * r[j];
+ }
+ }
+}
+
+static const int64_t cc1 =  26634;  // (64^2*(.01*255)^2)
+static const int64_t cc2 = 239708;  // (64^2*(.03*255)^2)
+
+static double similarity(unsigned long sum_s, unsigned long sum_r,
+ unsigned long sum_sq_s, unsigned long sum_sq_r,
+ unsigned long sum_sxr, int count) {
+ int64_t ssim_n, ssim_d;
+ int64_t c1, c2;
+
+ // scale the constants by number of pixels
+ c1 = (cc1 * count * count) >> 12;
+ c2 = (cc2 * count * count) >> 12;
+
+ ssim_n = (2 * sum_s * sum_r + c1) * ((int64_t) 2 * count * sum_sxr -
+ (int64_t) 2 * sum_s * sum_r + c2);
+
+ ssim_d = (sum_s * sum_s + sum_r * sum_r + c1) *
+ ((int64_t)count * sum_sq_s - (int64_t)sum_s * sum_s +
+ (int64_t)count * sum_sq_r - (int64_t) sum_r * sum_r + c2);
+
+ return ssim_n * 1.0 / ssim_d;
+}
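+
+// Illustrative cross-check, not part of the change: similarity() is
+// fixed-point SSIM with C1 = (.01*255)^2 and C2 = (.03*255)^2 scaled by
+// count^2. An equivalent double-precision reference (hypothetical helper,
+// for reading only) would be:
+#if 0
+static double similarity_ref(unsigned long sum_s, unsigned long sum_r,
+                             unsigned long sum_sq_s, unsigned long sum_sq_r,
+                             unsigned long sum_sxr, int count) {
+  const double c1 = 6.5025, c2 = 58.5225;  // (.01*255)^2, (.03*255)^2
+  const double mean_s = (double)sum_s / count;
+  const double mean_r = (double)sum_r / count;
+  const double var_s = (double)sum_sq_s / count - mean_s * mean_s;
+  const double var_r = (double)sum_sq_r / count - mean_r * mean_r;
+  const double cov = (double)sum_sxr / count - mean_s * mean_r;
+  return (2 * mean_s * mean_r + c1) * (2 * cov + c2) /
+         ((mean_s * mean_s + mean_r * mean_r + c1) * (var_s + var_r + c2));
+}
+#endif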
+
+static double ssim_16x16(uint8_t *s, int sp, uint8_t *r, int rp) {
+ unsigned long sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
+ vp9_ssim_parms_16x16(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
+ &sum_sxr);
+ return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 256);
+}
+static double ssim_8x8(uint8_t *s, int sp, uint8_t *r, int rp) {
+ unsigned long sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
+ vp9_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
+ &sum_sxr);
+ return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 64);
+}
+
+// We use an 8x8 moving window whose starting locations lie on the 4x4
+// pixel grid. This arrangement lets the windows overlap block boundaries
+// so that blocking artifacts are penalized.
+double vp9_ssim2(uint8_t *img1, uint8_t *img2, int stride_img1,
+ int stride_img2, int width, int height) {
+ int i, j;
+ int samples = 0;
+ double ssim_total = 0;
+
+  // sample points start at each 4x4 grid location
+  for (i = 0; i < height - 8;
+       i += 4, img1 += stride_img1 * 4, img2 += stride_img2 * 4) {
+ for (j = 0; j < width - 8; j += 4) {
+ double v = ssim_8x8(img1 + j, stride_img1, img2 + j, stride_img2);
+ ssim_total += v;
+ samples++;
+ }
+ }
+ ssim_total /= samples;
+ return ssim_total;
+}
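+
+// Illustrative only: for a 64x64 plane the loops above visit i, j in
+// {0, 4, ..., 52}, i.e. 14 * 14 = 196 overlapping 8x8 windows, each scored
+// independently and averaged. A typical call (buffers assumed caller-owned
+// and 8-bit) looks like:
+#if 0
+  double y_ssim = vp9_ssim2(src->y_buffer, rec->y_buffer,
+                            src->y_stride, rec->y_stride,
+                            src->y_width, src->y_height);
+#endif
+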
+double vp9_calc_ssim(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
+ int lumamask, double *weight) {
+ double a, b, c;
+ double ssimv;
+
+ a = vp9_ssim2(source->y_buffer, dest->y_buffer,
+ source->y_stride, dest->y_stride, source->y_width,
+ source->y_height);
+
+ b = vp9_ssim2(source->u_buffer, dest->u_buffer,
+ source->uv_stride, dest->uv_stride, source->uv_width,
+ source->uv_height);
+
+ c = vp9_ssim2(source->v_buffer, dest->v_buffer,
+ source->uv_stride, dest->uv_stride, source->uv_width,
+ source->uv_height);
+
+ ssimv = a * .8 + .1 * (b + c);
+
+ *weight = 1;
+
+ return ssimv;
+}
+
+double vp9_calc_ssimg(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
+ double *ssim_y, double *ssim_u, double *ssim_v) {
+ double ssim_all = 0;
+ double a, b, c;
+
+ a = vp9_ssim2(source->y_buffer, dest->y_buffer,
+ source->y_stride, dest->y_stride, source->y_width,
+ source->y_height);
+
+ b = vp9_ssim2(source->u_buffer, dest->u_buffer,
+ source->uv_stride, dest->uv_stride, source->uv_width,
+ source->uv_height);
+
+ c = vp9_ssim2(source->v_buffer, dest->v_buffer,
+ source->uv_stride, dest->uv_stride, source->uv_width,
+ source->uv_height);
+ *ssim_y = a;
+ *ssim_u = b;
+ *ssim_v = c;
+ ssim_all = (a * 4 + b + c) / 6;
+
+ return ssim_all;
+}
diff --git a/libvpx/vp9/encoder/vp9_temporal_filter.c b/libvpx/vp9/encoder/vp9_temporal_filter.c
new file mode 100644
index 0000000..47792fc
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_temporal_filter.c
@@ -0,0 +1,529 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <limits.h>
+
+#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/common/vp9_systemdependent.h"
+#include "vp9/encoder/vp9_quantize.h"
+#include "vp9/common/vp9_alloccommon.h"
+#include "vp9/encoder/vp9_mcomp.h"
+#include "vp9/encoder/vp9_firstpass.h"
+#include "vp9/encoder/vp9_psnr.h"
+#include "vpx_scale/vpx_scale.h"
+#include "vp9/common/vp9_extend.h"
+#include "vp9/encoder/vp9_ratectrl.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/encoder/vp9_segmentation.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/vpx_timer.h"
+
+#define ALT_REF_MC_ENABLED 1      // enable/disable MC in AltRef filtering
+#define ALT_REF_SUBPEL_ENABLED 1  // enable/disable subpel in MC AltRef filtering
+
+static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd,
+ uint8_t *y_mb_ptr,
+ uint8_t *u_mb_ptr,
+ uint8_t *v_mb_ptr,
+ int stride,
+ int mv_row,
+ int mv_col,
+ uint8_t *pred) {
+ const int which_mv = 0;
+ int_mv mv;
+
+ mv.as_mv.row = mv_row;
+ mv.as_mv.col = mv_col;
+
+ vp9_build_inter_predictor(y_mb_ptr, stride,
+ &pred[0], 16,
+ &mv,
+ &xd->scale_factor[which_mv],
+ 16, 16,
+ which_mv,
+ &xd->subpix);
+
+ stride = (stride + 1) >> 1;
+
+ vp9_build_inter_predictor_q4(u_mb_ptr, stride,
+ &pred[256], 8,
+ &mv,
+ &xd->scale_factor_uv[which_mv],
+ 8, 8,
+ which_mv,
+ &xd->subpix);
+
+ vp9_build_inter_predictor_q4(v_mb_ptr, stride,
+ &pred[320], 8,
+ &mv,
+ &xd->scale_factor_uv[which_mv],
+ 8, 8,
+ which_mv,
+ &xd->subpix);
+}
+
+void vp9_temporal_filter_apply_c(uint8_t *frame1,
+ unsigned int stride,
+ uint8_t *frame2,
+ unsigned int block_size,
+ int strength,
+ int filter_weight,
+ unsigned int *accumulator,
+ uint16_t *count) {
+ unsigned int i, j, k;
+ int modifier;
+ int byte = 0;
+
+ for (i = 0, k = 0; i < block_size; i++) {
+ for (j = 0; j < block_size; j++, k++) {
+
+ int src_byte = frame1[byte];
+ int pixel_value = *frame2++;
+
+ modifier = src_byte - pixel_value;
+ // This is an integer approximation of:
+      // float coeff = (3.0 * modifier * modifier) / pow(2, strength);
+      // modifier = (int)roundf(coeff > 16 ? 0 : 16 - coeff);
+ modifier *= modifier;
+ modifier *= 3;
+ modifier += 1 << (strength - 1);
+ modifier >>= strength;
+
+ if (modifier > 16)
+ modifier = 16;
+
+ modifier = 16 - modifier;
+ modifier *= filter_weight;
+
+ count[k] += modifier;
+ accumulator[k] += modifier * pixel_value;
+
+ byte++;
+ }
+
+ byte += stride - block_size;
+ }
+}
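+
+// Worked example of the weighting above (values chosen for illustration):
+// with strength = 6, a pixel difference of 8 gives
+// modifier = (3 * 8 * 8 + 32) >> 6 = 3, i.e. a per-pixel weight of
+// 16 - 3 = 13 before scaling by filter_weight, while any difference of 19
+// or more ((3 * 361 + 32) >> 6 = 17, clamped to 16) contributes nothing.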
+
+#if ALT_REF_MC_ENABLED
+
+static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi,
+ uint8_t *arf_frame_buf,
+ uint8_t *frame_ptr_buf,
+ int stride,
+ int error_thresh) {
+ MACROBLOCK *x = &cpi->mb;
+ MACROBLOCKD* const xd = &x->e_mbd;
+ int step_param;
+ int sadpb = x->sadperbit16;
+ int bestsme = INT_MAX;
+
+ int_mv best_ref_mv1;
+ int_mv best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
+ int_mv *ref_mv;
+
+ // Save input state
+ struct buf_2d src = x->plane[0].src;
+ struct buf_2d pre = xd->plane[0].pre[0];
+
+ best_ref_mv1.as_int = 0;
+ best_ref_mv1_full.as_mv.col = best_ref_mv1.as_mv.col >> 3;
+ best_ref_mv1_full.as_mv.row = best_ref_mv1.as_mv.row >> 3;
+
+ // Setup frame pointers
+ x->plane[0].src.buf = arf_frame_buf;
+ x->plane[0].src.stride = stride;
+ xd->plane[0].pre[0].buf = frame_ptr_buf;
+ xd->plane[0].pre[0].stride = stride;
+
+ // Further step/diamond searches as necessary
+ if (cpi->speed < 8)
+ step_param = cpi->sf.first_step + ((cpi->speed > 5) ? 1 : 0);
+ else
+ step_param = cpi->sf.first_step + 2;
+
+ /*cpi->sf.search_method == HEX*/
+  // TODO: check that the 16x16 vf & sdf are selected here
+ // Ignore mv costing by sending NULL pointer instead of cost arrays
+ ref_mv = &x->e_mbd.mode_info_context->bmi[0].as_mv[0];
+ bestsme = vp9_hex_search(x, &best_ref_mv1_full, ref_mv,
+ step_param, sadpb, &cpi->fn_ptr[BLOCK_16X16],
+ NULL, NULL, NULL, NULL,
+ &best_ref_mv1);
+
+#if ALT_REF_SUBPEL_ENABLED
+ // Try sub-pixel MC?
+ // if (bestsme > error_thresh && bestsme < INT_MAX)
+ {
+ int distortion;
+ unsigned int sse;
+ // Ignore mv costing by sending NULL pointer instead of cost array
+ bestsme = cpi->find_fractional_mv_step(x, ref_mv,
+ &best_ref_mv1,
+ x->errorperbit,
+ &cpi->fn_ptr[BLOCK_16X16],
+ NULL, NULL,
+ &distortion, &sse);
+ }
+#endif
+
+ // Restore input state
+ x->plane[0].src = src;
+ xd->plane[0].pre[0] = pre;
+
+ return bestsme;
+}
+#endif
+
+static void temporal_filter_iterate_c(VP9_COMP *cpi,
+ int frame_count,
+ int alt_ref_index,
+ int strength) {
+ int byte;
+ int frame;
+ int mb_col, mb_row;
+ unsigned int filter_weight;
+ int mb_cols = cpi->common.mb_cols;
+ int mb_rows = cpi->common.mb_rows;
+ int mb_y_offset = 0;
+ int mb_uv_offset = 0;
+ DECLARE_ALIGNED_ARRAY(16, unsigned int, accumulator, 16 * 16 + 8 * 8 + 8 * 8);
+ DECLARE_ALIGNED_ARRAY(16, uint16_t, count, 16 * 16 + 8 * 8 + 8 * 8);
+ MACROBLOCKD *mbd = &cpi->mb.e_mbd;
+ YV12_BUFFER_CONFIG *f = cpi->frames[alt_ref_index];
+ uint8_t *dst1, *dst2;
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, predictor, 16 * 16 + 8 * 8 + 8 * 8);
+
+ // Save input state
+ uint8_t* input_buffer[MAX_MB_PLANE];
+ int i;
+
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ input_buffer[i] = mbd->plane[i].pre[0].buf;
+
+ for (mb_row = 0; mb_row < mb_rows; mb_row++) {
+#if ALT_REF_MC_ENABLED
+    // Source frames are extended to 16 pixels. This is different from the
+    // L/A/G reference frames, which have a border of 32 (VP9BORDERINPIXELS).
+    // The 6/8-tap filter used for motion search needs 2 pixels before and
+    // 3 pixels after the sample, so the largest Y mv on a border would be
+    // 16 - VP9_INTERP_EXTEND. The UV blocks are half the size of the Y
+    // blocks and are only extended by 8, so the largest mv a UV block can
+    // support is 8 - VP9_INTERP_EXTEND. Since a UV mv is half of a Y mv, a
+    // Y mv of 16 - VP9_INTERP_EXTEND implies a UV mv of
+    // (16 - VP9_INTERP_EXTEND) >> 1, which is greater than
+    // 8 - VP9_INTERP_EXTEND. To keep the mv usable for both the Y and UV
+    // planes, the maximum on a border is therefore
+    // 16 - (2 * VP9_INTERP_EXTEND + 1).
+ cpi->mb.mv_row_min = -((mb_row * 16) + (17 - 2 * VP9_INTERP_EXTEND));
+ cpi->mb.mv_row_max = ((cpi->common.mb_rows - 1 - mb_row) * 16)
+ + (17 - 2 * VP9_INTERP_EXTEND);
+#endif
+
+ for (mb_col = 0; mb_col < mb_cols; mb_col++) {
+ int i, j, k;
+ int stride;
+
+ vpx_memset(accumulator, 0, 384 * sizeof(unsigned int));
+ vpx_memset(count, 0, 384 * sizeof(uint16_t));
+
+#if ALT_REF_MC_ENABLED
+ cpi->mb.mv_col_min = -((mb_col * 16) + (17 - 2 * VP9_INTERP_EXTEND));
+ cpi->mb.mv_col_max = ((cpi->common.mb_cols - 1 - mb_col) * 16)
+ + (17 - 2 * VP9_INTERP_EXTEND);
+#endif
+
+ for (frame = 0; frame < frame_count; frame++) {
+ if (cpi->frames[frame] == NULL)
+ continue;
+
+ mbd->mode_info_context->bmi[0].as_mv[0].as_mv.row = 0;
+ mbd->mode_info_context->bmi[0].as_mv[0].as_mv.col = 0;
+
+ if (frame == alt_ref_index) {
+ filter_weight = 2;
+ } else {
+ int err = 0;
+#if ALT_REF_MC_ENABLED
+#define THRESH_LOW 10000
+#define THRESH_HIGH 20000
+
+ // Find best match in this frame by MC
+ err = temporal_filter_find_matching_mb_c
+ (cpi,
+ cpi->frames[alt_ref_index]->y_buffer + mb_y_offset,
+ cpi->frames[frame]->y_buffer + mb_y_offset,
+ cpi->frames[frame]->y_stride,
+ THRESH_LOW);
+#endif
+          // Assign a higher weight to the matching MB if its error score
+          // is lower. If MC is not applied, the default behavior is to
+          // weight all MBs equally.
+ filter_weight = err < THRESH_LOW
+ ? 2 : err < THRESH_HIGH ? 1 : 0;
+ }
+
+ if (filter_weight != 0) {
+ // Construct the predictors
+ temporal_filter_predictors_mb_c
+ (mbd,
+ cpi->frames[frame]->y_buffer + mb_y_offset,
+ cpi->frames[frame]->u_buffer + mb_uv_offset,
+ cpi->frames[frame]->v_buffer + mb_uv_offset,
+ cpi->frames[frame]->y_stride,
+ mbd->mode_info_context->bmi[0].as_mv[0].as_mv.row,
+ mbd->mode_info_context->bmi[0].as_mv[0].as_mv.col,
+ predictor);
+
+ // Apply the filter (YUV)
+ vp9_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
+ predictor, 16, strength, filter_weight,
+ accumulator, count);
+
+ vp9_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
+ predictor + 256, 8, strength, filter_weight,
+ accumulator + 256, count + 256);
+
+ vp9_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
+ predictor + 320, 8, strength, filter_weight,
+ accumulator + 320, count + 320);
+ }
+ }
+
+ // Normalize filter output to produce AltRef frame
+ dst1 = cpi->alt_ref_buffer.y_buffer;
+ stride = cpi->alt_ref_buffer.y_stride;
+ byte = mb_y_offset;
+ for (i = 0, k = 0; i < 16; i++) {
+ for (j = 0; j < 16; j++, k++) {
+ unsigned int pval = accumulator[k] + (count[k] >> 1);
+ pval *= cpi->fixed_divide[count[k]];
+ pval >>= 19;
+
+ dst1[byte] = (uint8_t)pval;
+
+ // move to next pixel
+ byte++;
+ }
+
+ byte += stride - 16;
+ }
+
+ dst1 = cpi->alt_ref_buffer.u_buffer;
+ dst2 = cpi->alt_ref_buffer.v_buffer;
+ stride = cpi->alt_ref_buffer.uv_stride;
+ byte = mb_uv_offset;
+ for (i = 0, k = 256; i < 8; i++) {
+ for (j = 0; j < 8; j++, k++) {
+ int m = k + 64;
+
+ // U
+ unsigned int pval = accumulator[k] + (count[k] >> 1);
+ pval *= cpi->fixed_divide[count[k]];
+ pval >>= 19;
+ dst1[byte] = (uint8_t)pval;
+
+ // V
+ pval = accumulator[m] + (count[m] >> 1);
+ pval *= cpi->fixed_divide[count[m]];
+ pval >>= 19;
+ dst2[byte] = (uint8_t)pval;
+
+ // move to next pixel
+ byte++;
+ }
+
+ byte += stride - 8;
+ }
+
+ mb_y_offset += 16;
+ mb_uv_offset += 8;
+ }
+
+ mb_y_offset += 16 * (f->y_stride - mb_cols);
+ mb_uv_offset += 8 * (f->uv_stride - mb_cols);
+ }
+
+ // Restore input state
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ mbd->plane[i].pre[0].buf = input_buffer[i];
+}
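+
+// The normalization loops above compute a rounded integer divide, assuming
+// cpi->fixed_divide[n] holds (1 << 19) / n as initialized elsewhere in the
+// encoder. For example, accumulator = 2001 with count = 2 gives
+// (2001 + 1) * 262144 >> 19 = 1001, i.e. 2001 / 2 rounded to nearest.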
+
+void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance) {
+ VP9_COMMON *const cm = &cpi->common;
+
+ int frame = 0;
+
+ int frames_to_blur_backward = 0;
+ int frames_to_blur_forward = 0;
+ int frames_to_blur = 0;
+ int start_frame = 0;
+
+ int strength = cpi->active_arnr_strength;
+ int blur_type = cpi->oxcf.arnr_type;
+ int max_frames = cpi->active_arnr_frames;
+
+ const int num_frames_backward = distance;
+ const int num_frames_forward = vp9_lookahead_depth(cpi->lookahead)
+ - (num_frames_backward + 1);
+
+ switch (blur_type) {
+ case 1:
+ // Backward Blur
+ frames_to_blur_backward = num_frames_backward;
+
+ if (frames_to_blur_backward >= max_frames)
+ frames_to_blur_backward = max_frames - 1;
+
+ frames_to_blur = frames_to_blur_backward + 1;
+ break;
+
+ case 2:
+ // Forward Blur
+
+ frames_to_blur_forward = num_frames_forward;
+
+ if (frames_to_blur_forward >= max_frames)
+ frames_to_blur_forward = max_frames - 1;
+
+ frames_to_blur = frames_to_blur_forward + 1;
+ break;
+
+ case 3:
+ default:
+ // Center Blur
+ frames_to_blur_forward = num_frames_forward;
+ frames_to_blur_backward = num_frames_backward;
+
+ if (frames_to_blur_forward > frames_to_blur_backward)
+ frames_to_blur_forward = frames_to_blur_backward;
+
+ if (frames_to_blur_backward > frames_to_blur_forward)
+ frames_to_blur_backward = frames_to_blur_forward;
+
+      // When max_frames is even, we have one more frame backward than forward
+ if (frames_to_blur_forward > (max_frames - 1) / 2)
+ frames_to_blur_forward = ((max_frames - 1) / 2);
+
+ if (frames_to_blur_backward > (max_frames / 2))
+ frames_to_blur_backward = (max_frames / 2);
+
+ frames_to_blur = frames_to_blur_backward + frames_to_blur_forward + 1;
+ break;
+ }
+
+ start_frame = distance + frames_to_blur_forward;
+
+#ifdef DEBUGFWG
+ // DEBUG FWG
+ printf("max:%d FBCK:%d FFWD:%d ftb:%d ftbbck:%d ftbfwd:%d sei:%d lasei:%d start:%d"
+, max_frames
+, num_frames_backward
+, num_frames_forward
+, frames_to_blur
+, frames_to_blur_backward
+, frames_to_blur_forward
+, cpi->source_encode_index
+, cpi->last_alt_ref_sei
+, start_frame);
+#endif
+
+ // Setup scaling factors. Scaling on each of the arnr frames is not supported
+ vp9_setup_scale_factors_for_frame(&cpi->mb.e_mbd.scale_factor[0],
+ cm->yv12_fb[cm->new_fb_idx].y_crop_width,
+ cm->yv12_fb[cm->new_fb_idx].y_crop_height,
+ cm->width, cm->height);
+ cpi->mb.e_mbd.scale_factor_uv[0] = cpi->mb.e_mbd.scale_factor[0];
+
+  // Set up frame pointers; NULL indicates a frame not included in the filter
+ vpx_memset(cpi->frames, 0, max_frames * sizeof(YV12_BUFFER_CONFIG *));
+ for (frame = 0; frame < frames_to_blur; frame++) {
+ int which_buffer = start_frame - frame;
+ struct lookahead_entry *buf = vp9_lookahead_peek(cpi->lookahead,
+ which_buffer);
+ cpi->frames[frames_to_blur - 1 - frame] = &buf->img;
+ }
+
+ temporal_filter_iterate_c(cpi, frames_to_blur, frames_to_blur_backward,
+ strength);
+}
+
+void configure_arnr_filter(VP9_COMP *cpi, const unsigned int this_frame,
+ const int group_boost) {
+ int half_gf_int;
+ int frames_after_arf;
+ int frames_bwd = cpi->oxcf.arnr_max_frames - 1;
+ int frames_fwd = cpi->oxcf.arnr_max_frames - 1;
+ int q;
+
+ // Define the arnr filter width for this group of frames:
+ // We only filter frames that lie within a distance of half
+ // the GF interval from the ARF frame. We also have to trap
+ // cases where the filter extends beyond the end of clip.
+  // Note: this_frame has already been advanced in the caller's loop,
+  // so it now indexes the ARF frame.
+ half_gf_int = cpi->baseline_gf_interval >> 1;
+ frames_after_arf = (int)(cpi->twopass.total_stats.count - this_frame - 1);
+
+ switch (cpi->oxcf.arnr_type) {
+ case 1: // Backward filter
+ frames_fwd = 0;
+ if (frames_bwd > half_gf_int)
+ frames_bwd = half_gf_int;
+ break;
+
+ case 2: // Forward filter
+ if (frames_fwd > half_gf_int)
+ frames_fwd = half_gf_int;
+ if (frames_fwd > frames_after_arf)
+ frames_fwd = frames_after_arf;
+ frames_bwd = 0;
+ break;
+
+ case 3: // Centered filter
+ default:
+ frames_fwd >>= 1;
+ if (frames_fwd > frames_after_arf)
+ frames_fwd = frames_after_arf;
+ if (frames_fwd > half_gf_int)
+ frames_fwd = half_gf_int;
+
+ frames_bwd = frames_fwd;
+
+ // For even length filter there is one more frame backward
+ // than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff.
+ if (frames_bwd < half_gf_int)
+ frames_bwd += (cpi->oxcf.arnr_max_frames + 1) & 0x1;
+ break;
+ }
+
+ cpi->active_arnr_frames = frames_bwd + 1 + frames_fwd;
+
+ // Adjust the strength based on active max q
+ q = ((int)vp9_convert_qindex_to_q(cpi->active_worst_quality) >> 1);
+ if (q > 8) {
+ cpi->active_arnr_strength = cpi->oxcf.arnr_strength;
+ } else {
+ cpi->active_arnr_strength = cpi->oxcf.arnr_strength - (8 - q);
+ if (cpi->active_arnr_strength < 0)
+ cpi->active_arnr_strength = 0;
+ }
+
+ // Adjust number of frames in filter and strength based on gf boost level.
+ if (cpi->active_arnr_frames > (group_boost / 150)) {
+ cpi->active_arnr_frames = (group_boost / 150);
+ cpi->active_arnr_frames += !(cpi->active_arnr_frames & 1);
+ }
+ if (cpi->active_arnr_strength > (group_boost / 300)) {
+ cpi->active_arnr_strength = (group_boost / 300);
+ }
+}
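+
+// Worked example under assumed settings: with arnr_max_frames = 7, a
+// centered filter (type 3), baseline_gf_interval = 16 and enough frames
+// after the ARF, frames_fwd = (7 - 1) >> 1 = 3 and frames_bwd = 3, so
+// active_arnr_frames = 3 + 1 + 3 = 7 (bbbAfff). With arnr_max_frames = 6,
+// frames_fwd = 2 and frames_bwd = 2 + ((6 + 1) & 1) = 3, giving bbbAff.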
diff --git a/libvpx/vp9/encoder/vp9_temporal_filter.h b/libvpx/vp9/encoder/vp9_temporal_filter.h
new file mode 100644
index 0000000..c5f3b46
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_temporal_filter.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_TEMPORAL_FILTER_H_
+#define VP9_ENCODER_VP9_TEMPORAL_FILTER_H_
+
+void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance);
+void configure_arnr_filter(VP9_COMP *cpi, const unsigned int this_frame,
+ const int group_boost);
+
+#endif // VP9_ENCODER_VP9_TEMPORAL_FILTER_H_
diff --git a/libvpx/vp9/encoder/vp9_tokenize.c b/libvpx/vp9/encoder/vp9_tokenize.c
new file mode 100644
index 0000000..0a290e1
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_tokenize.c
@@ -0,0 +1,458 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/encoder/vp9_tokenize.h"
+#include "vpx_mem/vpx_mem.h"
+
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_entropy.h"
+
+/* Global event counters used for accumulating statistics across several
+   compressions, then generating vp9_context.c (the initial stats). */
+
+#ifdef ENTROPY_STATS
+vp9_coeff_accum context_counters[TX_SIZE_MAX_SB][BLOCK_TYPES];
+extern vp9_coeff_stats tree_update_hist[TX_SIZE_MAX_SB][BLOCK_TYPES];
+#endif /* ENTROPY_STATS */
+
+DECLARE_ALIGNED(16, extern const uint8_t,
+ vp9_pt_energy_class[MAX_ENTROPY_TOKENS]);
+
+static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE * 2];
+const TOKENVALUE *vp9_dct_value_tokens_ptr;
+static int dct_value_cost[DCT_MAX_VALUE * 2];
+const int *vp9_dct_value_cost_ptr;
+
+static void fill_value_tokens(void) {
+ TOKENVALUE *const t = dct_value_tokens + DCT_MAX_VALUE;
+ vp9_extra_bit *const e = vp9_extra_bits;
+
+ int i = -DCT_MAX_VALUE;
+ int sign = 1;
+
+ do {
+ if (!i)
+ sign = 0;
+
+ {
+ const int a = sign ? -i : i;
+ int eb = sign;
+
+ if (a > 4) {
+ int j = 4;
+
+ while (++j < 11 && e[j].base_val <= a) {}
+
+ t[i].token = --j;
+ eb |= (a - e[j].base_val) << 1;
+ } else
+ t[i].token = a;
+
+ t[i].extra = eb;
+ }
+
+    // initialize the cost for extra bits for all possible coefficient values.
+ {
+ int cost = 0;
+ vp9_extra_bit *p = vp9_extra_bits + t[i].token;
+
+ if (p->base_val) {
+ const int extra = t[i].extra;
+ const int length = p->len;
+
+ if (length)
+ cost += treed_cost(p->tree, p->prob, extra >> 1, length);
+
+ cost += vp9_cost_bit(vp9_prob_half, extra & 1); /* sign */
+ dct_value_cost[i + DCT_MAX_VALUE] = cost;
+ }
+
+ }
+
+ } while (++i < DCT_MAX_VALUE);
+
+ vp9_dct_value_tokens_ptr = dct_value_tokens + DCT_MAX_VALUE;
+ vp9_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
+}
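+
+// Worked example of the mapping built above (category base values as
+// defined by vp9_extra_bits): for v = -10, a = 10 falls in category 2
+// (base_val 7, two extra bits), so the table stores
+// token = DCT_VAL_CATEGORY2 and extra = ((10 - 7) << 1) | 1, the low bit
+// being the sign.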
+
+extern const int *vp9_get_coef_neighbors_handle(const int *scan, int *pad);
+
+struct tokenize_b_args {
+ VP9_COMP *cpi;
+ MACROBLOCKD *xd;
+ TOKENEXTRA **tp;
+ TX_SIZE tx_size;
+ int dry_run;
+};
+
+static void tokenize_b(int plane, int block, BLOCK_SIZE_TYPE bsize,
+ int ss_txfrm_size, void *arg) {
+ struct tokenize_b_args* const args = arg;
+ VP9_COMP *cpi = args->cpi;
+ MACROBLOCKD *xd = args->xd;
+ TOKENEXTRA **tp = args->tp;
+ PLANE_TYPE type = plane ? PLANE_TYPE_UV : PLANE_TYPE_Y_WITH_DC;
+ TX_SIZE tx_size = ss_txfrm_size / 2;
+ int dry_run = args->dry_run;
+
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ int pt; /* near block/prev token context index */
+ int c = 0, rc = 0;
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
+ const int eob = xd->plane[plane].eobs[block];
+ const int16_t *qcoeff_ptr = BLOCK_OFFSET(xd->plane[plane].qcoeff, block, 16);
+ const BLOCK_SIZE_TYPE sb_type = (mbmi->sb_type < BLOCK_SIZE_SB8X8) ?
+ BLOCK_SIZE_SB8X8 : mbmi->sb_type;
+ const int bwl = b_width_log2(sb_type);
+ const int off = block >> (2 * tx_size);
+ const int mod = bwl - tx_size - xd->plane[plane].subsampling_x;
+ const int aoff = (off & ((1 << mod) - 1)) << tx_size;
+ const int loff = (off >> mod) << tx_size;
+ ENTROPY_CONTEXT *A = xd->plane[plane].above_context + aoff;
+ ENTROPY_CONTEXT *L = xd->plane[plane].left_context + loff;
+ int seg_eob, default_eob, pad;
+ const int segment_id = mbmi->segment_id;
+ const int *scan, *nb;
+ vp9_coeff_count *counts;
+ vp9_coeff_probs_model *coef_probs;
+ const int ref = mbmi->ref_frame[0] != INTRA_FRAME;
+ ENTROPY_CONTEXT above_ec, left_ec;
+ uint8_t token_cache[1024];
+ TX_TYPE tx_type = DCT_DCT;
+ const uint8_t * band_translate;
+ assert((!type && !plane) || (type && plane));
+
+ counts = cpi->coef_counts[tx_size];
+ coef_probs = cpi->common.fc.coef_probs[tx_size];
+ switch (tx_size) {
+ default:
+ case TX_4X4: {
+ tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+ get_tx_type_4x4(xd, block) : DCT_DCT;
+ above_ec = A[0] != 0;
+ left_ec = L[0] != 0;
+ seg_eob = 16;
+ scan = get_scan_4x4(tx_type);
+ band_translate = vp9_coefband_trans_4x4;
+ break;
+ }
+ case TX_8X8: {
+ const int sz = 1 + b_width_log2(sb_type);
+ const int x = block & ((1 << sz) - 1), y = block - x;
+ tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+ get_tx_type_8x8(xd, y + (x >> 1)) : DCT_DCT;
+ above_ec = (A[0] + A[1]) != 0;
+ left_ec = (L[0] + L[1]) != 0;
+ seg_eob = 64;
+ scan = get_scan_8x8(tx_type);
+ band_translate = vp9_coefband_trans_8x8plus;
+ break;
+ }
+ case TX_16X16: {
+ const int sz = 2 + b_width_log2(sb_type);
+ const int x = block & ((1 << sz) - 1), y = block - x;
+ tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
+ get_tx_type_16x16(xd, y + (x >> 2)) : DCT_DCT;
+ above_ec = (A[0] + A[1] + A[2] + A[3]) != 0;
+ left_ec = (L[0] + L[1] + L[2] + L[3]) != 0;
+ seg_eob = 256;
+ scan = get_scan_16x16(tx_type);
+ band_translate = vp9_coefband_trans_8x8plus;
+ break;
+ }
+ case TX_32X32:
+ above_ec = (A[0] + A[1] + A[2] + A[3] + A[4] + A[5] + A[6] + A[7]) != 0;
+ left_ec = (L[0] + L[1] + L[2] + L[3] + L[4] + L[5] + L[6] + L[7]) != 0;
+ seg_eob = 1024;
+ scan = vp9_default_scan_32x32;
+ band_translate = vp9_coefband_trans_8x8plus;
+ break;
+ }
+
+ pt = combine_entropy_contexts(above_ec, left_ec);
+ nb = vp9_get_coef_neighbors_handle(scan, &pad);
+ default_eob = seg_eob;
+
+ if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))
+ seg_eob = 0;
+
+ c = 0;
+ do {
+ const int band = get_coef_band(band_translate, c);
+ int token;
+ int v = 0;
+ rc = scan[c];
+ if (c)
+ pt = vp9_get_coef_context(scan, nb, pad, token_cache, c, default_eob);
+ if (c < eob) {
+ v = qcoeff_ptr[rc];
+ assert(-DCT_MAX_VALUE <= v && v < DCT_MAX_VALUE);
+
+ t->extra = vp9_dct_value_tokens_ptr[v].extra;
+ token = vp9_dct_value_tokens_ptr[v].token;
+ } else {
+ token = DCT_EOB_TOKEN;
+ }
+
+ t->token = token;
+ t->context_tree = coef_probs[type][ref][band][pt];
+ t->skip_eob_node = (c > 0) && (token_cache[scan[c - 1]] == 0);
+
+#if CONFIG_BALANCED_COEFTREE
+ assert(token <= ZERO_TOKEN ||
+ vp9_coef_encodings[t->token].len - t->skip_eob_node > 0);
+#else
+ assert(vp9_coef_encodings[t->token].len - t->skip_eob_node > 0);
+#endif
+
+ if (!dry_run) {
+ ++counts[type][ref][band][pt][token];
+#if CONFIG_BALANCED_COEFTREE
+ if (!t->skip_eob_node && token > ZERO_TOKEN)
+#else
+ if (!t->skip_eob_node)
+#endif
+ ++cpi->common.fc.eob_branch_counts[tx_size][type][ref][band][pt];
+ }
+ token_cache[scan[c]] = vp9_pt_energy_class[token];
+ ++t;
+ } while (c < eob && ++c < seg_eob);
+
+ *tp = t;
+ if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) {
+ set_contexts_on_border(xd, bsize, plane, tx_size, c, aoff, loff, A, L);
+ } else {
+ for (pt = 0; pt < (1 << tx_size); pt++) {
+ A[pt] = L[pt] = c > 0;
+ }
+ }
+}
+
+struct is_skippable_args {
+ MACROBLOCKD *xd;
+ int *skippable;
+};
+static void is_skippable(int plane, int block,
+ BLOCK_SIZE_TYPE bsize, int ss_txfrm_size, void *argv) {
+ struct is_skippable_args *args = argv;
+ args->skippable[0] &= (!args->xd->plane[plane].eobs[block]);
+}
+
+int vp9_sb_is_skippable(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
+ int result = 1;
+ struct is_skippable_args args = {xd, &result};
+ foreach_transformed_block(xd, bsize, is_skippable, &args);
+ return result;
+}
+
+int vp9_sby_is_skippable(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
+ int result = 1;
+ struct is_skippable_args args = {xd, &result};
+ foreach_transformed_block_in_plane(xd, bsize, 0,
+ is_skippable, &args);
+ return result;
+}
+
+int vp9_sbuv_is_skippable(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
+ int result = 1;
+ struct is_skippable_args args = {xd, &result};
+ foreach_transformed_block_uv(xd, bsize, is_skippable, &args);
+ return result;
+}
+
+void vp9_tokenize_sb(VP9_COMP *cpi,
+ MACROBLOCKD *xd,
+ TOKENEXTRA **t,
+ int dry_run, BLOCK_SIZE_TYPE bsize) {
+ VP9_COMMON * const cm = &cpi->common;
+ MB_MODE_INFO * const mbmi = &xd->mode_info_context->mbmi;
+ TOKENEXTRA *t_backup = *t;
+ const int mb_skip_context = vp9_get_pred_context(cm, xd, PRED_MBSKIP);
+ const int segment_id = mbmi->segment_id;
+ const int skip_inc = !vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP);
+ const TX_SIZE txfm_size = mbmi->txfm_size;
+ struct tokenize_b_args arg = {
+ cpi, xd, t, txfm_size, dry_run
+ };
+
+ mbmi->mb_skip_coeff = vp9_sb_is_skippable(xd, bsize);
+
+ if (mbmi->mb_skip_coeff) {
+ if (!dry_run)
+ cm->fc.mbskip_count[mb_skip_context][1] += skip_inc;
+ vp9_reset_sb_tokens_context(xd, bsize);
+ if (dry_run)
+ *t = t_backup;
+ return;
+ }
+
+ if (!dry_run)
+ cm->fc.mbskip_count[mb_skip_context][0] += skip_inc;
+
+ foreach_transformed_block(xd, bsize, tokenize_b, &arg);
+
+ if (dry_run)
+ *t = t_backup;
+}
+
+#ifdef ENTROPY_STATS
+void init_context_counters(void) {
+ FILE *f = fopen("context.bin", "rb");
+ if (!f) {
+ vp9_zero(context_counters);
+ } else {
+ fread(context_counters, sizeof(context_counters), 1, f);
+ fclose(f);
+ }
+
+ f = fopen("treeupdate.bin", "rb");
+ if (!f) {
+ vpx_memset(tree_update_hist, 0, sizeof(tree_update_hist));
+ } else {
+ fread(tree_update_hist, sizeof(tree_update_hist), 1, f);
+ fclose(f);
+ }
+}
+
+static void print_counter(FILE *f, vp9_coeff_accum *context_counters,
+ int block_types, const char *header) {
+ int type, ref, band, pt, t;
+
+ fprintf(f, "static const vp9_coeff_count %s = {\n", header);
+
+#define Comma(X) (X ? "," : "")
+ type = 0;
+ do {
+ ref = 0;
+ fprintf(f, "%s\n { /* block Type %d */", Comma(type), type);
+ do {
+      fprintf(f, "%s\n  { /* %s */", Comma(ref), ref ? "Inter" : "Intra");
+ band = 0;
+ do {
+ fprintf(f, "%s\n { /* Coeff Band %d */", Comma(band), band);
+ pt = 0;
+ do {
+ fprintf(f, "%s\n {", Comma(pt));
+
+ t = 0;
+ do {
+ const int64_t x = context_counters[type][ref][band][pt][t];
+ const int y = (int) x;
+
+ assert(x == (int64_t) y); /* no overflow handling yet */
+ fprintf(f, "%s %d", Comma(t), y);
+ } while (++t < 1 + MAX_ENTROPY_TOKENS);
+ fprintf(f, "}");
+ } while (++pt < PREV_COEF_CONTEXTS);
+ fprintf(f, "\n }");
+ } while (++band < COEF_BANDS);
+ fprintf(f, "\n }");
+ } while (++ref < REF_TYPES);
+ fprintf(f, "\n }");
+ } while (++type < block_types);
+ fprintf(f, "\n};\n");
+}
+
+static void print_probs(FILE *f, vp9_coeff_accum *context_counters,
+ int block_types, const char *header) {
+ int type, ref, band, pt, t;
+
+ fprintf(f, "static const vp9_coeff_probs %s = {", header);
+
+ type = 0;
+#define Newline(x, spaces) (x ? " " : "\n" spaces)
+ do {
+ fprintf(f, "%s%s{ /* block Type %d */",
+ Comma(type), Newline(type, " "), type);
+ ref = 0;
+ do {
+ fprintf(f, "%s%s{ /* %s */",
+              Comma(ref), Newline(ref, "  "), ref ? "Inter" : "Intra");
+ band = 0;
+ do {
+ fprintf(f, "%s%s{ /* Coeff Band %d */",
+ Comma(band), Newline(band, " "), band);
+ pt = 0;
+ do {
+ unsigned int branch_ct[ENTROPY_NODES][2];
+ unsigned int coef_counts[MAX_ENTROPY_TOKENS + 1];
+ vp9_prob coef_probs[ENTROPY_NODES];
+
+ if (pt >= 3 && band == 0)
+ break;
+ for (t = 0; t < MAX_ENTROPY_TOKENS + 1; ++t)
+ coef_counts[t] = context_counters[type][ref][band][pt][t];
+ vp9_tree_probs_from_distribution(vp9_coef_tree, coef_probs,
+ branch_ct, coef_counts, 0);
+ branch_ct[0][1] = coef_counts[MAX_ENTROPY_TOKENS] - branch_ct[0][0];
+ coef_probs[0] = get_binary_prob(branch_ct[0][0], branch_ct[0][1]);
+ fprintf(f, "%s\n {", Comma(pt));
+
+ t = 0;
+ do {
+ fprintf(f, "%s %3d", Comma(t), coef_probs[t]);
+ } while (++t < ENTROPY_NODES);
+
+ fprintf(f, " }");
+ } while (++pt < PREV_COEF_CONTEXTS);
+ fprintf(f, "\n }");
+ } while (++band < COEF_BANDS);
+ fprintf(f, "\n }");
+ } while (++ref < REF_TYPES);
+ fprintf(f, "\n }");
+ } while (++type < block_types);
+ fprintf(f, "\n};\n");
+}
+
+void print_context_counters() {
+ FILE *f = fopen("vp9_context.c", "w");
+
+ fprintf(f, "#include \"vp9_entropy.h\"\n");
+ fprintf(f, "\n/* *** GENERATED FILE: DO NOT EDIT *** */\n\n");
+
+ /* print counts */
+ print_counter(f, context_counters[TX_4X4], BLOCK_TYPES,
+ "vp9_default_coef_counts_4x4[BLOCK_TYPES]");
+ print_counter(f, context_counters[TX_8X8], BLOCK_TYPES,
+ "vp9_default_coef_counts_8x8[BLOCK_TYPES]");
+ print_counter(f, context_counters[TX_16X16], BLOCK_TYPES,
+ "vp9_default_coef_counts_16x16[BLOCK_TYPES]");
+ print_counter(f, context_counters[TX_32X32], BLOCK_TYPES,
+ "vp9_default_coef_counts_32x32[BLOCK_TYPES]");
+
+ /* print coefficient probabilities */
+ print_probs(f, context_counters[TX_4X4], BLOCK_TYPES,
+ "default_coef_probs_4x4[BLOCK_TYPES]");
+ print_probs(f, context_counters[TX_8X8], BLOCK_TYPES,
+ "default_coef_probs_8x8[BLOCK_TYPES]");
+ print_probs(f, context_counters[TX_16X16], BLOCK_TYPES,
+ "default_coef_probs_16x16[BLOCK_TYPES]");
+ print_probs(f, context_counters[TX_32X32], BLOCK_TYPES,
+ "default_coef_probs_32x32[BLOCK_TYPES]");
+
+ fclose(f);
+
+ f = fopen("context.bin", "wb");
+ fwrite(context_counters, sizeof(context_counters), 1, f);
+ fclose(f);
+}
+#endif
+
+void vp9_tokenize_initialize() {
+ fill_value_tokens();
+}
diff --git a/libvpx/vp9/encoder/vp9_tokenize.h b/libvpx/vp9/encoder/vp9_tokenize.h
new file mode 100644
index 0000000..e7f90c9
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_tokenize.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_TOKENIZE_H_
+#define VP9_ENCODER_VP9_TOKENIZE_H_
+
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/encoder/vp9_block.h"
+
+void vp9_tokenize_initialize();
+
+typedef struct {
+ int16_t token;
+ int16_t extra;
+} TOKENVALUE;
+
+typedef struct {
+ const vp9_prob *context_tree;
+ int16_t extra;
+ uint8_t token;
+ uint8_t skip_eob_node;
+} TOKENEXTRA;
+
+typedef int64_t vp9_coeff_accum[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
+ [MAX_ENTROPY_TOKENS + 1];
+
+int vp9_sb_is_skippable(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
+int vp9_sby_is_skippable(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
+int vp9_sbuv_is_skippable(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
+struct VP9_COMP;
+
+void vp9_tokenize_sb(struct VP9_COMP *cpi, MACROBLOCKD *xd,
+ TOKENEXTRA **t, int dry_run, BLOCK_SIZE_TYPE bsize);
+
+#ifdef ENTROPY_STATS
+void init_context_counters();
+void print_context_counters();
+
+extern vp9_coeff_accum context_counters[TX_SIZE_MAX_SB][BLOCK_TYPES];
+#endif
+
+extern const int *vp9_dct_value_cost_ptr;
+/* TODO: The Token field should be broken out into a separate char array to
+ * improve cache locality, since it's needed for costing when the rest of the
+ * fields are not.
+ */
+extern const TOKENVALUE *vp9_dct_value_tokens_ptr;
+
+#endif // VP9_ENCODER_VP9_TOKENIZE_H_
diff --git a/libvpx/vp9/encoder/vp9_treewriter.c b/libvpx/vp9/encoder/vp9_treewriter.c
new file mode 100644
index 0000000..e4aed53
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_treewriter.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/encoder/vp9_treewriter.h"
+
+static void cost(int *costs, vp9_tree tree, const vp9_prob *probs,
+ int i, int c) {
+ const vp9_prob prob = probs[i / 2];
+ int b;
+
+ for (b = 0; b <= 1; ++b) {
+ const int cc = c + vp9_cost_bit(prob, b);
+ const vp9_tree_index ii = tree[i + b];
+
+ if (ii <= 0)
+ costs[-ii] = cc;
+ else
+ cost(costs, tree, probs, ii, cc);
+ }
+}
+
+void vp9_cost_tokens(int *costs, const vp9_prob *probs, vp9_tree tree) {
+ cost(costs, tree, probs, 0, 0);
+}
+
+void vp9_cost_tokens_skip(int *costs, const vp9_prob *probs, vp9_tree tree) {
+ assert(tree[0] <= 0 && tree[1] > 0);
+
+ costs[-tree[0]] = vp9_cost_bit(probs[0], 0);
+ cost(costs, tree, probs, 2, 0);
+}
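+
+// Usage sketch with a hypothetical three-symbol tree (non-positive
+// vp9_tree_index entries are leaves, as in cost() above):
+#if 0
+  static const vp9_tree_index my_tree[4] = { 0, 2, -1, -2 };
+  const vp9_prob my_probs[2] = { 192, 128 };
+  int costs[3];
+  vp9_cost_tokens(costs, my_probs, my_tree);
+  // costs[0] == vp9_cost_zero(192); costs[1] and costs[2] each add one
+  // more decision at my_probs[1] on top of vp9_cost_one(192).
+#endif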
diff --git a/libvpx/vp9/encoder/vp9_treewriter.h b/libvpx/vp9/encoder/vp9_treewriter.h
new file mode 100644
index 0000000..eeda5cd
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_treewriter.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_VP9_TREEWRITER_H_
+#define VP9_ENCODER_VP9_TREEWRITER_H_
+
+/* Trees map alphabets into Huffman-like codes suitable for an arithmetic
+ bit coder. Timothy S Murphy 11 October 2004 */
+
+#include "vp9/common/vp9_treecoder.h"
+
+#include "vp9/encoder/vp9_boolhuff.h" /* for now */
+
+
+#define vp9_write_prob(w, v) vp9_write_literal((w), (v), 8)
+
+/* Approximate length of an encoded bool in 256ths of a bit at given prob */
+
+#define vp9_cost_zero(x) (vp9_prob_cost[x])
+#define vp9_cost_one(x) vp9_cost_zero(vp9_complement(x))
+
+#define vp9_cost_bit(x, b) vp9_cost_zero((b) ? vp9_complement(x) : (x))
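+
+/* Numeric illustration: vp9_prob_cost[] is in 1/256-bit units, so
+   vp9_cost_zero(128) == 256 (exactly one bit at p = 0.5), while a
+   well-predicted bit is nearly free: vp9_cost_zero(250) is roughly
+   -log2(250/256) * 256, i.e. about 9. */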
+
+/* VP8BC version is scaled by 2^20 rather than 2^8; see bool_coder.h */
+
+
+/* Both of these return bits, not scaled bits. */
+static INLINE unsigned int cost_branch256(const unsigned int ct[2],
+ vp9_prob p) {
+ return ct[0] * vp9_cost_zero(p) + ct[1] * vp9_cost_one(p);
+}
+
+static INLINE unsigned int cost_branch(const unsigned int ct[2],
+ vp9_prob p) {
+ return cost_branch256(ct, p) >> 8;
+}
+
+
+static INLINE void treed_write(vp9_writer *w,
+ vp9_tree tree, const vp9_prob *probs,
+ int bits, int len) {
+ vp9_tree_index i = 0;
+
+ do {
+ const int bit = (bits >> --len) & 1;
+ vp9_write(w, bit, probs[i >> 1]);
+ i = tree[i + bit];
+ } while (len);
+}
+
+static INLINE void write_token(vp9_writer *w, vp9_tree tree,
+ const vp9_prob *probs,
+ const struct vp9_token *token) {
+ treed_write(w, tree, probs, token->value, token->len);
+}
+
+static INLINE int treed_cost(vp9_tree tree, const vp9_prob *probs,
+ int bits, int len) {
+ int cost = 0;
+ vp9_tree_index i = 0;
+
+ do {
+ const int bit = (bits >> --len) & 1;
+ cost += vp9_cost_bit(probs[i >> 1], bit);
+ i = tree[i + bit];
+ } while (len);
+
+ return cost;
+}
+
+static INLINE int cost_token(vp9_tree tree, const vp9_prob *probs,
+ const struct vp9_token *token) {
+ return treed_cost(tree, probs, token->value, token->len);
+}
+
+void vp9_cost_tokens(int *costs, const vp9_prob *probs, vp9_tree tree);
+void vp9_cost_tokens_skip(int *costs, const vp9_prob *probs, vp9_tree tree);
+
+#endif // VP9_ENCODER_VP9_TREEWRITER_H_
diff --git a/libvpx/vp9/encoder/vp9_variance.h b/libvpx/vp9/encoder/vp9_variance.h
new file mode 100644
index 0000000..38808d7
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_variance.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_VP9_VARIANCE_H_
+#define VP9_ENCODER_VP9_VARIANCE_H_
+
+#include "vpx/vpx_integer.h"
+// #include "./vpx_config.h"
+
+typedef unsigned int(*vp9_sad_fn_t)(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int max_sad);
+
+typedef void (*vp9_sad_multi_fn_t)(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array);
+
+typedef void (*vp9_sad_multi1_fn_t)(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array);
+
+typedef void (*vp9_sad_multi_d_fn_t)(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t* const ref_ptr[],
+ int ref_stride, unsigned int *sad_array);
+
+typedef unsigned int (*vp9_variance_fn_t)(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride,
+ unsigned int *sse);
+
+typedef unsigned int (*vp9_subpixvariance_fn_t)(const uint8_t *src_ptr,
+ int source_stride,
+ int xoffset,
+ int yoffset,
+ const uint8_t *ref_ptr,
+                                                int ref_stride,
+ unsigned int *sse);
+
+typedef unsigned int (*vp9_subp_avg_variance_fn_t)(const uint8_t *src_ptr,
+ int source_stride,
+ int xoffset,
+ int yoffset,
+ const uint8_t *ref_ptr,
+                                                   int ref_stride,
+ unsigned int *sse,
+ const uint8_t *second_pred);
+
+typedef void (*vp9_ssimpf_fn_t)(uint8_t *s, int sp, uint8_t *r,
+ int rp, unsigned long *sum_s,
+ unsigned long *sum_r, unsigned long *sum_sq_s,
+ unsigned long *sum_sq_r,
+ unsigned long *sum_sxr);
+
+typedef unsigned int (*vp9_getmbss_fn_t)(const short *);
+
+typedef unsigned int (*vp9_get16x16prederror_fn_t)(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride);
+
+typedef struct vp9_variance_vtable {
+ vp9_sad_fn_t sdf;
+ vp9_variance_fn_t vf;
+ vp9_subpixvariance_fn_t svf;
+ vp9_subp_avg_variance_fn_t svaf;
+ vp9_variance_fn_t svf_halfpix_h;
+ vp9_variance_fn_t svf_halfpix_v;
+ vp9_variance_fn_t svf_halfpix_hv;
+ vp9_sad_multi_fn_t sdx3f;
+ vp9_sad_multi1_fn_t sdx8f;
+ vp9_sad_multi_d_fn_t sdx4df;
+} vp9_variance_fn_ptr_t;
+
+static void comp_avg_pred(uint8_t *comp_pred, const uint8_t *pred, int width,
+ int height, uint8_t *ref, int ref_stride) {
+ int i, j;
+
+ for (i = 0; i < height; i++) {
+ for (j = 0; j < width; j++) {
+ int tmp;
+ tmp = pred[j] + ref[j];
+ comp_pred[j] = (tmp + 1) >> 1;
+ }
+ comp_pred += width;
+ pred += width;
+ ref += ref_stride;
+ }
+}
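+
+/* Note: (tmp + 1) >> 1 above rounds the average up on ties; e.g. pred = 3,
+   ref = 4 gives (7 + 1) >> 1 = 4 rather than the truncated 3. */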
+#endif // VP9_ENCODER_VP9_VARIANCE_H_
diff --git a/libvpx/vp9/encoder/vp9_variance_c.c b/libvpx/vp9/encoder/vp9_variance_c.c
new file mode 100644
index 0000000..23e7767
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_variance_c.c
@@ -0,0 +1,957 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vp9/encoder/vp9_variance.h"
+#include "vp9/common/vp9_filter.h"
+#include "vp9/common/vp9_subpelvar.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_ports/mem.h"
+#include "./vp9_rtcd.h"
+
+unsigned int vp9_get_mb_ss_c(const int16_t *src_ptr) {
+ unsigned int i, sum = 0;
+
+ for (i = 0; i < 256; i++) {
+ sum += (src_ptr[i] * src_ptr[i]);
+ }
+
+ return sum;
+}
+
+unsigned int vp9_variance64x32_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 64, 32, &var, &avg);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 11));
+}
+
+unsigned int vp9_sub_pixel_variance64x32_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+  uint16_t fdata3[65 * 64];  // Temp data buffer used in filtering
+ uint8_t temp2[68 * 64];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 33, 64, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 64, 64, 32, 64, vfilter);
+
+ return vp9_variance64x32(temp2, 64, dst_ptr, dst_pixels_per_line, sse);
+}
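+
+// On buffer sizing (one reading of the two-pass filter): the 2-tap
+// vertical second pass consumes one row beyond each output row, so the
+// first pass produces height + 1 intermediate rows; hence the 33-row
+// first pass for the 64x32 case and an fdata3 sized 65 * 64 for the
+// tallest (64-row) blocks.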
+
+unsigned int vp9_sub_pixel_avg_variance64x32_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+  uint16_t fdata3[65 * 64];  // Temp data buffer used in filtering
+ uint8_t temp2[68 * 64];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 64 * 64); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 33, 64, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 64, 64, 32, 64, vfilter);
+ comp_avg_pred(temp3, second_pred, 64, 32, temp2, 64);
+ return vp9_variance64x32(temp3, 64, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_variance32x64_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 32, 64, &var, &avg);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 11));
+}
+
+unsigned int vp9_sub_pixel_variance32x64_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+  uint16_t fdata3[65 * 64];  // Temp data buffer used in filtering
+ uint8_t temp2[68 * 64];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 65, 32, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 64, 32, vfilter);
+
+ return vp9_variance32x64(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance32x64_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+  uint16_t fdata3[65 * 64];  // Temp data buffer used in filtering
+ uint8_t temp2[68 * 64];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 32 * 64); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 65, 32, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 64, 32, vfilter);
+ comp_avg_pred(temp3, second_pred, 32, 64, temp2, 32);
+ return vp9_variance32x64(temp3, 32, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_variance32x16_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 32, 16, &var, &avg);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 9));
+}
+
+unsigned int vp9_sub_pixel_variance32x16_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+  uint16_t fdata3[33 * 32];  // Temp data buffer used in filtering
+ uint8_t temp2[36 * 32];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 17, 32, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 16, 32, vfilter);
+
+ return vp9_variance32x16(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance32x16_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+  uint16_t fdata3[33 * 32];  // Temp data buffer used in filtering
+ uint8_t temp2[36 * 32];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 32 * 16); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 17, 32, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 16, 32, vfilter);
+ comp_avg_pred(temp3, second_pred, 32, 16, temp2, 32);
+ return vp9_variance32x16(temp3, 32, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_variance16x32_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 32, &var, &avg);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 9));
+}
+
+unsigned int vp9_sub_pixel_variance16x32_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+  uint16_t fdata3[33 * 32];  // Temp data buffer used in filtering
+ uint8_t temp2[36 * 32];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 33, 16, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 32, 16, vfilter);
+
+ return vp9_variance16x32(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance16x32_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+  uint16_t fdata3[33 * 32];  // Temp data buffer used in filtering
+ uint8_t temp2[36 * 32];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 16 * 32); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 33, 16, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 32, 16, vfilter);
+ comp_avg_pred(temp3, second_pred, 16, 32, temp2, 16);
+ return vp9_variance16x32(temp3, 16, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_variance64x64_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 64, 64, &var, &avg);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 12));
+}
+
+unsigned int vp9_variance32x32_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 32, 32, &var, &avg);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 10));
+}
+
+unsigned int vp9_variance16x16_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, &var, &avg);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 8));
+}
+
+unsigned int vp9_variance8x16_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 16, &var, &avg);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 7));
+}
+
+unsigned int vp9_variance16x8_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 8, &var, &avg);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 7));
+}
+
+void vp9_get_sse_sum_8x8_c(const uint8_t *src_ptr, int source_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ unsigned int *sse, int *sum) {
+ variance(src_ptr, source_stride, ref_ptr, ref_stride, 8, 8, sse, sum);
+}
+
+unsigned int vp9_variance8x8_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 8, &var, &avg);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 6));
+}
+
+unsigned int vp9_variance8x4_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 4, &var, &avg);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 5));
+}
+
+unsigned int vp9_variance4x8_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 4, 8, &var, &avg);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 5));
+}
+
+unsigned int vp9_variance4x4_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 4, 4, &var, &avg);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 4));
+}
+
+
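+// The MSE variants below reuse the same summation but return the raw sum of
+// squared errors; unlike the variance functions, no sum^2 / N mean
+// correction is applied.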
+unsigned int vp9_mse16x16_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, &var, &avg);
+ *sse = var;
+ return var;
+}
+
+unsigned int vp9_mse16x8_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 8, &var, &avg);
+ *sse = var;
+ return var;
+}
+
+unsigned int vp9_mse8x16_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 16, &var, &avg);
+ *sse = var;
+ return var;
+}
+
+unsigned int vp9_mse8x8_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 8, &var, &avg);
+ *sse = var;
+ return var;
+}
+
+
+unsigned int vp9_sub_pixel_variance4x4_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ uint8_t temp2[20 * 16];
+ const int16_t *hfilter, *vfilter;
+  uint16_t fdata3[5 * 4];  // Temp data buffer used in filtering
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+  // First filter 1-D horizontally
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 5, 4, hfilter);
+
+  // Now filter vertically
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4, vfilter);
+
+ return vp9_variance4x4(temp2, 4, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance4x4_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+ uint8_t temp2[20 * 16];
+ const int16_t *hfilter, *vfilter;
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 4 * 4); // compound pred buffer
+  uint16_t fdata3[5 * 4];  // Temp data buffer used in filtering
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+  // First filter 1-D horizontally
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 5, 4, hfilter);
+
+  // Now filter vertically
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4, vfilter);
+ comp_avg_pred(temp3, second_pred, 4, 4, temp2, 4);
+ return vp9_variance4x4(temp3, 4, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_variance8x8_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+  uint16_t fdata3[9 * 8];  // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 9, 8, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 8, 8, vfilter);
+
+ return vp9_variance8x8(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance8x8_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+  uint16_t fdata3[9 * 8];  // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 8 * 8); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 9, 8, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 8, 8, vfilter);
+ comp_avg_pred(temp3, second_pred, 8, 8, temp2, 8);
+ return vp9_variance8x8(temp3, 8, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_variance16x16_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+  uint16_t fdata3[17 * 16];  // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 17, 16, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 16, 16, vfilter);
+
+ return vp9_variance16x16(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance16x16_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+ uint16_t fdata3[17 * 16];
+ uint8_t temp2[20 * 16];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 16 * 16); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 17, 16, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 16, 16, vfilter);
+
+ comp_avg_pred(temp3, second_pred, 16, 16, temp2, 16);
+ return vp9_variance16x16(temp3, 16, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_variance64x64_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+  uint16_t fdata3[65 * 64];  // Temp data buffer used in filtering
+ uint8_t temp2[68 * 64];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 65, 64, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 64, 64, 64, 64, vfilter);
+
+ return vp9_variance64x64(temp2, 64, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance64x64_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+  uint16_t fdata3[65 * 64];  // Temp data buffer used in filtering
+ uint8_t temp2[68 * 64];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 64 * 64); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 65, 64, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 64, 64, 64, 64, vfilter);
+ comp_avg_pred(temp3, second_pred, 64, 64, temp2, 64);
+ return vp9_variance64x64(temp3, 64, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_variance32x32_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+  uint16_t fdata3[33 * 32];  // Temp data buffer used in filtering
+ uint8_t temp2[36 * 32];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 33, 32, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 32, 32, vfilter);
+
+ return vp9_variance32x32(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance32x32_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+  uint16_t fdata3[33 * 32];  // Temp data buffer used in filtering
+ uint8_t temp2[36 * 32];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 32 * 32); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 33, 32, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 32, 32, vfilter);
+ comp_avg_pred(temp3, second_pred, 32, 32, temp2, 32);
+ return vp9_variance32x32(temp3, 32, dst_ptr, dst_pixels_per_line, sse);
+}
+
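+// The half-pixel helpers below hard-code a sub-pel offset of 8; offsets
+// index the 16-entry bilinear filter table, so (8, 0), (0, 8) and (8, 8)
+// select the horizontal, vertical and diagonal half-sample positions.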
+unsigned int vp9_variance_halfpixvar16x16_h_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance16x16_c(src_ptr, source_stride, 8, 0,
+ ref_ptr, recon_stride, sse);
+}
+
+unsigned int vp9_variance_halfpixvar32x32_h_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance32x32_c(src_ptr, source_stride, 8, 0,
+ ref_ptr, recon_stride, sse);
+}
+
+unsigned int vp9_variance_halfpixvar64x64_h_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance64x64_c(src_ptr, source_stride, 8, 0,
+ ref_ptr, recon_stride, sse);
+}
+
+unsigned int vp9_variance_halfpixvar16x16_v_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance16x16_c(src_ptr, source_stride, 0, 8,
+ ref_ptr, recon_stride, sse);
+}
+
+unsigned int vp9_variance_halfpixvar32x32_v_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance32x32_c(src_ptr, source_stride, 0, 8,
+ ref_ptr, recon_stride, sse);
+}
+
+unsigned int vp9_variance_halfpixvar64x64_v_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance64x64_c(src_ptr, source_stride, 0, 8,
+ ref_ptr, recon_stride, sse);
+}
+
+unsigned int vp9_variance_halfpixvar16x16_hv_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance16x16_c(src_ptr, source_stride, 8, 8,
+ ref_ptr, recon_stride, sse);
+}
+
+unsigned int vp9_variance_halfpixvar32x32_hv_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance32x32_c(src_ptr, source_stride, 8, 8,
+ ref_ptr, recon_stride, sse);
+}
+
+unsigned int vp9_variance_halfpixvar64x64_hv_c(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance64x64_c(src_ptr, source_stride, 8, 8,
+ ref_ptr, recon_stride, sse);
+}
+
+unsigned int vp9_sub_pixel_mse16x16_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ vp9_sub_pixel_variance16x16_c(src_ptr, src_pixels_per_line,
+ xoffset, yoffset, dst_ptr,
+ dst_pixels_per_line, sse);
+ return *sse;
+}
+
+unsigned int vp9_sub_pixel_mse32x32_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ vp9_sub_pixel_variance32x32_c(src_ptr, src_pixels_per_line,
+ xoffset, yoffset, dst_ptr,
+ dst_pixels_per_line, sse);
+ return *sse;
+}
+
+unsigned int vp9_sub_pixel_mse64x64_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ vp9_sub_pixel_variance64x64_c(src_ptr, src_pixels_per_line,
+ xoffset, yoffset, dst_ptr,
+ dst_pixels_per_line, sse);
+ return *sse;
+}
+
+unsigned int vp9_sub_pixel_variance16x8_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+  uint16_t fdata3[16 * 9];  // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 9, 16, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 8, 16, vfilter);
+
+ return vp9_variance16x8(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance16x8_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+  uint16_t fdata3[16 * 9];  // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 16 * 8); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 9, 16, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 8, 16, vfilter);
+ comp_avg_pred(temp3, second_pred, 16, 8, temp2, 16);
+ return vp9_variance16x8(temp3, 16, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_variance8x16_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+  uint16_t fdata3[9 * 16];  // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 17, 8, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 16, 8, vfilter);
+
+ return vp9_variance8x16(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance8x16_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+  uint16_t fdata3[9 * 16];  // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 8 * 16); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 17, 8, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 16, 8, vfilter);
+ comp_avg_pred(temp3, second_pred, 8, 16, temp2, 8);
+ return vp9_variance8x16(temp3, 8, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_variance8x4_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+  uint16_t fdata3[8 * 5];  // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 5, 8, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 4, 8, vfilter);
+
+ return vp9_variance8x4(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance8x4_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+  uint16_t fdata3[8 * 5];  // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 8 * 4); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 5, 8, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 4, 8, vfilter);
+ comp_avg_pred(temp3, second_pred, 8, 4, temp2, 8);
+ return vp9_variance8x4(temp3, 8, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_variance4x8_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+  uint16_t fdata3[5 * 8];  // Temp data buffer used in filtering
+  // FIXME(jingning,rbultje): this temp2 buffer probably doesn't need to be
+  // this big; the same issue appears in all other block size settings.
+ uint8_t temp2[20 * 16];
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 9, 4, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 8, 4, vfilter);
+
+ return vp9_variance4x8(temp2, 4, dst_ptr, dst_pixels_per_line, sse);
+}
+
+unsigned int vp9_sub_pixel_avg_variance4x8_c(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse,
+ const uint8_t *second_pred) {
+  uint16_t fdata3[5 * 8];  // Temp data buffer used in filtering
+ uint8_t temp2[20 * 16];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 4 * 8); // compound pred buffer
+ const int16_t *hfilter, *vfilter;
+
+ hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+
+ var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
+ 1, 9, 4, hfilter);
+ var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 8, 4, vfilter);
+ comp_avg_pred(temp3, second_pred, 4, 8, temp2, 4);
+ return vp9_variance4x8(temp3, 4, dst_ptr, dst_pixels_per_line, sse);
+}
diff --git a/libvpx/vp9/encoder/vp9_write_bit_buffer.h b/libvpx/vp9/encoder/vp9_write_bit_buffer.h
new file mode 100644
index 0000000..6f91cfc
--- /dev/null
+++ b/libvpx/vp9/encoder/vp9_write_bit_buffer.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_BIT_WRITE_BUFFER_H_
+#define VP9_BIT_WRITE_BUFFER_H_
+
+#include <limits.h>
+
+#include "vpx/vpx_integer.h"
+
+struct vp9_write_bit_buffer {
+ uint8_t *bit_buffer;
+ size_t bit_offset;
+};
+
+static size_t vp9_rb_bytes_written(struct vp9_write_bit_buffer *wb) {
+ return wb->bit_offset / CHAR_BIT + (wb->bit_offset % CHAR_BIT > 0);
+}
+
+static void vp9_wb_write_bit(struct vp9_write_bit_buffer *wb, int bit) {
+  const int off = wb->bit_offset;
+  const int p = off / CHAR_BIT;                 // byte index
+  const int q = CHAR_BIT - 1 - off % CHAR_BIT;  // bit position, MSB first
+  if (q == CHAR_BIT - 1) {
+    // Starting a fresh byte: the plain store also zeros the trailing bits.
+    wb->bit_buffer[p] = bit << q;
+  } else {
+    wb->bit_buffer[p] &= ~(1 << q);
+    wb->bit_buffer[p] |= bit << q;
+  }
+  wb->bit_offset = off + 1;
+}
+
+static void vp9_wb_write_literal(struct vp9_write_bit_buffer *wb,
+ int data, int bits) {
+ int bit;
+ for (bit = bits - 1; bit >= 0; bit--)
+ vp9_wb_write_bit(wb, (data >> bit) & 1);
+}
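+
+// A minimal usage sketch (illustrative only, not part of the API): bits are
+// packed MSB-first within each byte.
+//
+//   uint8_t buf[1] = { 0 };
+//   struct vp9_write_bit_buffer wb = { buf, 0 };
+//   vp9_wb_write_literal(&wb, 5, 3);        // writes 1, 0, 1 -> buf[0] == 0xa0
+//   size_t n = vp9_rb_bytes_written(&wb);   // 3 bits round up to n == 1 byte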
+
+
+#endif // VP9_BIT_WRITE_BUFFER_H_
diff --git a/libvpx/vp9/encoder/x86/vp9_dct_mmx.asm b/libvpx/vp9/encoder/x86/vp9_dct_mmx.asm
new file mode 100644
index 0000000..54766d8
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_dct_mmx.asm
@@ -0,0 +1,241 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+;void vp9_short_fdct4x4_mmx(short *input, short *output, int pitch)
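+;
+; A note on the constants below (an observation, not from the original
+; comments): 2217 and 5352 are the usual VP8-style forward DCT rotation
+; factors, roughly 0.541196 * 4096 and 1.306563 * 4096; pmaddwd applies them
+; pairwise, with 12-bit rounding shifts in the first pass and 16-bit shifts
+; in the second.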
+global sym(vp9_short_fdct4x4_mmx) PRIVATE
+sym(vp9_short_fdct4x4_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 3
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ; input
+ mov rdi, arg(1) ; output
+
+ movsxd rax, dword ptr arg(2) ;pitch
+
+ lea rcx, [rsi + rax*2]
+ ; read the input data
+ movq mm0, [rsi]
+ movq mm1, [rsi + rax]
+
+ movq mm2, [rcx]
+ movq mm4, [rcx + rax]
+
+ ; transpose for the first stage
+ movq mm3, mm0 ; 00 01 02 03
+ movq mm5, mm2 ; 20 21 22 23
+
+ punpcklwd mm0, mm1 ; 00 10 01 11
+ punpckhwd mm3, mm1 ; 02 12 03 13
+
+ punpcklwd mm2, mm4 ; 20 30 21 31
+ punpckhwd mm5, mm4 ; 22 32 23 33
+
+ movq mm1, mm0 ; 00 10 01 11
+ punpckldq mm0, mm2 ; 00 10 20 30
+
+ punpckhdq mm1, mm2 ; 01 11 21 31
+
+ movq mm2, mm3 ; 02 12 03 13
+ punpckldq mm2, mm5 ; 02 12 22 32
+
+ punpckhdq mm3, mm5 ; 03 13 23 33
+
+ ; mm0 0
+ ; mm1 1
+ ; mm2 2
+ ; mm3 3
+
+ ; first stage
+ movq mm5, mm0
+ movq mm4, mm1
+
+ paddw mm0, mm3 ; a1 = 0 + 3
+ paddw mm1, mm2 ; b1 = 1 + 2
+
+ psubw mm4, mm2 ; c1 = 1 - 2
+ psubw mm5, mm3 ; d1 = 0 - 3
+
+ psllw mm5, 3
+ psllw mm4, 3
+
+ psllw mm0, 3
+ psllw mm1, 3
+
+ ; output 0 and 2
+ movq mm2, mm0 ; a1
+
+ paddw mm0, mm1 ; op[0] = a1 + b1
+ psubw mm2, mm1 ; op[2] = a1 - b1
+
+ ; output 1 and 3
+ ; interleave c1, d1
+ movq mm1, mm5 ; d1
+ punpcklwd mm1, mm4 ; c1 d1
+ punpckhwd mm5, mm4 ; c1 d1
+
+ movq mm3, mm1
+ movq mm4, mm5
+
+        pmaddwd     mm1, MMWORD PTR[GLOBAL(_5352_2217)]     ; c1*2217 + d1*5352
+        pmaddwd     mm4, MMWORD PTR[GLOBAL(_5352_2217)]     ; c1*2217 + d1*5352
+
+ pmaddwd mm3, MMWORD PTR[GLOBAL(_2217_neg5352)] ; d1*2217 - c1*5352
+ pmaddwd mm5, MMWORD PTR[GLOBAL(_2217_neg5352)] ; d1*2217 - c1*5352
+
+ paddd mm1, MMWORD PTR[GLOBAL(_14500)]
+ paddd mm4, MMWORD PTR[GLOBAL(_14500)]
+ paddd mm3, MMWORD PTR[GLOBAL(_7500)]
+ paddd mm5, MMWORD PTR[GLOBAL(_7500)]
+
+ psrad mm1, 12 ; (c1 * 2217 + d1 * 5352 + 14500)>>12
+ psrad mm4, 12 ; (c1 * 2217 + d1 * 5352 + 14500)>>12
+ psrad mm3, 12 ; (d1 * 2217 - c1 * 5352 + 7500)>>12
+ psrad mm5, 12 ; (d1 * 2217 - c1 * 5352 + 7500)>>12
+
+ packssdw mm1, mm4 ; op[1]
+ packssdw mm3, mm5 ; op[3]
+
+ ; done with vertical
+ ; transpose for the second stage
+ movq mm4, mm0 ; 00 10 20 30
+ movq mm5, mm2 ; 02 12 22 32
+
+ punpcklwd mm0, mm1 ; 00 01 10 11
+ punpckhwd mm4, mm1 ; 20 21 30 31
+
+ punpcklwd mm2, mm3 ; 02 03 12 13
+ punpckhwd mm5, mm3 ; 22 23 32 33
+
+ movq mm1, mm0 ; 00 01 10 11
+ punpckldq mm0, mm2 ; 00 01 02 03
+
+        punpckhdq   mm1, mm2            ; 10 11 12 13
+
+        movq        mm2, mm4            ; 20 21 30 31
+ punpckldq mm2, mm5 ; 20 21 22 23
+
+ punpckhdq mm4, mm5 ; 30 31 32 33
+
+ ; mm0 0
+ ; mm1 1
+ ; mm2 2
+        ; mm4 3
+
+ movq mm5, mm0
+ movq mm3, mm1
+
+ paddw mm0, mm4 ; a1 = 0 + 3
+ paddw mm1, mm2 ; b1 = 1 + 2
+
+ psubw mm3, mm2 ; c1 = 1 - 2
+ psubw mm5, mm4 ; d1 = 0 - 3
+
+ pxor mm6, mm6 ; zero out for compare
+
+ pcmpeqw mm6, mm5 ; d1 != 0
+
+ pandn mm6, MMWORD PTR[GLOBAL(_cmp_mask)] ; clear upper,
+ ; and keep bit 0 of lower
+
+ ; output 0 and 2
+ movq mm2, mm0 ; a1
+
+ paddw mm0, mm1 ; a1 + b1
+ psubw mm2, mm1 ; a1 - b1
+
+ paddw mm0, MMWORD PTR[GLOBAL(_7w)]
+ paddw mm2, MMWORD PTR[GLOBAL(_7w)]
+
+ psraw mm0, 4 ; op[0] = (a1 + b1 + 7)>>4
+ psraw mm2, 4 ; op[8] = (a1 - b1 + 7)>>4
+
+ movq MMWORD PTR[rdi + 0 ], mm0
+ movq MMWORD PTR[rdi + 16], mm2
+
+ ; output 1 and 3
+ ; interleave c1, d1
+ movq mm1, mm5 ; d1
+ punpcklwd mm1, mm3 ; c1 d1
+ punpckhwd mm5, mm3 ; c1 d1
+
+ movq mm3, mm1
+ movq mm4, mm5
+
+        pmaddwd     mm1, MMWORD PTR[GLOBAL(_5352_2217)]     ; c1*2217 + d1*5352
+        pmaddwd     mm4, MMWORD PTR[GLOBAL(_5352_2217)]     ; c1*2217 + d1*5352
+
+ pmaddwd mm3, MMWORD PTR[GLOBAL(_2217_neg5352)] ; d1*2217 - c1*5352
+ pmaddwd mm5, MMWORD PTR[GLOBAL(_2217_neg5352)] ; d1*2217 - c1*5352
+
+ paddd mm1, MMWORD PTR[GLOBAL(_12000)]
+ paddd mm4, MMWORD PTR[GLOBAL(_12000)]
+ paddd mm3, MMWORD PTR[GLOBAL(_51000)]
+ paddd mm5, MMWORD PTR[GLOBAL(_51000)]
+
+        psrad       mm1, 16             ; (c1 * 2217 + d1 * 5352 + 12000)>>16
+        psrad       mm4, 16             ; (c1 * 2217 + d1 * 5352 + 12000)>>16
+        psrad       mm3, 16             ; (d1 * 2217 - c1 * 5352 + 51000)>>16
+        psrad       mm5, 16             ; (d1 * 2217 - c1 * 5352 + 51000)>>16
+
+ packssdw mm1, mm4 ; op[4]
+ packssdw mm3, mm5 ; op[12]
+
+ paddw mm1, mm6 ; op[4] += (d1!=0)
+
+ movq MMWORD PTR[rdi + 8 ], mm1
+ movq MMWORD PTR[rdi + 24], mm3
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+SECTION_RODATA
+align 8
+_5352_2217:
+ dw 5352
+ dw 2217
+ dw 5352
+ dw 2217
+align 8
+_2217_neg5352:
+ dw 2217
+ dw -5352
+ dw 2217
+ dw -5352
+align 8
+_cmp_mask:
+ times 4 dw 1
+align 8
+_7w:
+ times 4 dw 7
+align 8
+_14500:
+ times 2 dd 14500
+align 8
+_7500:
+ times 2 dd 7500
+align 8
+_12000:
+ times 2 dd 12000
+align 8
+_51000:
+ times 2 dd 51000
diff --git a/libvpx/vp9/encoder/x86/vp9_dct_mmx.h b/libvpx/vp9/encoder/x86/vp9_dct_mmx.h
new file mode 100644
index 0000000..3bac7c8
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_dct_mmx.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_ENCODER_X86_VP9_DCT_MMX_H_
+#define VP9_ENCODER_X86_VP9_DCT_MMX_H_
+
+extern void vp9_short_fdct4x4_mmx(short *input, short *output, int pitch);
+
+
+#endif /* VP9_ENCODER_X86_VP9_DCT_MMX_H_ */
diff --git a/libvpx/vp9/encoder/x86/vp9_dct_sse2.c b/libvpx/vp9/encoder/x86/vp9_dct_sse2.c
new file mode 100644
index 0000000..aaacebe
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_dct_sse2.c
@@ -0,0 +1,1000 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <emmintrin.h> // SSE2
+#include "vp9/common/vp9_idct.h" // for cospi constants
+
+void vp9_short_fdct4x4_sse2(int16_t *input, int16_t *output, int pitch) {
+ // The 2D transform is done with two passes which are actually pretty
+ // similar. In the first one, we transform the columns and transpose
+ // the results. In the second one, we transform the rows. To achieve that,
+  // as the first pass results are transposed, we transpose the columns (that
+ // is the transposed rows) and transpose the results (so that it goes back
+ // in normal/row positions).
+ const int stride = pitch >> 1;
+ int pass;
+ // Constants
+ // When we use them, in one case, they are all the same. In all others
+ // it's a pair of them that we need to repeat four times. This is done
+ // by constructing the 32 bit constant corresponding to that pair.
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
+ const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
+ const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1);
+ const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0);
+ const __m128i kOne = _mm_set1_epi16(1);
+ __m128i in0, in1, in2, in3;
+ // Load inputs.
+ {
+ in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
+ in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
+ in2 = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
+ in3 = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
+ // x = x << 4
+ in0 = _mm_slli_epi16(in0, 4);
+ in1 = _mm_slli_epi16(in1, 4);
+ in2 = _mm_slli_epi16(in2, 4);
+ in3 = _mm_slli_epi16(in3, 4);
+ // if (i == 0 && input[0]) input[0] += 1;
+ {
+      // The mask will only contain whether the first value is zero; all
+      // other comparisons will fail, as something shifted by 4 (above << 4)
+ // can never be equal to one. To increment in the non-zero case, we
+ // add the mask and one for the first element:
+ // - if zero, mask = -1, v = v - 1 + 1 = v
+ // - if non-zero, mask = 0, v = v + 0 + 1 = v + 1
+ __m128i mask = _mm_cmpeq_epi16(in0, k__nonzero_bias_a);
+ in0 = _mm_add_epi16(in0, mask);
+ in0 = _mm_add_epi16(in0, k__nonzero_bias_b);
+ }
+ }
+ // Do the two transform/transpose passes
+ for (pass = 0; pass < 2; ++pass) {
+    // Transform 1/2: Add/subtract
+ const __m128i r0 = _mm_add_epi16(in0, in3);
+ const __m128i r1 = _mm_add_epi16(in1, in2);
+ const __m128i r2 = _mm_sub_epi16(in1, in2);
+ const __m128i r3 = _mm_sub_epi16(in0, in3);
+ // Transform 1/2: Interleave to do the multiply by constants which gets us
+ // into 32 bits.
+ const __m128i t0 = _mm_unpacklo_epi16(r0, r1);
+ const __m128i t2 = _mm_unpacklo_epi16(r2, r3);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
+ const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16);
+ const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08);
+ const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24);
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
+ const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+ const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+ // Combine and transpose
+ const __m128i res0 = _mm_packs_epi32(w0, w2);
+ const __m128i res1 = _mm_packs_epi32(w4, w6);
+ // 00 01 02 03 20 21 22 23
+ // 10 11 12 13 30 31 32 33
+ const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1);
+ const __m128i tr0_1 = _mm_unpackhi_epi16(res0, res1);
+ // 00 10 01 11 02 12 03 13
+ // 20 30 21 31 22 32 23 33
+ in0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+ in2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+ // 00 10 20 30 01 11 21 31 in0 contains 0 followed by 1
+ // 02 12 22 32 03 13 23 33 in2 contains 2 followed by 3
+ if (0 == pass) {
+ // Extract values in the high part for second pass as transform code
+ // only uses the first four values.
+ in1 = _mm_unpackhi_epi64(in0, in0);
+ in3 = _mm_unpackhi_epi64(in2, in2);
+ } else {
+      // Post-condition the output, (v + 1) >> 2, and store it, taking
+      // advantage of the fact that rows 1/3 are stored just after rows 0/2.
+ __m128i out01 = _mm_add_epi16(in0, kOne);
+ __m128i out23 = _mm_add_epi16(in2, kOne);
+ out01 = _mm_srai_epi16(out01, 2);
+ out23 = _mm_srai_epi16(out23, 2);
+ _mm_storeu_si128((__m128i *)(output + 0 * 4), out01);
+ _mm_storeu_si128((__m128i *)(output + 2 * 4), out23);
+ }
+ }
+}
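+
+// For reference, a scalar sketch of what each 1-D pass above computes
+// (assuming dct_const_round_shift() from vp9/common/vp9_idct.h):
+//   a = in[0] + in[3];  b = in[1] + in[2];
+//   c = in[1] - in[2];  d = in[0] - in[3];
+//   out[0] = dct_const_round_shift((a + b) * cospi_16_64);
+//   out[2] = dct_const_round_shift((a - b) * cospi_16_64);
+//   out[1] = dct_const_round_shift(c * cospi_24_64 + d * cospi_8_64);
+//   out[3] = dct_const_round_shift(d * cospi_24_64 - c * cospi_8_64);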
+
+void vp9_short_fdct8x4_sse2(int16_t *input, int16_t *output, int pitch) {
+ vp9_short_fdct4x4_sse2(input, output, pitch);
+ vp9_short_fdct4x4_sse2(input + 4, output + 16, pitch);
+}
+
+void vp9_short_fdct8x8_sse2(int16_t *input, int16_t *output, int pitch) {
+ const int stride = pitch >> 1;
+ int pass;
+ // Constants
+ // When we use them, in one case, they are all the same. In all others
+ // it's a pair of them that we need to repeat four times. This is done
+ // by constructing the 32 bit constant corresponding to that pair.
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
+ const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
+ const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
+ const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
+ const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
+ const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ // Load input
+ __m128i in0 = _mm_loadu_si128((const __m128i *)(input + 0 * stride));
+ __m128i in1 = _mm_loadu_si128((const __m128i *)(input + 1 * stride));
+ __m128i in2 = _mm_loadu_si128((const __m128i *)(input + 2 * stride));
+ __m128i in3 = _mm_loadu_si128((const __m128i *)(input + 3 * stride));
+ __m128i in4 = _mm_loadu_si128((const __m128i *)(input + 4 * stride));
+ __m128i in5 = _mm_loadu_si128((const __m128i *)(input + 5 * stride));
+ __m128i in6 = _mm_loadu_si128((const __m128i *)(input + 6 * stride));
+ __m128i in7 = _mm_loadu_si128((const __m128i *)(input + 7 * stride));
+ // Pre-condition input (shift by two)
+ in0 = _mm_slli_epi16(in0, 2);
+ in1 = _mm_slli_epi16(in1, 2);
+ in2 = _mm_slli_epi16(in2, 2);
+ in3 = _mm_slli_epi16(in3, 2);
+ in4 = _mm_slli_epi16(in4, 2);
+ in5 = _mm_slli_epi16(in5, 2);
+ in6 = _mm_slli_epi16(in6, 2);
+ in7 = _mm_slli_epi16(in7, 2);
+
+ // We do two passes, first the columns, then the rows. The results of the
+ // first pass are transposed so that the same column code can be reused. The
+ // results of the second pass are also transposed so that the rows (processed
+ // as columns) are put back in row positions.
+ for (pass = 0; pass < 2; pass++) {
+ // To store results of each pass before the transpose.
+ __m128i res0, res1, res2, res3, res4, res5, res6, res7;
+    // Add/subtract
+ const __m128i q0 = _mm_add_epi16(in0, in7);
+ const __m128i q1 = _mm_add_epi16(in1, in6);
+ const __m128i q2 = _mm_add_epi16(in2, in5);
+ const __m128i q3 = _mm_add_epi16(in3, in4);
+ const __m128i q4 = _mm_sub_epi16(in3, in4);
+ const __m128i q5 = _mm_sub_epi16(in2, in5);
+ const __m128i q6 = _mm_sub_epi16(in1, in6);
+ const __m128i q7 = _mm_sub_epi16(in0, in7);
+ // Work on first four results
+ {
+      // Add/subtract
+ const __m128i r0 = _mm_add_epi16(q0, q3);
+ const __m128i r1 = _mm_add_epi16(q1, q2);
+ const __m128i r2 = _mm_sub_epi16(q1, q2);
+ const __m128i r3 = _mm_sub_epi16(q0, q3);
+ // Interleave to do the multiply by constants which gets us into 32bits
+ const __m128i t0 = _mm_unpacklo_epi16(r0, r1);
+ const __m128i t1 = _mm_unpackhi_epi16(r0, r1);
+ const __m128i t2 = _mm_unpacklo_epi16(r2, r3);
+ const __m128i t3 = _mm_unpackhi_epi16(r2, r3);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16);
+ const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16);
+ const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16);
+ const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08);
+ const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08);
+ const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24);
+ const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
+ const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
+ const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
+ const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+ const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+ const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+ const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+ // Combine
+ res0 = _mm_packs_epi32(w0, w1);
+ res4 = _mm_packs_epi32(w2, w3);
+ res2 = _mm_packs_epi32(w4, w5);
+ res6 = _mm_packs_epi32(w6, w7);
+ }
+ // Work on next four results
+ {
+ // Interleave to do the multiply by constants which gets us into 32bits
+ const __m128i d0 = _mm_unpacklo_epi16(q6, q5);
+ const __m128i d1 = _mm_unpackhi_epi16(q6, q5);
+ const __m128i e0 = _mm_madd_epi16(d0, k__cospi_p16_m16);
+ const __m128i e1 = _mm_madd_epi16(d1, k__cospi_p16_m16);
+ const __m128i e2 = _mm_madd_epi16(d0, k__cospi_p16_p16);
+ const __m128i e3 = _mm_madd_epi16(d1, k__cospi_p16_p16);
+ // dct_const_round_shift
+ const __m128i f0 = _mm_add_epi32(e0, k__DCT_CONST_ROUNDING);
+ const __m128i f1 = _mm_add_epi32(e1, k__DCT_CONST_ROUNDING);
+ const __m128i f2 = _mm_add_epi32(e2, k__DCT_CONST_ROUNDING);
+ const __m128i f3 = _mm_add_epi32(e3, k__DCT_CONST_ROUNDING);
+ const __m128i s0 = _mm_srai_epi32(f0, DCT_CONST_BITS);
+ const __m128i s1 = _mm_srai_epi32(f1, DCT_CONST_BITS);
+ const __m128i s2 = _mm_srai_epi32(f2, DCT_CONST_BITS);
+ const __m128i s3 = _mm_srai_epi32(f3, DCT_CONST_BITS);
+ // Combine
+ const __m128i r0 = _mm_packs_epi32(s0, s1);
+ const __m128i r1 = _mm_packs_epi32(s2, s3);
+      // Add/subtract
+ const __m128i x0 = _mm_add_epi16(q4, r0);
+ const __m128i x1 = _mm_sub_epi16(q4, r0);
+ const __m128i x2 = _mm_sub_epi16(q7, r1);
+ const __m128i x3 = _mm_add_epi16(q7, r1);
+ // Interleave to do the multiply by constants which gets us into 32bits
+ const __m128i t0 = _mm_unpacklo_epi16(x0, x3);
+ const __m128i t1 = _mm_unpackhi_epi16(x0, x3);
+ const __m128i t2 = _mm_unpacklo_epi16(x1, x2);
+ const __m128i t3 = _mm_unpackhi_epi16(x1, x2);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04);
+ const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28);
+ const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28);
+ const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20);
+ const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p12_p20);
+ const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12);
+ const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
+ const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
+ const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
+ const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+ const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+ const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+ const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+ // Combine
+ res1 = _mm_packs_epi32(w0, w1);
+ res7 = _mm_packs_epi32(w2, w3);
+ res5 = _mm_packs_epi32(w4, w5);
+ res3 = _mm_packs_epi32(w6, w7);
+ }
+ // Transpose the 8x8.
+ {
+ // 00 01 02 03 04 05 06 07
+ // 10 11 12 13 14 15 16 17
+ // 20 21 22 23 24 25 26 27
+ // 30 31 32 33 34 35 36 37
+ // 40 41 42 43 44 45 46 47
+ // 50 51 52 53 54 55 56 57
+ // 60 61 62 63 64 65 66 67
+ // 70 71 72 73 74 75 76 77
+ const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1);
+ const __m128i tr0_1 = _mm_unpacklo_epi16(res2, res3);
+ const __m128i tr0_2 = _mm_unpackhi_epi16(res0, res1);
+ const __m128i tr0_3 = _mm_unpackhi_epi16(res2, res3);
+ const __m128i tr0_4 = _mm_unpacklo_epi16(res4, res5);
+ const __m128i tr0_5 = _mm_unpacklo_epi16(res6, res7);
+ const __m128i tr0_6 = _mm_unpackhi_epi16(res4, res5);
+ const __m128i tr0_7 = _mm_unpackhi_epi16(res6, res7);
+ // 00 10 01 11 02 12 03 13
+ // 20 30 21 31 22 32 23 33
+ // 04 14 05 15 06 16 07 17
+ // 24 34 25 35 26 36 27 37
+ // 40 50 41 51 42 52 43 53
+ // 60 70 61 71 62 72 63 73
+      // 44 54 45 55 46 56 47 57
+ // 64 74 65 75 66 76 67 77
+ const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+ const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
+ const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+ const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
+ const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
+ const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
+ const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
+ const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
+ // 00 10 20 30 01 11 21 31
+ // 40 50 60 70 41 51 61 71
+ // 02 12 22 32 03 13 23 33
+ // 42 52 62 72 43 53 63 73
+      // 04 14 24 34 05 15 25 35
+      // 44 54 64 74 45 55 65 75
+ // 06 16 26 36 07 17 27 37
+ // 46 56 66 76 47 57 67 77
+ in0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
+ in1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
+ in2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
+ in3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
+ in4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
+ in5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
+ in6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
+ in7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
+ // 00 10 20 30 40 50 60 70
+ // 01 11 21 31 41 51 61 71
+ // 02 12 22 32 42 52 62 72
+ // 03 13 23 33 43 53 63 73
+ // 04 14 24 34 44 54 64 74
+ // 05 15 25 35 45 55 65 75
+ // 06 16 26 36 46 56 66 76
+ // 07 17 27 37 47 57 67 77
+ }
+ }
+ // Post-condition output and store it
+ {
+    // Post-condition (division by two)
+    // Division of a 16-bit signed number by two using shifts:
+    //   n / 2 = (n - (n >> 15)) >> 1
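+    // e.g. n = -3: -3 >> 15 == -1, so (-3 - (-1)) >> 1 == -1, matching C's
+    // truncating -3 / 2 (a plain n >> 1 would give -2).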
+ const __m128i sign_in0 = _mm_srai_epi16(in0, 15);
+ const __m128i sign_in1 = _mm_srai_epi16(in1, 15);
+ const __m128i sign_in2 = _mm_srai_epi16(in2, 15);
+ const __m128i sign_in3 = _mm_srai_epi16(in3, 15);
+ const __m128i sign_in4 = _mm_srai_epi16(in4, 15);
+ const __m128i sign_in5 = _mm_srai_epi16(in5, 15);
+ const __m128i sign_in6 = _mm_srai_epi16(in6, 15);
+ const __m128i sign_in7 = _mm_srai_epi16(in7, 15);
+ in0 = _mm_sub_epi16(in0, sign_in0);
+ in1 = _mm_sub_epi16(in1, sign_in1);
+ in2 = _mm_sub_epi16(in2, sign_in2);
+ in3 = _mm_sub_epi16(in3, sign_in3);
+ in4 = _mm_sub_epi16(in4, sign_in4);
+ in5 = _mm_sub_epi16(in5, sign_in5);
+ in6 = _mm_sub_epi16(in6, sign_in6);
+ in7 = _mm_sub_epi16(in7, sign_in7);
+ in0 = _mm_srai_epi16(in0, 1);
+ in1 = _mm_srai_epi16(in1, 1);
+ in2 = _mm_srai_epi16(in2, 1);
+ in3 = _mm_srai_epi16(in3, 1);
+ in4 = _mm_srai_epi16(in4, 1);
+ in5 = _mm_srai_epi16(in5, 1);
+ in6 = _mm_srai_epi16(in6, 1);
+ in7 = _mm_srai_epi16(in7, 1);
+ // store results
+ _mm_storeu_si128((__m128i *)(output + 0 * 8), in0);
+ _mm_storeu_si128((__m128i *)(output + 1 * 8), in1);
+ _mm_storeu_si128((__m128i *)(output + 2 * 8), in2);
+ _mm_storeu_si128((__m128i *)(output + 3 * 8), in3);
+ _mm_storeu_si128((__m128i *)(output + 4 * 8), in4);
+ _mm_storeu_si128((__m128i *)(output + 5 * 8), in5);
+ _mm_storeu_si128((__m128i *)(output + 6 * 8), in6);
+ _mm_storeu_si128((__m128i *)(output + 7 * 8), in7);
+ }
+}
+
+void vp9_short_fdct16x16_sse2(int16_t *input, int16_t *output, int pitch) {
+ // The 2D transform is done with two passes which are actually pretty
+ // similar. In the first one, we transform the columns and transpose
+ // the results. In the second one, we transform the rows. To achieve that,
+  // as the first pass results are transposed, we transpose the columns (that
+ // is the transposed rows) and transpose the results (so that it goes back
+ // in normal/row positions).
+ const int stride = pitch >> 1;
+ int pass;
+ // We need an intermediate buffer between passes.
+ int16_t intermediate[256];
+ int16_t *in = input;
+ int16_t *out = intermediate;
+ // Constants
+ // When we use them, in one case, they are all the same. In all others
+ // it's a pair of them that we need to repeat four times. This is done
+ // by constructing the 32 bit constant corresponding to that pair.
+ const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
+ const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
+ const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
+ const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
+ const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
+ const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
+ const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m128i k__cospi_p30_p02 = pair_set_epi16(cospi_30_64, cospi_2_64);
+ const __m128i k__cospi_p14_p18 = pair_set_epi16(cospi_14_64, cospi_18_64);
+ const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64);
+ const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64);
+ const __m128i k__cospi_p22_p10 = pair_set_epi16(cospi_22_64, cospi_10_64);
+ const __m128i k__cospi_p06_p26 = pair_set_epi16(cospi_6_64, cospi_26_64);
+ const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64);
+ const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64);
+ const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ const __m128i kOne = _mm_set1_epi16(1);
+ // Do the two transform/transpose passes
+ for (pass = 0; pass < 2; ++pass) {
+ // We process eight columns (transposed rows in second pass) at a time.
+ int column_start;
+ for (column_start = 0; column_start < 16; column_start += 8) {
+ __m128i in00, in01, in02, in03, in04, in05, in06, in07;
+ __m128i in08, in09, in10, in11, in12, in13, in14, in15;
+ __m128i input0, input1, input2, input3, input4, input5, input6, input7;
+ __m128i step1_0, step1_1, step1_2, step1_3;
+ __m128i step1_4, step1_5, step1_6, step1_7;
+ __m128i step2_1, step2_2, step2_3, step2_4, step2_5, step2_6;
+ __m128i step3_0, step3_1, step3_2, step3_3;
+ __m128i step3_4, step3_5, step3_6, step3_7;
+ __m128i res00, res01, res02, res03, res04, res05, res06, res07;
+ __m128i res08, res09, res10, res11, res12, res13, res14, res15;
+ // Load and pre-condition input.
+ if (0 == pass) {
+ in00 = _mm_loadu_si128((const __m128i *)(in + 0 * stride));
+ in01 = _mm_loadu_si128((const __m128i *)(in + 1 * stride));
+ in02 = _mm_loadu_si128((const __m128i *)(in + 2 * stride));
+ in03 = _mm_loadu_si128((const __m128i *)(in + 3 * stride));
+ in04 = _mm_loadu_si128((const __m128i *)(in + 4 * stride));
+ in05 = _mm_loadu_si128((const __m128i *)(in + 5 * stride));
+ in06 = _mm_loadu_si128((const __m128i *)(in + 6 * stride));
+ in07 = _mm_loadu_si128((const __m128i *)(in + 7 * stride));
+ in08 = _mm_loadu_si128((const __m128i *)(in + 8 * stride));
+ in09 = _mm_loadu_si128((const __m128i *)(in + 9 * stride));
+ in10 = _mm_loadu_si128((const __m128i *)(in + 10 * stride));
+ in11 = _mm_loadu_si128((const __m128i *)(in + 11 * stride));
+ in12 = _mm_loadu_si128((const __m128i *)(in + 12 * stride));
+ in13 = _mm_loadu_si128((const __m128i *)(in + 13 * stride));
+ in14 = _mm_loadu_si128((const __m128i *)(in + 14 * stride));
+ in15 = _mm_loadu_si128((const __m128i *)(in + 15 * stride));
+ // x = x << 2
+ in00 = _mm_slli_epi16(in00, 2);
+ in01 = _mm_slli_epi16(in01, 2);
+ in02 = _mm_slli_epi16(in02, 2);
+ in03 = _mm_slli_epi16(in03, 2);
+ in04 = _mm_slli_epi16(in04, 2);
+ in05 = _mm_slli_epi16(in05, 2);
+ in06 = _mm_slli_epi16(in06, 2);
+ in07 = _mm_slli_epi16(in07, 2);
+ in08 = _mm_slli_epi16(in08, 2);
+ in09 = _mm_slli_epi16(in09, 2);
+ in10 = _mm_slli_epi16(in10, 2);
+ in11 = _mm_slli_epi16(in11, 2);
+ in12 = _mm_slli_epi16(in12, 2);
+ in13 = _mm_slli_epi16(in13, 2);
+ in14 = _mm_slli_epi16(in14, 2);
+ in15 = _mm_slli_epi16(in15, 2);
+ } else {
+ in00 = _mm_loadu_si128((const __m128i *)(in + 0 * 16));
+ in01 = _mm_loadu_si128((const __m128i *)(in + 1 * 16));
+ in02 = _mm_loadu_si128((const __m128i *)(in + 2 * 16));
+ in03 = _mm_loadu_si128((const __m128i *)(in + 3 * 16));
+ in04 = _mm_loadu_si128((const __m128i *)(in + 4 * 16));
+ in05 = _mm_loadu_si128((const __m128i *)(in + 5 * 16));
+ in06 = _mm_loadu_si128((const __m128i *)(in + 6 * 16));
+ in07 = _mm_loadu_si128((const __m128i *)(in + 7 * 16));
+ in08 = _mm_loadu_si128((const __m128i *)(in + 8 * 16));
+ in09 = _mm_loadu_si128((const __m128i *)(in + 9 * 16));
+ in10 = _mm_loadu_si128((const __m128i *)(in + 10 * 16));
+ in11 = _mm_loadu_si128((const __m128i *)(in + 11 * 16));
+ in12 = _mm_loadu_si128((const __m128i *)(in + 12 * 16));
+ in13 = _mm_loadu_si128((const __m128i *)(in + 13 * 16));
+ in14 = _mm_loadu_si128((const __m128i *)(in + 14 * 16));
+ in15 = _mm_loadu_si128((const __m128i *)(in + 15 * 16));
+ // x = (x + 1) >> 2
+ in00 = _mm_add_epi16(in00, kOne);
+ in01 = _mm_add_epi16(in01, kOne);
+ in02 = _mm_add_epi16(in02, kOne);
+ in03 = _mm_add_epi16(in03, kOne);
+ in04 = _mm_add_epi16(in04, kOne);
+ in05 = _mm_add_epi16(in05, kOne);
+ in06 = _mm_add_epi16(in06, kOne);
+ in07 = _mm_add_epi16(in07, kOne);
+ in08 = _mm_add_epi16(in08, kOne);
+ in09 = _mm_add_epi16(in09, kOne);
+ in10 = _mm_add_epi16(in10, kOne);
+ in11 = _mm_add_epi16(in11, kOne);
+ in12 = _mm_add_epi16(in12, kOne);
+ in13 = _mm_add_epi16(in13, kOne);
+ in14 = _mm_add_epi16(in14, kOne);
+ in15 = _mm_add_epi16(in15, kOne);
+ in00 = _mm_srai_epi16(in00, 2);
+ in01 = _mm_srai_epi16(in01, 2);
+ in02 = _mm_srai_epi16(in02, 2);
+ in03 = _mm_srai_epi16(in03, 2);
+ in04 = _mm_srai_epi16(in04, 2);
+ in05 = _mm_srai_epi16(in05, 2);
+ in06 = _mm_srai_epi16(in06, 2);
+ in07 = _mm_srai_epi16(in07, 2);
+ in08 = _mm_srai_epi16(in08, 2);
+ in09 = _mm_srai_epi16(in09, 2);
+ in10 = _mm_srai_epi16(in10, 2);
+ in11 = _mm_srai_epi16(in11, 2);
+ in12 = _mm_srai_epi16(in12, 2);
+ in13 = _mm_srai_epi16(in13, 2);
+ in14 = _mm_srai_epi16(in14, 2);
+ in15 = _mm_srai_epi16(in15, 2);
+ }
+ in += 8;
+ // Calculate input for the first 8 results.
+ {
+ input0 = _mm_add_epi16(in00, in15);
+ input1 = _mm_add_epi16(in01, in14);
+ input2 = _mm_add_epi16(in02, in13);
+ input3 = _mm_add_epi16(in03, in12);
+ input4 = _mm_add_epi16(in04, in11);
+ input5 = _mm_add_epi16(in05, in10);
+ input6 = _mm_add_epi16(in06, in09);
+ input7 = _mm_add_epi16(in07, in08);
+ }
+ // Calculate input for the next 8 results.
+ {
+ step1_0 = _mm_sub_epi16(in07, in08);
+ step1_1 = _mm_sub_epi16(in06, in09);
+ step1_2 = _mm_sub_epi16(in05, in10);
+ step1_3 = _mm_sub_epi16(in04, in11);
+ step1_4 = _mm_sub_epi16(in03, in12);
+ step1_5 = _mm_sub_epi16(in02, in13);
+ step1_6 = _mm_sub_epi16(in01, in14);
+ step1_7 = _mm_sub_epi16(in00, in15);
+ }
+ // Work on the first eight values; fdct8_1d(input, even_results);
+ {
+        // Add/subtract
+ const __m128i q0 = _mm_add_epi16(input0, input7);
+ const __m128i q1 = _mm_add_epi16(input1, input6);
+ const __m128i q2 = _mm_add_epi16(input2, input5);
+ const __m128i q3 = _mm_add_epi16(input3, input4);
+ const __m128i q4 = _mm_sub_epi16(input3, input4);
+ const __m128i q5 = _mm_sub_epi16(input2, input5);
+ const __m128i q6 = _mm_sub_epi16(input1, input6);
+ const __m128i q7 = _mm_sub_epi16(input0, input7);
+ // Work on first four results
+ {
+          // Add/subtract
+ const __m128i r0 = _mm_add_epi16(q0, q3);
+ const __m128i r1 = _mm_add_epi16(q1, q2);
+ const __m128i r2 = _mm_sub_epi16(q1, q2);
+ const __m128i r3 = _mm_sub_epi16(q0, q3);
+ // Interleave to do the multiply by constants which gets us
+ // into 32 bits.
+ const __m128i t0 = _mm_unpacklo_epi16(r0, r1);
+ const __m128i t1 = _mm_unpackhi_epi16(r0, r1);
+ const __m128i t2 = _mm_unpacklo_epi16(r2, r3);
+ const __m128i t3 = _mm_unpackhi_epi16(r2, r3);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16);
+ const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16);
+ const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16);
+ const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08);
+ const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08);
+ const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24);
+ const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
+ const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
+ const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
+ const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+ const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+ const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+ const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+ // Combine
+ res00 = _mm_packs_epi32(w0, w1);
+ res08 = _mm_packs_epi32(w2, w3);
+ res04 = _mm_packs_epi32(w4, w5);
+ res12 = _mm_packs_epi32(w6, w7);
+ }
+ // Work on next four results
+ {
+ // Interleave to do the multiply by constants which gets us
+ // into 32 bits.
+ const __m128i d0 = _mm_unpacklo_epi16(q6, q5);
+ const __m128i d1 = _mm_unpackhi_epi16(q6, q5);
+ const __m128i e0 = _mm_madd_epi16(d0, k__cospi_p16_m16);
+ const __m128i e1 = _mm_madd_epi16(d1, k__cospi_p16_m16);
+ const __m128i e2 = _mm_madd_epi16(d0, k__cospi_p16_p16);
+ const __m128i e3 = _mm_madd_epi16(d1, k__cospi_p16_p16);
+ // dct_const_round_shift
+ const __m128i f0 = _mm_add_epi32(e0, k__DCT_CONST_ROUNDING);
+ const __m128i f1 = _mm_add_epi32(e1, k__DCT_CONST_ROUNDING);
+ const __m128i f2 = _mm_add_epi32(e2, k__DCT_CONST_ROUNDING);
+ const __m128i f3 = _mm_add_epi32(e3, k__DCT_CONST_ROUNDING);
+ const __m128i s0 = _mm_srai_epi32(f0, DCT_CONST_BITS);
+ const __m128i s1 = _mm_srai_epi32(f1, DCT_CONST_BITS);
+ const __m128i s2 = _mm_srai_epi32(f2, DCT_CONST_BITS);
+ const __m128i s3 = _mm_srai_epi32(f3, DCT_CONST_BITS);
+ // Combine
+ const __m128i r0 = _mm_packs_epi32(s0, s1);
+ const __m128i r1 = _mm_packs_epi32(s2, s3);
+      // Add/subtract
+ const __m128i x0 = _mm_add_epi16(q4, r0);
+ const __m128i x1 = _mm_sub_epi16(q4, r0);
+ const __m128i x2 = _mm_sub_epi16(q7, r1);
+ const __m128i x3 = _mm_add_epi16(q7, r1);
+ // Interleave to do the multiply by constants which gets us
+ // into 32 bits.
+ const __m128i t0 = _mm_unpacklo_epi16(x0, x3);
+ const __m128i t1 = _mm_unpackhi_epi16(x0, x3);
+ const __m128i t2 = _mm_unpacklo_epi16(x1, x2);
+ const __m128i t3 = _mm_unpackhi_epi16(x1, x2);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04);
+ const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28);
+ const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28);
+ const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20);
+ const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p12_p20);
+ const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12);
+ const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
+ const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
+ const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
+ const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+ const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+ const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+ const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+ // Combine
+ res02 = _mm_packs_epi32(w0, w1);
+ res14 = _mm_packs_epi32(w2, w3);
+ res10 = _mm_packs_epi32(w4, w5);
+ res06 = _mm_packs_epi32(w6, w7);
+ }
+ }
+ // Work on the next eight values; step1 -> odd_results
+ {
+ // step 2
+ {
+ const __m128i t0 = _mm_unpacklo_epi16(step1_5, step1_2);
+ const __m128i t1 = _mm_unpackhi_epi16(step1_5, step1_2);
+ const __m128i t2 = _mm_unpacklo_epi16(step1_4, step1_3);
+ const __m128i t3 = _mm_unpackhi_epi16(step1_4, step1_3);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_m16);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_m16);
+ const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p16_m16);
+ const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p16_m16);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ // Combine
+ step2_2 = _mm_packs_epi32(w0, w1);
+ step2_3 = _mm_packs_epi32(w2, w3);
+ }
+ {
+ const __m128i t0 = _mm_unpacklo_epi16(step1_5, step1_2);
+ const __m128i t1 = _mm_unpackhi_epi16(step1_5, step1_2);
+ const __m128i t2 = _mm_unpacklo_epi16(step1_4, step1_3);
+ const __m128i t3 = _mm_unpackhi_epi16(step1_4, step1_3);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16);
+ const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p16_p16);
+ const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p16_p16);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ // Combine
+ step2_5 = _mm_packs_epi32(w0, w1);
+ step2_4 = _mm_packs_epi32(w2, w3);
+ }
+ // step 3
+ {
+ step3_0 = _mm_add_epi16(step1_0, step2_3);
+ step3_1 = _mm_add_epi16(step1_1, step2_2);
+ step3_2 = _mm_sub_epi16(step1_1, step2_2);
+ step3_3 = _mm_sub_epi16(step1_0, step2_3);
+ step3_4 = _mm_sub_epi16(step1_7, step2_4);
+ step3_5 = _mm_sub_epi16(step1_6, step2_5);
+ step3_6 = _mm_add_epi16(step1_6, step2_5);
+ step3_7 = _mm_add_epi16(step1_7, step2_4);
+ }
+ // step 4
+ {
+ const __m128i t0 = _mm_unpacklo_epi16(step3_1, step3_6);
+ const __m128i t1 = _mm_unpackhi_epi16(step3_1, step3_6);
+ const __m128i t2 = _mm_unpacklo_epi16(step3_2, step3_5);
+ const __m128i t3 = _mm_unpackhi_epi16(step3_2, step3_5);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_m08_p24);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_m08_p24);
+ const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m24_m08);
+ const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m24_m08);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ // Combine
+ step2_1 = _mm_packs_epi32(w0, w1);
+ step2_2 = _mm_packs_epi32(w2, w3);
+ }
+ {
+ const __m128i t0 = _mm_unpacklo_epi16(step3_1, step3_6);
+ const __m128i t1 = _mm_unpackhi_epi16(step3_1, step3_6);
+ const __m128i t2 = _mm_unpacklo_epi16(step3_2, step3_5);
+ const __m128i t3 = _mm_unpackhi_epi16(step3_2, step3_5);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p24_p08);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p24_p08);
+ const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m08_p24);
+ const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m08_p24);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ // Combine
+ step2_6 = _mm_packs_epi32(w0, w1);
+ step2_5 = _mm_packs_epi32(w2, w3);
+ }
+ // step 5
+ {
+ step1_0 = _mm_add_epi16(step3_0, step2_1);
+ step1_1 = _mm_sub_epi16(step3_0, step2_1);
+ step1_2 = _mm_sub_epi16(step3_3, step2_2);
+ step1_3 = _mm_add_epi16(step3_3, step2_2);
+ step1_4 = _mm_add_epi16(step3_4, step2_5);
+ step1_5 = _mm_sub_epi16(step3_4, step2_5);
+ step1_6 = _mm_sub_epi16(step3_7, step2_6);
+ step1_7 = _mm_add_epi16(step3_7, step2_6);
+ }
+ // step 6
+ {
+ const __m128i t0 = _mm_unpacklo_epi16(step1_0, step1_7);
+ const __m128i t1 = _mm_unpackhi_epi16(step1_0, step1_7);
+ const __m128i t2 = _mm_unpacklo_epi16(step1_1, step1_6);
+ const __m128i t3 = _mm_unpackhi_epi16(step1_1, step1_6);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p30_p02);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p30_p02);
+ const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p14_p18);
+ const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p14_p18);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ // Combine
+ res01 = _mm_packs_epi32(w0, w1);
+ res09 = _mm_packs_epi32(w2, w3);
+ }
+ {
+ const __m128i t0 = _mm_unpacklo_epi16(step1_2, step1_5);
+ const __m128i t1 = _mm_unpackhi_epi16(step1_2, step1_5);
+ const __m128i t2 = _mm_unpacklo_epi16(step1_3, step1_4);
+ const __m128i t3 = _mm_unpackhi_epi16(step1_3, step1_4);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p22_p10);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p22_p10);
+ const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p06_p26);
+ const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p06_p26);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ // Combine
+ res05 = _mm_packs_epi32(w0, w1);
+ res13 = _mm_packs_epi32(w2, w3);
+ }
+ {
+ const __m128i t0 = _mm_unpacklo_epi16(step1_2, step1_5);
+ const __m128i t1 = _mm_unpackhi_epi16(step1_2, step1_5);
+ const __m128i t2 = _mm_unpacklo_epi16(step1_3, step1_4);
+ const __m128i t3 = _mm_unpackhi_epi16(step1_3, step1_4);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_m10_p22);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_m10_p22);
+ const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m26_p06);
+ const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m26_p06);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ // Combine
+ res11 = _mm_packs_epi32(w0, w1);
+ res03 = _mm_packs_epi32(w2, w3);
+ }
+ {
+ const __m128i t0 = _mm_unpacklo_epi16(step1_0, step1_7);
+ const __m128i t1 = _mm_unpackhi_epi16(step1_0, step1_7);
+ const __m128i t2 = _mm_unpacklo_epi16(step1_1, step1_6);
+ const __m128i t3 = _mm_unpackhi_epi16(step1_1, step1_6);
+ const __m128i u0 = _mm_madd_epi16(t0, k__cospi_m02_p30);
+ const __m128i u1 = _mm_madd_epi16(t1, k__cospi_m02_p30);
+ const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m18_p14);
+ const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m18_p14);
+ // dct_const_round_shift
+ const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+ const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+ const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+ const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+ const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+ const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+ const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+ const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+ // Combine
+ res15 = _mm_packs_epi32(w0, w1);
+ res07 = _mm_packs_epi32(w2, w3);
+ }
+ }
+ // Transpose the results, do it as two 8x8 transposes.
+ {
+ // 00 01 02 03 04 05 06 07
+ // 10 11 12 13 14 15 16 17
+ // 20 21 22 23 24 25 26 27
+ // 30 31 32 33 34 35 36 37
+ // 40 41 42 43 44 45 46 47
+ // 50 51 52 53 54 55 56 57
+ // 60 61 62 63 64 65 66 67
+ // 70 71 72 73 74 75 76 77
+ const __m128i tr0_0 = _mm_unpacklo_epi16(res00, res01);
+ const __m128i tr0_1 = _mm_unpacklo_epi16(res02, res03);
+ const __m128i tr0_2 = _mm_unpackhi_epi16(res00, res01);
+ const __m128i tr0_3 = _mm_unpackhi_epi16(res02, res03);
+ const __m128i tr0_4 = _mm_unpacklo_epi16(res04, res05);
+ const __m128i tr0_5 = _mm_unpacklo_epi16(res06, res07);
+ const __m128i tr0_6 = _mm_unpackhi_epi16(res04, res05);
+ const __m128i tr0_7 = _mm_unpackhi_epi16(res06, res07);
+ // 00 10 01 11 02 12 03 13
+ // 20 30 21 31 22 32 23 33
+ // 04 14 05 15 06 16 07 17
+ // 24 34 25 35 26 36 27 37
+ // 40 50 41 51 42 52 43 53
+ // 60 70 61 71 62 72 63 73
+    // 44 54 45 55 46 56 47 57
+ // 64 74 65 75 66 76 67 77
+ const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+ const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
+ const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+ const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
+ const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
+ const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
+ const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
+ const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
+ // 00 10 20 30 01 11 21 31
+ // 40 50 60 70 41 51 61 71
+ // 02 12 22 32 03 13 23 33
+ // 42 52 62 72 43 53 63 73
+    // 04 14 24 34 05 15 25 35
+    // 44 54 64 74 45 55 65 75
+ // 06 16 26 36 07 17 27 37
+ // 46 56 66 76 47 57 67 77
+ const __m128i tr2_0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
+ const __m128i tr2_1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
+ const __m128i tr2_2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
+ const __m128i tr2_3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
+ const __m128i tr2_4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
+ const __m128i tr2_5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
+ const __m128i tr2_6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
+ const __m128i tr2_7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
+ // 00 10 20 30 40 50 60 70
+ // 01 11 21 31 41 51 61 71
+ // 02 12 22 32 42 52 62 72
+ // 03 13 23 33 43 53 63 73
+ // 04 14 24 34 44 54 64 74
+ // 05 15 25 35 45 55 65 75
+ // 06 16 26 36 46 56 66 76
+ // 07 17 27 37 47 57 67 77
+ _mm_storeu_si128((__m128i *)(out + 0 * 16), tr2_0);
+ _mm_storeu_si128((__m128i *)(out + 1 * 16), tr2_1);
+ _mm_storeu_si128((__m128i *)(out + 2 * 16), tr2_2);
+ _mm_storeu_si128((__m128i *)(out + 3 * 16), tr2_3);
+ _mm_storeu_si128((__m128i *)(out + 4 * 16), tr2_4);
+ _mm_storeu_si128((__m128i *)(out + 5 * 16), tr2_5);
+ _mm_storeu_si128((__m128i *)(out + 6 * 16), tr2_6);
+ _mm_storeu_si128((__m128i *)(out + 7 * 16), tr2_7);
+ }
+ {
+ // 00 01 02 03 04 05 06 07
+ // 10 11 12 13 14 15 16 17
+ // 20 21 22 23 24 25 26 27
+ // 30 31 32 33 34 35 36 37
+ // 40 41 42 43 44 45 46 47
+ // 50 51 52 53 54 55 56 57
+ // 60 61 62 63 64 65 66 67
+ // 70 71 72 73 74 75 76 77
+ const __m128i tr0_0 = _mm_unpacklo_epi16(res08, res09);
+ const __m128i tr0_1 = _mm_unpacklo_epi16(res10, res11);
+ const __m128i tr0_2 = _mm_unpackhi_epi16(res08, res09);
+ const __m128i tr0_3 = _mm_unpackhi_epi16(res10, res11);
+ const __m128i tr0_4 = _mm_unpacklo_epi16(res12, res13);
+ const __m128i tr0_5 = _mm_unpacklo_epi16(res14, res15);
+ const __m128i tr0_6 = _mm_unpackhi_epi16(res12, res13);
+ const __m128i tr0_7 = _mm_unpackhi_epi16(res14, res15);
+ // 00 10 01 11 02 12 03 13
+ // 20 30 21 31 22 32 23 33
+ // 04 14 05 15 06 16 07 17
+ // 24 34 25 35 26 36 27 37
+ // 40 50 41 51 42 52 43 53
+ // 60 70 61 71 62 72 63 73
+    // 44 54 45 55 46 56 47 57
+ // 64 74 65 75 66 76 67 77
+ const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+ const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
+ const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+ const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
+ const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
+ const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
+ const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
+ const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
+ // 00 10 20 30 01 11 21 31
+ // 40 50 60 70 41 51 61 71
+ // 02 12 22 32 03 13 23 33
+ // 42 52 62 72 43 53 63 73
+    // 04 14 24 34 05 15 25 35
+    // 44 54 64 74 45 55 65 75
+ // 06 16 26 36 07 17 27 37
+ // 46 56 66 76 47 57 67 77
+ const __m128i tr2_0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
+ const __m128i tr2_1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
+ const __m128i tr2_2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
+ const __m128i tr2_3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
+ const __m128i tr2_4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
+ const __m128i tr2_5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
+ const __m128i tr2_6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
+ const __m128i tr2_7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
+ // 00 10 20 30 40 50 60 70
+ // 01 11 21 31 41 51 61 71
+ // 02 12 22 32 42 52 62 72
+ // 03 13 23 33 43 53 63 73
+ // 04 14 24 34 44 54 64 74
+ // 05 15 25 35 45 55 65 75
+ // 06 16 26 36 46 56 66 76
+ // 07 17 27 37 47 57 67 77
+ // Store results
+ _mm_storeu_si128((__m128i *)(out + 8 + 0 * 16), tr2_0);
+ _mm_storeu_si128((__m128i *)(out + 8 + 1 * 16), tr2_1);
+ _mm_storeu_si128((__m128i *)(out + 8 + 2 * 16), tr2_2);
+ _mm_storeu_si128((__m128i *)(out + 8 + 3 * 16), tr2_3);
+ _mm_storeu_si128((__m128i *)(out + 8 + 4 * 16), tr2_4);
+ _mm_storeu_si128((__m128i *)(out + 8 + 5 * 16), tr2_5);
+ _mm_storeu_si128((__m128i *)(out + 8 + 6 * 16), tr2_6);
+ _mm_storeu_si128((__m128i *)(out + 8 + 7 * 16), tr2_7);
+ }
+ out += 8*16;
+ }
+  // Set up in/out for the next pass.
+ in = intermediate;
+ out = output;
+ }
+}
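
The intrinsics above repeat one pattern throughout: interleave two rows of 16-bit coefficients (punpcklo/hi_epi16), multiply-accumulate each interleaved pair against a packed pair of cosine constants (pmaddwd), add the rounding constant, arithmetic-shift right by DCT_CONST_BITS, and pack the 32-bit lanes back down to 16 bits (packs_epi32). A minimal scalar sketch of that rounded butterfly, assuming the usual libvpx constants (DCT_CONST_BITS == 14); the helper names are illustrative, not part of this diff:

    #include <stdint.h>

    #define DCT_CONST_BITS 14
    #define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))

    /* dct_const_round_shift: add half, then arithmetic shift down. */
    static int16_t dct_round_shift(int32_t x) {
      return (int16_t)((x + DCT_CONST_ROUNDING) >> DCT_CONST_BITS);
    }

    /* One butterfly, the scalar form of an unpack/pmaddwd/paddd/psrai/packs
     * sequence: the packed constant pair (c0, c1) multiplies the
     * interleaved inputs (a, b). */
    static void butterfly(int16_t a, int16_t b, int16_t c0, int16_t c1,
                          int16_t *out0, int16_t *out1) {
      *out0 = dct_round_shift(a * c0 + b * c1);
      *out1 = dct_round_shift(a * c1 - b * c0);
    }
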
diff --git a/libvpx/vp9/encoder/x86/vp9_encodeopt.asm b/libvpx/vp9/encoder/x86/vp9_encodeopt.asm
new file mode 100644
index 0000000..734cb61
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_encodeopt.asm
@@ -0,0 +1,125 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+;int vp9_block_error_xmm(short *coeff_ptr, short *dcoef_ptr)
+global sym(vp9_block_error_xmm) PRIVATE
+sym(vp9_block_error_xmm):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 2
+ push rsi
+ push rdi
+    ; end prolog
+
+ mov rsi, arg(0) ;coeff_ptr
+ mov rdi, arg(1) ;dcoef_ptr
+
+ movdqa xmm0, [rsi]
+ movdqa xmm1, [rdi]
+
+ movdqa xmm2, [rsi+16]
+ movdqa xmm3, [rdi+16]
+
+ psubw xmm0, xmm1
+ psubw xmm2, xmm3
+
+ pmaddwd xmm0, xmm0
+ pmaddwd xmm2, xmm2
+
+ paddd xmm0, xmm2
+
+ pxor xmm5, xmm5
+ movdqa xmm1, xmm0
+
+ punpckldq xmm0, xmm5
+ punpckhdq xmm1, xmm5
+
+ paddd xmm0, xmm1
+ movdqa xmm1, xmm0
+
+ psrldq xmm0, 8
+ paddd xmm0, xmm1
+
+ movq rax, xmm0
+
+ pop rdi
+ pop rsi
+ ; begin epilog
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;int vp9_block_error_mmx(short *coeff_ptr, short *dcoef_ptr)
+global sym(vp9_block_error_mmx) PRIVATE
+sym(vp9_block_error_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 2
+ push rsi
+ push rdi
+ ; end prolog
+
+
+ mov rsi, arg(0) ;coeff_ptr
+ pxor mm7, mm7
+
+ mov rdi, arg(1) ;dcoef_ptr
+ movq mm3, [rsi]
+
+ movq mm4, [rdi]
+ movq mm5, [rsi+8]
+
+ movq mm6, [rdi+8]
+        pxor mm1, mm1 ; dc = 0 (was: movd mm1, dc)
+
+ movq mm2, mm7
+ psubw mm5, mm6
+
+ por mm1, mm2
+ pmaddwd mm5, mm5
+
+ pcmpeqw mm1, mm7
+ psubw mm3, mm4
+
+ pand mm1, mm3
+ pmaddwd mm1, mm1
+
+ paddd mm1, mm5
+ movq mm3, [rsi+16]
+
+ movq mm4, [rdi+16]
+ movq mm5, [rsi+24]
+
+ movq mm6, [rdi+24]
+ psubw mm5, mm6
+
+ pmaddwd mm5, mm5
+ psubw mm3, mm4
+
+ pmaddwd mm3, mm3
+ paddd mm3, mm5
+
+ paddd mm1, mm3
+ movq mm0, mm1
+
+ psrlq mm1, 32
+ paddd mm0, mm1
+
+ movq rax, mm0
+
+ pop rdi
+ pop rsi
+ ; begin epilog
+ UNSHADOW_ARGS
+ pop rbp
+ ret
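
Both routines return the sum of squared differences between the transform coefficients and their dequantized counterparts: psubw forms the differences, pmaddwd squares and pairwise-sums them, and the trailing shuffles fold the lanes into a single scalar in rax. (In the MMX version, the pcmpeqw/pand sequence appears to be a vestigial dc mask that is always all-ones here.) A plain C sketch of the same computation over the 16 coefficients of a 4x4 block, assuming contiguous input:

    #include <stdint.h>

    int block_error_c(const int16_t *coeff, const int16_t *dqcoeff) {
      int err = 0;
      for (int i = 0; i < 16; ++i) {
        const int diff = coeff[i] - dqcoeff[i];  /* psubw */
        err += diff * diff;                      /* pmaddwd + horizontal add */
      }
      return err;
    }
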
diff --git a/libvpx/vp9/encoder/x86/vp9_fwalsh_sse2.asm b/libvpx/vp9/encoder/x86/vp9_fwalsh_sse2.asm
new file mode 100644
index 0000000..7bee9ef
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_fwalsh_sse2.asm
@@ -0,0 +1,164 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+;void vp9_short_walsh4x4_sse2(short *input, short *output, int pitch)
+global sym(vp9_short_walsh4x4_sse2) PRIVATE
+sym(vp9_short_walsh4x4_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 3
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ; input
+ mov rdi, arg(1) ; output
+ movsxd rdx, dword ptr arg(2) ; pitch
+
+ ; first for loop
+ movq xmm0, MMWORD PTR [rsi] ; load input
+ movq xmm1, MMWORD PTR [rsi + rdx]
+ lea rsi, [rsi + rdx*2]
+ movq xmm2, MMWORD PTR [rsi]
+ movq xmm3, MMWORD PTR [rsi + rdx]
+
+ punpcklwd xmm0, xmm1
+ punpcklwd xmm2, xmm3
+
+ movdqa xmm1, xmm0
+ punpckldq xmm0, xmm2 ; ip[1] ip[0]
+ punpckhdq xmm1, xmm2 ; ip[3] ip[2]
+
+ movdqa xmm2, xmm0
+ paddw xmm0, xmm1
+ psubw xmm2, xmm1
+
+ psllw xmm0, 2 ; d1 a1
+ psllw xmm2, 2 ; c1 b1
+
+ movdqa xmm1, xmm0
+ punpcklqdq xmm0, xmm2 ; b1 a1
+ punpckhqdq xmm1, xmm2 ; c1 d1
+
+ pxor xmm6, xmm6
+ movq xmm6, xmm0
+ pxor xmm7, xmm7
+ pcmpeqw xmm7, xmm6
+ paddw xmm7, [GLOBAL(c1)]
+
+ movdqa xmm2, xmm0
+ paddw xmm0, xmm1 ; b1+c1 a1+d1
+ psubw xmm2, xmm1 ; b1-c1 a1-d1
+ paddw xmm0, xmm7 ; b1+c1 a1+d1+(a1!=0)
+
+ ; second for loop
+ ; input: 13 9 5 1 12 8 4 0 (xmm0)
+ ; 14 10 6 2 15 11 7 3 (xmm2)
+ ; after shuffle:
+ ; 13 5 9 1 12 4 8 0 (xmm0)
+ ; 14 6 10 2 15 7 11 3 (xmm1)
+ pshuflw xmm3, xmm0, 0xd8
+ pshufhw xmm0, xmm3, 0xd8
+ pshuflw xmm3, xmm2, 0xd8
+ pshufhw xmm1, xmm3, 0xd8
+
+ movdqa xmm2, xmm0
+ pmaddwd xmm0, [GLOBAL(c1)] ; d11 a11 d10 a10
+ pmaddwd xmm2, [GLOBAL(cn1)] ; c11 b11 c10 b10
+ movdqa xmm3, xmm1
+ pmaddwd xmm1, [GLOBAL(c1)] ; d12 a12 d13 a13
+ pmaddwd xmm3, [GLOBAL(cn1)] ; c12 b12 c13 b13
+
+ pshufd xmm4, xmm0, 0xd8 ; d11 d10 a11 a10
+ pshufd xmm5, xmm2, 0xd8 ; c11 c10 b11 b10
+ pshufd xmm6, xmm1, 0x72 ; d13 d12 a13 a12
+ pshufd xmm7, xmm3, 0x72 ; c13 c12 b13 b12
+
+ movdqa xmm0, xmm4
+ punpcklqdq xmm0, xmm5 ; b11 b10 a11 a10
+ punpckhqdq xmm4, xmm5 ; c11 c10 d11 d10
+ movdqa xmm1, xmm6
+ punpcklqdq xmm1, xmm7 ; b13 b12 a13 a12
+ punpckhqdq xmm6, xmm7 ; c13 c12 d13 d12
+
+ movdqa xmm2, xmm0
+ paddd xmm0, xmm4 ; b21 b20 a21 a20
+ psubd xmm2, xmm4 ; c21 c20 d21 d20
+ movdqa xmm3, xmm1
+ paddd xmm1, xmm6 ; b23 b22 a23 a22
+ psubd xmm3, xmm6 ; c23 c22 d23 d22
+
+ pxor xmm4, xmm4
+ movdqa xmm5, xmm4
+ pcmpgtd xmm4, xmm0
+ pcmpgtd xmm5, xmm2
+ pand xmm4, [GLOBAL(cd1)]
+ pand xmm5, [GLOBAL(cd1)]
+
+ pxor xmm6, xmm6
+ movdqa xmm7, xmm6
+ pcmpgtd xmm6, xmm1
+ pcmpgtd xmm7, xmm3
+ pand xmm6, [GLOBAL(cd1)]
+ pand xmm7, [GLOBAL(cd1)]
+
+ paddd xmm0, xmm4
+ paddd xmm2, xmm5
+ paddd xmm0, [GLOBAL(cd3)]
+ paddd xmm2, [GLOBAL(cd3)]
+ paddd xmm1, xmm6
+ paddd xmm3, xmm7
+ paddd xmm1, [GLOBAL(cd3)]
+ paddd xmm3, [GLOBAL(cd3)]
+
+ psrad xmm0, 3
+ psrad xmm1, 3
+ psrad xmm2, 3
+ psrad xmm3, 3
+ movdqa xmm4, xmm0
+ punpcklqdq xmm0, xmm1 ; a23 a22 a21 a20
+ punpckhqdq xmm4, xmm1 ; b23 b22 b21 b20
+ movdqa xmm5, xmm2
+ punpckhqdq xmm2, xmm3 ; c23 c22 c21 c20
+ punpcklqdq xmm5, xmm3 ; d23 d22 d21 d20
+
+ packssdw xmm0, xmm4 ; b23 b22 b21 b20 a23 a22 a21 a20
+ packssdw xmm2, xmm5 ; d23 d22 d21 d20 c23 c22 c21 c20
+
+ movdqa XMMWORD PTR [rdi], xmm0
+ movdqa XMMWORD PTR [rdi + 16], xmm2
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+SECTION_RODATA
+align 16
+c1:
+ dw 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001
+align 16
+cn1:
+ dw 0x0001, 0xffff, 0x0001, 0xffff, 0x0001, 0xffff, 0x0001, 0xffff
+align 16
+cd1:
+ dd 0x00000001, 0x00000001, 0x00000001, 0x00000001
+align 16
+cd3:
+ dd 0x00000003, 0x00000003, 0x00000003, 0x00000003
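
The pcmpgtd/pand sequences ahead of the final psrad implement a sign-dependent rounding: negative intermediates pick up an extra 1 from cd1 before everything receives the bias of 3 from cd3 and is shifted right by 3. As a scalar sketch (assuming arithmetic right shift of negative ints, which is what psrad provides):

    /* Sign-biased rounding used before the final psrad by 3. */
    static int round_shift3(int x) {
      return (x + 3 + (x < 0 ? 1 : 0)) >> 3;
    }
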
diff --git a/libvpx/vp9/encoder/x86/vp9_mcomp_x86.h b/libvpx/vp9/encoder/x86/vp9_mcomp_x86.h
new file mode 100644
index 0000000..ca80b8b
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_mcomp_x86.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP9_ENCODER_X86_VP9_MCOMP_X86_H_
+#define VP9_ENCODER_X86_VP9_MCOMP_X86_H_
+
+#if HAVE_SSE3
+#if !CONFIG_RUNTIME_CPU_DETECT
+
+#undef vp9_search_full_search
+#define vp9_search_full_search vp9_full_search_sadx3
+
+#undef vp9_search_refining_search
+#define vp9_search_refining_search vp9_refining_search_sadx4
+
+#undef vp9_search_diamond_search
+#define vp9_search_diamond_search vp9_diamond_search_sadx4
+
+#endif
+#endif
+
+#if HAVE_SSE4_1
+#if !CONFIG_RUNTIME_CPU_DETECT
+
+#undef vp9_search_full_search
+#define vp9_search_full_search vp9_full_search_sadx8
+
+#endif
+#endif
+
+#endif
+
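
The header is pure compile-time dispatch: with CONFIG_RUNTIME_CPU_DETECT disabled, each generic search entry point is rebound by the preprocessor to the best statically known variant, and the SSE4.1 block, coming after the SSE3 one, wins for full search. The same pattern in miniature, with illustrative names:

    int search_c(const unsigned char *p);
    int search_sse3(const unsigned char *p);

    #define search search_c            /* generic default */
    #if HAVE_SSE3 && !CONFIG_RUNTIME_CPU_DETECT
    #undef search
    #define search search_sse3         /* statically selected override */
    #endif
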
diff --git a/libvpx/vp9/encoder/x86/vp9_sad4d_sse2.asm b/libvpx/vp9/encoder/x86/vp9_sad4d_sse2.asm
new file mode 100644
index 0000000..b493628
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_sad4d_sse2.asm
@@ -0,0 +1,231 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "third_party/x86inc/x86inc.asm"
+
+SECTION .text
+
+; PROCESS_4x2x4 first, off_{first,second}_{src,ref}, advance_at_end
+%macro PROCESS_4x2x4 5-6 0
+ movd m0, [srcq +%2]
+%if %1 == 1
+ movd m6, [ref1q+%3]
+ movd m4, [ref2q+%3]
+ movd m7, [ref3q+%3]
+ movd m5, [ref4q+%3]
+ punpckldq m0, [srcq +%4]
+ punpckldq m6, [ref1q+%5]
+ punpckldq m4, [ref2q+%5]
+ punpckldq m7, [ref3q+%5]
+ punpckldq m5, [ref4q+%5]
+ psadbw m6, m0
+ psadbw m4, m0
+ psadbw m7, m0
+ psadbw m5, m0
+ punpckldq m6, m4
+ punpckldq m7, m5
+%else
+ movd m1, [ref1q+%3]
+ movd m2, [ref2q+%3]
+ movd m3, [ref3q+%3]
+ movd m4, [ref4q+%3]
+ punpckldq m0, [srcq +%4]
+ punpckldq m1, [ref1q+%5]
+ punpckldq m2, [ref2q+%5]
+ punpckldq m3, [ref3q+%5]
+ punpckldq m4, [ref4q+%5]
+ psadbw m1, m0
+ psadbw m2, m0
+ psadbw m3, m0
+ psadbw m4, m0
+ punpckldq m1, m2
+ punpckldq m3, m4
+ paddd m6, m1
+ paddd m7, m3
+%endif
+%if %6 == 1
+ lea srcq, [srcq +src_strideq*2]
+ lea ref1q, [ref1q+ref_strideq*2]
+ lea ref2q, [ref2q+ref_strideq*2]
+ lea ref3q, [ref3q+ref_strideq*2]
+ lea ref4q, [ref4q+ref_strideq*2]
+%endif
+%endmacro
+
+; PROCESS_8x2x4 first, off_{first,second}_{src,ref}, advance_at_end
+%macro PROCESS_8x2x4 5-6 0
+ movh m0, [srcq +%2]
+%if %1 == 1
+ movh m4, [ref1q+%3]
+ movh m5, [ref2q+%3]
+ movh m6, [ref3q+%3]
+ movh m7, [ref4q+%3]
+ movhps m0, [srcq +%4]
+ movhps m4, [ref1q+%5]
+ movhps m5, [ref2q+%5]
+ movhps m6, [ref3q+%5]
+ movhps m7, [ref4q+%5]
+ psadbw m4, m0
+ psadbw m5, m0
+ psadbw m6, m0
+ psadbw m7, m0
+%else
+ movh m1, [ref1q+%3]
+ movh m2, [ref2q+%3]
+ movh m3, [ref3q+%3]
+ movhps m0, [srcq +%4]
+ movhps m1, [ref1q+%5]
+ movhps m2, [ref2q+%5]
+ movhps m3, [ref3q+%5]
+ psadbw m1, m0
+ psadbw m2, m0
+ psadbw m3, m0
+ paddd m4, m1
+ movh m1, [ref4q+%3]
+ movhps m1, [ref4q+%5]
+ paddd m5, m2
+ paddd m6, m3
+ psadbw m1, m0
+ paddd m7, m1
+%endif
+%if %6 == 1
+ lea srcq, [srcq +src_strideq*2]
+ lea ref1q, [ref1q+ref_strideq*2]
+ lea ref2q, [ref2q+ref_strideq*2]
+ lea ref3q, [ref3q+ref_strideq*2]
+ lea ref4q, [ref4q+ref_strideq*2]
+%endif
+%endmacro
+
+; PROCESS_16x2x4 first, off_{first,second}_{src,ref}, advance_at_end
+%macro PROCESS_16x2x4 5-6 0
+ ; 1st 16 px
+ mova m0, [srcq +%2]
+%if %1 == 1
+ movu m4, [ref1q+%3]
+ movu m5, [ref2q+%3]
+ movu m6, [ref3q+%3]
+ movu m7, [ref4q+%3]
+ psadbw m4, m0
+ psadbw m5, m0
+ psadbw m6, m0
+ psadbw m7, m0
+%else
+ movu m1, [ref1q+%3]
+ movu m2, [ref2q+%3]
+ movu m3, [ref3q+%3]
+ psadbw m1, m0
+ psadbw m2, m0
+ psadbw m3, m0
+ paddd m4, m1
+ movu m1, [ref4q+%3]
+ paddd m5, m2
+ paddd m6, m3
+ psadbw m1, m0
+ paddd m7, m1
+%endif
+
+ ; 2nd 16 px
+ mova m0, [srcq +%4]
+ movu m1, [ref1q+%5]
+ movu m2, [ref2q+%5]
+ movu m3, [ref3q+%5]
+ psadbw m1, m0
+ psadbw m2, m0
+ psadbw m3, m0
+ paddd m4, m1
+ movu m1, [ref4q+%5]
+ paddd m5, m2
+ paddd m6, m3
+%if %6 == 1
+ lea srcq, [srcq +src_strideq*2]
+ lea ref1q, [ref1q+ref_strideq*2]
+ lea ref2q, [ref2q+ref_strideq*2]
+ lea ref3q, [ref3q+ref_strideq*2]
+ lea ref4q, [ref4q+ref_strideq*2]
+%endif
+ psadbw m1, m0
+ paddd m7, m1
+%endmacro
+
+; PROCESS_32x2x4 first, off_{first,second}_{src,ref}, advance_at_end
+%macro PROCESS_32x2x4 5-6 0
+ PROCESS_16x2x4 %1, %2, %3, %2 + 16, %3 + 16
+ PROCESS_16x2x4 0, %4, %5, %4 + 16, %5 + 16, %6
+%endmacro
+
+; PROCESS_64x2x4 first, off_{first,second}_{src,ref}, advance_at_end
+%macro PROCESS_64x2x4 5-6 0
+ PROCESS_32x2x4 %1, %2, %3, %2 + 32, %3 + 32
+ PROCESS_32x2x4 0, %4, %5, %4 + 32, %5 + 32, %6
+%endmacro
+
+; void vp9_sadNxNx4d_sse2(uint8_t *src, int src_stride,
+; uint8_t *ref[4], int ref_stride,
+; unsigned int res[4]);
+; where NxN = 64x64, 64x32, 32x64, 32x32, 32x16, 16x32, 16x16, 16x8, 8x16, 8x8, 8x4, 4x8 or 4x4
+%macro SADNXN4D 2
+%if UNIX64
+cglobal sad%1x%2x4d, 5, 8, 8, src, src_stride, ref1, ref_stride, \
+ res, ref2, ref3, ref4
+%else
+cglobal sad%1x%2x4d, 4, 7, 8, src, src_stride, ref1, ref_stride, \
+ ref2, ref3, ref4
+%endif
+ movsxdifnidn src_strideq, src_strided
+ movsxdifnidn ref_strideq, ref_strided
+ mov ref2q, [ref1q+gprsize*1]
+ mov ref3q, [ref1q+gprsize*2]
+ mov ref4q, [ref1q+gprsize*3]
+ mov ref1q, [ref1q+gprsize*0]
+
+ PROCESS_%1x2x4 1, 0, 0, src_strideq, ref_strideq, 1
+%rep (%2-4)/2
+ PROCESS_%1x2x4 0, 0, 0, src_strideq, ref_strideq, 1
+%endrep
+ PROCESS_%1x2x4 0, 0, 0, src_strideq, ref_strideq, 0
+
+%if mmsize == 16
+ pslldq m5, 4
+ pslldq m7, 4
+ por m4, m5
+ por m6, m7
+ mova m5, m4
+ mova m7, m6
+ punpcklqdq m4, m6
+ punpckhqdq m5, m7
+ movifnidn r4, r4mp
+ paddd m4, m5
+ movu [r4], m4
+ RET
+%else
+ movifnidn r4, r4mp
+ movq [r4+0], m6
+ movq [r4+8], m7
+ RET
+%endif
+%endmacro
+
+INIT_XMM sse2
+SADNXN4D 64, 64
+SADNXN4D 64, 32
+SADNXN4D 32, 64
+SADNXN4D 32, 32
+SADNXN4D 32, 16
+SADNXN4D 16, 32
+SADNXN4D 16, 16
+SADNXN4D 16, 8
+SADNXN4D 8, 16
+SADNXN4D 8, 8
+SADNXN4D 8, 4
+
+INIT_MMX sse
+SADNXN4D 4, 8
+SADNXN4D 4, 4
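
Each generated sadNxNx4d function evaluates one source block against four candidate reference blocks in a single pass, the hot pattern in motion search. A plain C reference of what the kernels compute (w and h stand for the block dimensions baked into each instantiation):

    #include <stdint.h>
    #include <stdlib.h>

    void sad_x4_c(const uint8_t *src, int src_stride,
                  const uint8_t *const ref[4], int ref_stride,
                  int w, int h, unsigned int res[4]) {
      for (int k = 0; k < 4; ++k) {
        unsigned int sad = 0;
        for (int i = 0; i < h; ++i)
          for (int j = 0; j < w; ++j)
            sad += abs(src[i * src_stride + j] - ref[k][i * ref_stride + j]);
        res[k] = sad;
      }
    }
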
diff --git a/libvpx/vp9/encoder/x86/vp9_sad_mmx.asm b/libvpx/vp9/encoder/x86/vp9_sad_mmx.asm
new file mode 100644
index 0000000..32fdd23
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_sad_mmx.asm
@@ -0,0 +1,427 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+global sym(vp9_sad16x16_mmx) PRIVATE
+global sym(vp9_sad8x16_mmx) PRIVATE
+global sym(vp9_sad8x8_mmx) PRIVATE
+global sym(vp9_sad4x4_mmx) PRIVATE
+global sym(vp9_sad16x8_mmx) PRIVATE
+
+;unsigned int vp9_sad16x16_mmx(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride)
+sym(vp9_sad16x16_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ lea rcx, [rsi+rax*8]
+
+ lea rcx, [rcx+rax*8]
+ pxor mm7, mm7
+
+ pxor mm6, mm6
+
+.x16x16sad_mmx_loop:
+
+ movq mm0, QWORD PTR [rsi]
+ movq mm2, QWORD PTR [rsi+8]
+
+ movq mm1, QWORD PTR [rdi]
+ movq mm3, QWORD PTR [rdi+8]
+
+ movq mm4, mm0
+ movq mm5, mm2
+
+ psubusb mm0, mm1
+ psubusb mm1, mm4
+
+ psubusb mm2, mm3
+ psubusb mm3, mm5
+
+ por mm0, mm1
+ por mm2, mm3
+
+ movq mm1, mm0
+ movq mm3, mm2
+
+ punpcklbw mm0, mm6
+ punpcklbw mm2, mm6
+
+ punpckhbw mm1, mm6
+ punpckhbw mm3, mm6
+
+ paddw mm0, mm2
+ paddw mm1, mm3
+
+
+ lea rsi, [rsi+rax]
+ add rdi, rdx
+
+ paddw mm7, mm0
+ paddw mm7, mm1
+
+ cmp rsi, rcx
+ jne .x16x16sad_mmx_loop
+
+
+ movq mm0, mm7
+
+ punpcklwd mm0, mm6
+ punpckhwd mm7, mm6
+
+ paddw mm0, mm7
+ movq mm7, mm0
+
+
+ psrlq mm0, 32
+ paddw mm7, mm0
+
+ movq rax, mm7
+
+ pop rdi
+ pop rsi
+ mov rsp, rbp
+ ; begin epilog
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;unsigned int vp9_sad8x16_mmx(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride)
+sym(vp9_sad8x16_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ lea rcx, [rsi+rax*8]
+
+ lea rcx, [rcx+rax*8]
+ pxor mm7, mm7
+
+ pxor mm6, mm6
+
+.x8x16sad_mmx_loop:
+
+ movq mm0, QWORD PTR [rsi]
+ movq mm1, QWORD PTR [rdi]
+
+ movq mm2, mm0
+ psubusb mm0, mm1
+
+ psubusb mm1, mm2
+ por mm0, mm1
+
+ movq mm2, mm0
+ punpcklbw mm0, mm6
+
+ punpckhbw mm2, mm6
+ lea rsi, [rsi+rax]
+
+ add rdi, rdx
+ paddw mm7, mm0
+
+ paddw mm7, mm2
+ cmp rsi, rcx
+
+ jne .x8x16sad_mmx_loop
+
+ movq mm0, mm7
+ punpcklwd mm0, mm6
+
+ punpckhwd mm7, mm6
+ paddw mm0, mm7
+
+ movq mm7, mm0
+ psrlq mm0, 32
+
+ paddw mm7, mm0
+ movq rax, mm7
+
+ pop rdi
+ pop rsi
+ mov rsp, rbp
+ ; begin epilog
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;unsigned int vp9_sad8x8_mmx(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride)
+sym(vp9_sad8x8_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ lea rcx, [rsi+rax*8]
+ pxor mm7, mm7
+
+ pxor mm6, mm6
+
+.x8x8sad_mmx_loop:
+
+ movq mm0, QWORD PTR [rsi]
+ movq mm1, QWORD PTR [rdi]
+
+ movq mm2, mm0
+ psubusb mm0, mm1
+
+ psubusb mm1, mm2
+ por mm0, mm1
+
+ movq mm2, mm0
+ punpcklbw mm0, mm6
+
+ punpckhbw mm2, mm6
+ paddw mm0, mm2
+
+ lea rsi, [rsi+rax]
+ add rdi, rdx
+
+ paddw mm7, mm0
+ cmp rsi, rcx
+
+ jne .x8x8sad_mmx_loop
+
+ movq mm0, mm7
+ punpcklwd mm0, mm6
+
+ punpckhwd mm7, mm6
+ paddw mm0, mm7
+
+ movq mm7, mm0
+ psrlq mm0, 32
+
+ paddw mm7, mm0
+ movq rax, mm7
+
+ pop rdi
+ pop rsi
+ mov rsp, rbp
+ ; begin epilog
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;unsigned int vp9_sad4x4_mmx(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride)
+sym(vp9_sad4x4_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ movd mm0, DWORD PTR [rsi]
+ movd mm1, DWORD PTR [rdi]
+
+ movd mm2, DWORD PTR [rsi+rax]
+ movd mm3, DWORD PTR [rdi+rdx]
+
+ punpcklbw mm0, mm2
+ punpcklbw mm1, mm3
+
+ movq mm2, mm0
+ psubusb mm0, mm1
+
+ psubusb mm1, mm2
+ por mm0, mm1
+
+ movq mm2, mm0
+ pxor mm3, mm3
+
+ punpcklbw mm0, mm3
+ punpckhbw mm2, mm3
+
+ paddw mm0, mm2
+
+ lea rsi, [rsi+rax*2]
+ lea rdi, [rdi+rdx*2]
+
+ movd mm4, DWORD PTR [rsi]
+ movd mm5, DWORD PTR [rdi]
+
+ movd mm6, DWORD PTR [rsi+rax]
+ movd mm7, DWORD PTR [rdi+rdx]
+
+ punpcklbw mm4, mm6
+ punpcklbw mm5, mm7
+
+ movq mm6, mm4
+ psubusb mm4, mm5
+
+ psubusb mm5, mm6
+ por mm4, mm5
+
+ movq mm5, mm4
+ punpcklbw mm4, mm3
+
+ punpckhbw mm5, mm3
+ paddw mm4, mm5
+
+ paddw mm0, mm4
+ movq mm1, mm0
+
+ punpcklwd mm0, mm3
+ punpckhwd mm1, mm3
+
+ paddw mm0, mm1
+ movq mm1, mm0
+
+ psrlq mm0, 32
+ paddw mm0, mm1
+
+ movq rax, mm0
+
+ pop rdi
+ pop rsi
+ mov rsp, rbp
+ ; begin epilog
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;unsigned int vp9_sad16x8_mmx(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride)
+sym(vp9_sad16x8_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ lea rcx, [rsi+rax*8]
+ pxor mm7, mm7
+
+ pxor mm6, mm6
+
+.x16x8sad_mmx_loop:
+
+ movq mm0, [rsi]
+ movq mm1, [rdi]
+
+ movq mm2, [rsi+8]
+ movq mm3, [rdi+8]
+
+ movq mm4, mm0
+ movq mm5, mm2
+
+ psubusb mm0, mm1
+ psubusb mm1, mm4
+
+ psubusb mm2, mm3
+ psubusb mm3, mm5
+
+ por mm0, mm1
+ por mm2, mm3
+
+ movq mm1, mm0
+ movq mm3, mm2
+
+ punpcklbw mm0, mm6
+ punpckhbw mm1, mm6
+
+ punpcklbw mm2, mm6
+ punpckhbw mm3, mm6
+
+
+ paddw mm0, mm2
+ paddw mm1, mm3
+
+ paddw mm0, mm1
+ lea rsi, [rsi+rax]
+
+ add rdi, rdx
+ paddw mm7, mm0
+
+ cmp rsi, rcx
+ jne .x16x8sad_mmx_loop
+
+ movq mm0, mm7
+ punpcklwd mm0, mm6
+
+ punpckhwd mm7, mm6
+ paddw mm0, mm7
+
+ movq mm7, mm0
+ psrlq mm0, 32
+
+ paddw mm7, mm0
+ movq rax, mm7
+
+ pop rdi
+ pop rsi
+ mov rsp, rbp
+ ; begin epilog
+ UNSHADOW_ARGS
+ pop rbp
+ ret
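
These MMX kernels predate psadbw, so they build the absolute difference of unsigned bytes from two saturating subtractions OR'd together, then widen with punpcklbw/punpckhbw against zero and accumulate words. The byte-level trick in scalar form:

    #include <stdint.h>

    /* |a - b| for unsigned bytes without a compare instruction:
     * the saturating a-b is zero when a <= b, and vice versa. */
    static uint8_t absdiff_u8(uint8_t a, uint8_t b) {
      const uint8_t d0 = (a > b) ? (uint8_t)(a - b) : 0;  /* psubusb a, b */
      const uint8_t d1 = (b > a) ? (uint8_t)(b - a) : 0;  /* psubusb b, a */
      return (uint8_t)(d0 | d1);                          /* por */
    }
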
diff --git a/libvpx/vp9/encoder/x86/vp9_sad_sse2.asm b/libvpx/vp9/encoder/x86/vp9_sad_sse2.asm
new file mode 100644
index 0000000..8fb7d41
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_sad_sse2.asm
@@ -0,0 +1,211 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "third_party/x86inc/x86inc.asm"
+
+SECTION .text
+
+; unsigned int vp9_sad64x{32,64}_sse2(uint8_t *src, int src_stride,
+; uint8_t *ref, int ref_stride);
+%macro SAD64XN 1
+cglobal sad64x%1, 4, 5, 5, src, src_stride, ref, ref_stride, n_rows
+ movsxdifnidn src_strideq, src_strided
+ movsxdifnidn ref_strideq, ref_strided
+ mov n_rowsd, %1
+ pxor m0, m0
+.loop:
+ movu m1, [refq]
+ movu m2, [refq+16]
+ movu m3, [refq+32]
+ movu m4, [refq+48]
+ psadbw m1, [srcq]
+ psadbw m2, [srcq+16]
+ psadbw m3, [srcq+32]
+ psadbw m4, [srcq+48]
+ paddd m1, m2
+ paddd m3, m4
+ add refq, ref_strideq
+ paddd m0, m1
+ add srcq, src_strideq
+ paddd m0, m3
+ dec n_rowsd
+ jg .loop
+
+ movhlps m1, m0
+ paddd m0, m1
+ movd eax, m0
+ RET
+%endmacro
+
+INIT_XMM sse2
+SAD64XN 64 ; sad64x64_sse2
+SAD64XN 32 ; sad64x32_sse2
+
+; unsigned int vp9_sad32x{16,32,64}_sse2(uint8_t *src, int src_stride,
+; uint8_t *ref, int ref_stride);
+%macro SAD32XN 1
+cglobal sad32x%1, 4, 5, 5, src, src_stride, ref, ref_stride, n_rows
+ movsxdifnidn src_strideq, src_strided
+ movsxdifnidn ref_strideq, ref_strided
+ mov n_rowsd, %1/2
+ pxor m0, m0
+
+.loop:
+ movu m1, [refq]
+ movu m2, [refq+16]
+ movu m3, [refq+ref_strideq]
+ movu m4, [refq+ref_strideq+16]
+ psadbw m1, [srcq]
+ psadbw m2, [srcq+16]
+ psadbw m3, [srcq+src_strideq]
+ psadbw m4, [srcq+src_strideq+16]
+ paddd m1, m2
+ paddd m3, m4
+ lea refq, [refq+ref_strideq*2]
+ paddd m0, m1
+ lea srcq, [srcq+src_strideq*2]
+ paddd m0, m3
+ dec n_rowsd
+ jg .loop
+
+ movhlps m1, m0
+ paddd m0, m1
+ movd eax, m0
+ RET
+%endmacro
+
+INIT_XMM sse2
+SAD32XN 64 ; sad32x64_sse2
+SAD32XN 32 ; sad32x32_sse2
+SAD32XN 16 ; sad32x16_sse2
+
+; unsigned int vp9_sad16x{8,16,32}_sse2(uint8_t *src, int src_stride,
+; uint8_t *ref, int ref_stride);
+%macro SAD16XN 1
+cglobal sad16x%1, 4, 7, 5, src, src_stride, ref, ref_stride, \
+ src_stride3, ref_stride3, n_rows
+ movsxdifnidn src_strideq, src_strided
+ movsxdifnidn ref_strideq, ref_strided
+ lea src_stride3q, [src_strideq*3]
+ lea ref_stride3q, [ref_strideq*3]
+ mov n_rowsd, %1/4
+ pxor m0, m0
+
+.loop:
+ movu m1, [refq]
+ movu m2, [refq+ref_strideq]
+ movu m3, [refq+ref_strideq*2]
+ movu m4, [refq+ref_stride3q]
+ psadbw m1, [srcq]
+ psadbw m2, [srcq+src_strideq]
+ psadbw m3, [srcq+src_strideq*2]
+ psadbw m4, [srcq+src_stride3q]
+ paddd m1, m2
+ paddd m3, m4
+ lea refq, [refq+ref_strideq*4]
+ paddd m0, m1
+ lea srcq, [srcq+src_strideq*4]
+ paddd m0, m3
+ dec n_rowsd
+ jg .loop
+
+ movhlps m1, m0
+ paddd m0, m1
+ movd eax, m0
+ RET
+%endmacro
+
+INIT_XMM sse2
+SAD16XN 32 ; sad16x32_sse2
+SAD16XN 16 ; sad16x16_sse2
+SAD16XN 8 ; sad16x8_sse2
+
+; unsigned int vp9_sad8x{4,8,16}_sse2(uint8_t *src, int src_stride,
+; uint8_t *ref, int ref_stride);
+%macro SAD8XN 1
+cglobal sad8x%1, 4, 7, 5, src, src_stride, ref, ref_stride, \
+ src_stride3, ref_stride3, n_rows
+ movsxdifnidn src_strideq, src_strided
+ movsxdifnidn ref_strideq, ref_strided
+ lea src_stride3q, [src_strideq*3]
+ lea ref_stride3q, [ref_strideq*3]
+ mov n_rowsd, %1/4
+ pxor m0, m0
+
+.loop:
+ movh m1, [refq]
+ movhps m1, [refq+ref_strideq]
+ movh m2, [refq+ref_strideq*2]
+ movhps m2, [refq+ref_stride3q]
+ movh m3, [srcq]
+ movhps m3, [srcq+src_strideq]
+ movh m4, [srcq+src_strideq*2]
+ movhps m4, [srcq+src_stride3q]
+ psadbw m1, m3
+ psadbw m2, m4
+ lea refq, [refq+ref_strideq*4]
+ paddd m0, m1
+ lea srcq, [srcq+src_strideq*4]
+ paddd m0, m2
+ dec n_rowsd
+ jg .loop
+
+ movhlps m1, m0
+ paddd m0, m1
+ movd eax, m0
+ RET
+%endmacro
+
+INIT_XMM sse2
+SAD8XN 16 ; sad8x16_sse2
+SAD8XN 8 ; sad8x8_sse2
+SAD8XN 4 ; sad8x4_sse2
+
+; unsigned int vp9_sad4x{4, 8}_sse(uint8_t *src, int src_stride,
+; uint8_t *ref, int ref_stride);
+%macro SAD4XN 1
+cglobal sad4x%1, 4, 7, 7, src, src_stride, ref, ref_stride, \
+ src_stride3, ref_stride3, n_rows
+ movsxdifnidn src_strideq, src_strided
+ movsxdifnidn ref_strideq, ref_strided
+ lea src_stride3q, [src_strideq*3]
+ lea ref_stride3q, [ref_strideq*3]
+ mov n_rowsd, %1/4
+ pxor m0, m0
+
+.loop:
+ movd m1, [refq]
+ movd m2, [refq+ref_strideq]
+ movd m3, [refq+ref_strideq*2]
+ movd m4, [refq+ref_stride3q]
+ punpckldq m1, m2
+ punpckldq m3, m4
+ movd m2, [srcq]
+ movd m5, [srcq+src_strideq]
+ movd m4, [srcq+src_strideq*2]
+ movd m6, [srcq+src_stride3q]
+ punpckldq m2, m5
+ punpckldq m4, m6
+ psadbw m1, m2
+ psadbw m3, m4
+ lea refq, [refq+ref_strideq*4]
+ paddd m0, m1
+ lea srcq, [srcq+src_strideq*4]
+ paddd m0, m3
+ dec n_rowsd
+ jg .loop
+
+ movd eax, m0
+ RET
+%endmacro
+
+INIT_MMX sse
+SAD4XN 8 ; sad4x8_sse
+SAD4XN 4 ; sad4x4_sse
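
All of the SSE2 kernels above follow one shape: psadbw against the source rows, paddd into an accumulator, then movhlps + paddd to fold the two halves and movd to return the scalar. A minimal intrinsics sketch of that shape for a single 16-wide column (a simplified stand-in for the macro-generated code, using unaligned loads throughout for brevity):

    #include <emmintrin.h>
    #include <stdint.h>

    unsigned int sad16xn_sse2(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride, int n) {
      __m128i acc = _mm_setzero_si128();
      for (int i = 0; i < n; ++i) {
        const __m128i s = _mm_loadu_si128((const __m128i *)(src + i * src_stride));
        const __m128i r = _mm_loadu_si128((const __m128i *)(ref + i * ref_stride));
        acc = _mm_add_epi32(acc, _mm_sad_epu8(s, r));   /* psadbw + paddd */
      }
      acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 8)); /* fold, cf. movhlps */
      return (unsigned int)_mm_cvtsi128_si32(acc);      /* movd eax, m0 */
    }
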
diff --git a/libvpx/vp9/encoder/x86/vp9_sad_sse3.asm b/libvpx/vp9/encoder/x86/vp9_sad_sse3.asm
new file mode 100644
index 0000000..2b90a5d
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_sad_sse3.asm
@@ -0,0 +1,378 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "vpx_ports/x86_abi_support.asm"
+
+%macro STACK_FRAME_CREATE_X3 0
+%if ABI_IS_32BIT
+ %define src_ptr rsi
+ %define src_stride rax
+ %define ref_ptr rdi
+ %define ref_stride rdx
+ %define end_ptr rcx
+ %define ret_var rbx
+ %define result_ptr arg(4)
+ %define max_err arg(4)
+ %define height dword ptr arg(4)
+ push rbp
+ mov rbp, rsp
+ push rsi
+ push rdi
+ push rbx
+
+ mov rsi, arg(0) ; src_ptr
+ mov rdi, arg(2) ; ref_ptr
+
+ movsxd rax, dword ptr arg(1) ; src_stride
+ movsxd rdx, dword ptr arg(3) ; ref_stride
+%else
+ %if LIBVPX_YASM_WIN64
+ SAVE_XMM 7, u
+ %define src_ptr rcx
+ %define src_stride rdx
+ %define ref_ptr r8
+ %define ref_stride r9
+ %define end_ptr r10
+ %define ret_var r11
+ %define result_ptr [rsp+xmm_stack_space+8+4*8]
+ %define max_err [rsp+xmm_stack_space+8+4*8]
+ %define height dword ptr [rsp+xmm_stack_space+8+4*8]
+ %else
+ %define src_ptr rdi
+ %define src_stride rsi
+ %define ref_ptr rdx
+ %define ref_stride rcx
+ %define end_ptr r9
+ %define ret_var r10
+ %define result_ptr r8
+ %define max_err r8
+ %define height r8
+ %endif
+%endif
+
+%endmacro
+
+%macro STACK_FRAME_DESTROY_X3 0
+ %define src_ptr
+ %define src_stride
+ %define ref_ptr
+ %define ref_stride
+ %define end_ptr
+ %define ret_var
+ %define result_ptr
+ %define max_err
+ %define height
+
+%if ABI_IS_32BIT
+ pop rbx
+ pop rdi
+ pop rsi
+ pop rbp
+%else
+ %if LIBVPX_YASM_WIN64
+ RESTORE_XMM
+ %endif
+%endif
+ ret
+%endmacro
+
+%macro PROCESS_16X2X3 5
+%if %1==0
+ movdqa xmm0, XMMWORD PTR [%2]
+ lddqu xmm5, XMMWORD PTR [%3]
+ lddqu xmm6, XMMWORD PTR [%3+1]
+ lddqu xmm7, XMMWORD PTR [%3+2]
+
+ psadbw xmm5, xmm0
+ psadbw xmm6, xmm0
+ psadbw xmm7, xmm0
+%else
+ movdqa xmm0, XMMWORD PTR [%2]
+ lddqu xmm1, XMMWORD PTR [%3]
+ lddqu xmm2, XMMWORD PTR [%3+1]
+ lddqu xmm3, XMMWORD PTR [%3+2]
+
+ psadbw xmm1, xmm0
+ psadbw xmm2, xmm0
+ psadbw xmm3, xmm0
+
+ paddw xmm5, xmm1
+ paddw xmm6, xmm2
+ paddw xmm7, xmm3
+%endif
+ movdqa xmm0, XMMWORD PTR [%2+%4]
+ lddqu xmm1, XMMWORD PTR [%3+%5]
+ lddqu xmm2, XMMWORD PTR [%3+%5+1]
+ lddqu xmm3, XMMWORD PTR [%3+%5+2]
+
+%if %1==0 || %1==1
+ lea %2, [%2+%4*2]
+ lea %3, [%3+%5*2]
+%endif
+
+ psadbw xmm1, xmm0
+ psadbw xmm2, xmm0
+ psadbw xmm3, xmm0
+
+ paddw xmm5, xmm1
+ paddw xmm6, xmm2
+ paddw xmm7, xmm3
+%endmacro
+
+%macro PROCESS_8X2X3 5
+%if %1==0
+ movq mm0, QWORD PTR [%2]
+ movq mm5, QWORD PTR [%3]
+ movq mm6, QWORD PTR [%3+1]
+ movq mm7, QWORD PTR [%3+2]
+
+ psadbw mm5, mm0
+ psadbw mm6, mm0
+ psadbw mm7, mm0
+%else
+ movq mm0, QWORD PTR [%2]
+ movq mm1, QWORD PTR [%3]
+ movq mm2, QWORD PTR [%3+1]
+ movq mm3, QWORD PTR [%3+2]
+
+ psadbw mm1, mm0
+ psadbw mm2, mm0
+ psadbw mm3, mm0
+
+ paddw mm5, mm1
+ paddw mm6, mm2
+ paddw mm7, mm3
+%endif
+ movq mm0, QWORD PTR [%2+%4]
+ movq mm1, QWORD PTR [%3+%5]
+ movq mm2, QWORD PTR [%3+%5+1]
+ movq mm3, QWORD PTR [%3+%5+2]
+
+%if %1==0 || %1==1
+ lea %2, [%2+%4*2]
+ lea %3, [%3+%5*2]
+%endif
+
+ psadbw mm1, mm0
+ psadbw mm2, mm0
+ psadbw mm3, mm0
+
+ paddw mm5, mm1
+ paddw mm6, mm2
+ paddw mm7, mm3
+%endmacro
+
+;void vp9_sad16x16x3_sse3(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride,
+; int *results)
+global sym(vp9_sad16x16x3_sse3) PRIVATE
+sym(vp9_sad16x16x3_sse3):
+
+ STACK_FRAME_CREATE_X3
+
+ PROCESS_16X2X3 0, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 2, src_ptr, ref_ptr, src_stride, ref_stride
+
+ mov rcx, result_ptr
+
+ movq xmm0, xmm5
+ psrldq xmm5, 8
+
+ paddw xmm0, xmm5
+ movd [rcx], xmm0
+;-
+ movq xmm0, xmm6
+ psrldq xmm6, 8
+
+ paddw xmm0, xmm6
+ movd [rcx+4], xmm0
+;-
+ movq xmm0, xmm7
+ psrldq xmm7, 8
+
+ paddw xmm0, xmm7
+ movd [rcx+8], xmm0
+
+ STACK_FRAME_DESTROY_X3
+
+;void vp9_sad16x8x3_sse3(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride,
+; int *results)
+global sym(vp9_sad16x8x3_sse3) PRIVATE
+sym(vp9_sad16x8x3_sse3):
+
+ STACK_FRAME_CREATE_X3
+
+ PROCESS_16X2X3 0, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_16X2X3 2, src_ptr, ref_ptr, src_stride, ref_stride
+
+ mov rcx, result_ptr
+
+ movq xmm0, xmm5
+ psrldq xmm5, 8
+
+ paddw xmm0, xmm5
+ movd [rcx], xmm0
+;-
+ movq xmm0, xmm6
+ psrldq xmm6, 8
+
+ paddw xmm0, xmm6
+ movd [rcx+4], xmm0
+;-
+ movq xmm0, xmm7
+ psrldq xmm7, 8
+
+ paddw xmm0, xmm7
+ movd [rcx+8], xmm0
+
+ STACK_FRAME_DESTROY_X3
+
+;void vp9_sad8x16x3_sse3(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride,
+; int *results)
+global sym(vp9_sad8x16x3_sse3) PRIVATE
+sym(vp9_sad8x16x3_sse3):
+
+ STACK_FRAME_CREATE_X3
+
+ PROCESS_8X2X3 0, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 2, src_ptr, ref_ptr, src_stride, ref_stride
+
+ mov rcx, result_ptr
+
+ punpckldq mm5, mm6
+
+ movq [rcx], mm5
+ movd [rcx+8], mm7
+
+ STACK_FRAME_DESTROY_X3
+
+;void vp9_sad8x8x3_sse3(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride,
+; int *results)
+global sym(vp9_sad8x8x3_sse3) PRIVATE
+sym(vp9_sad8x8x3_sse3):
+
+ STACK_FRAME_CREATE_X3
+
+ PROCESS_8X2X3 0, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
+ PROCESS_8X2X3 2, src_ptr, ref_ptr, src_stride, ref_stride
+
+ mov rcx, result_ptr
+
+ punpckldq mm5, mm6
+
+ movq [rcx], mm5
+ movd [rcx+8], mm7
+
+ STACK_FRAME_DESTROY_X3
+
+;void vp9_sad4x4x3_sse3(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride,
+; int *results)
+global sym(vp9_sad4x4x3_sse3) PRIVATE
+sym(vp9_sad4x4x3_sse3):
+
+ STACK_FRAME_CREATE_X3
+
+ movd mm0, DWORD PTR [src_ptr]
+ movd mm1, DWORD PTR [ref_ptr]
+
+ movd mm2, DWORD PTR [src_ptr+src_stride]
+ movd mm3, DWORD PTR [ref_ptr+ref_stride]
+
+ punpcklbw mm0, mm2
+ punpcklbw mm1, mm3
+
+ movd mm4, DWORD PTR [ref_ptr+1]
+ movd mm5, DWORD PTR [ref_ptr+2]
+
+ movd mm2, DWORD PTR [ref_ptr+ref_stride+1]
+ movd mm3, DWORD PTR [ref_ptr+ref_stride+2]
+
+ psadbw mm1, mm0
+
+ punpcklbw mm4, mm2
+ punpcklbw mm5, mm3
+
+ psadbw mm4, mm0
+ psadbw mm5, mm0
+
+ lea src_ptr, [src_ptr+src_stride*2]
+ lea ref_ptr, [ref_ptr+ref_stride*2]
+
+ movd mm0, DWORD PTR [src_ptr]
+ movd mm2, DWORD PTR [ref_ptr]
+
+ movd mm3, DWORD PTR [src_ptr+src_stride]
+ movd mm6, DWORD PTR [ref_ptr+ref_stride]
+
+ punpcklbw mm0, mm3
+ punpcklbw mm2, mm6
+
+ movd mm3, DWORD PTR [ref_ptr+1]
+ movd mm7, DWORD PTR [ref_ptr+2]
+
+ psadbw mm2, mm0
+
+ paddw mm1, mm2
+
+ movd mm2, DWORD PTR [ref_ptr+ref_stride+1]
+ movd mm6, DWORD PTR [ref_ptr+ref_stride+2]
+
+ punpcklbw mm3, mm2
+ punpcklbw mm7, mm6
+
+ psadbw mm3, mm0
+ psadbw mm7, mm0
+
+ paddw mm3, mm4
+ paddw mm7, mm5
+
+ mov rcx, result_ptr
+
+ punpckldq mm1, mm3
+
+ movq [rcx], mm1
+ movd [rcx+8], mm7
+
+ STACK_FRAME_DESTROY_X3
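
The x3 kernels score the reference at three consecutive horizontal offsets (+0, +1, +2) in one pass, using lddqu for the unaligned loads. What each one computes, as plain C:

    #include <stdint.h>
    #include <stdlib.h>

    void sad_x3_c(const uint8_t *src, int src_stride, const uint8_t *ref,
                  int ref_stride, int w, int h, int res[3]) {
      for (int off = 0; off < 3; ++off) {
        int sad = 0;
        for (int i = 0; i < h; ++i)
          for (int j = 0; j < w; ++j)
            sad += abs(src[i * src_stride + j] - ref[i * ref_stride + j + off]);
        res[off] = sad;
      }
    }
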
diff --git a/libvpx/vp9/encoder/x86/vp9_sad_sse4.asm b/libvpx/vp9/encoder/x86/vp9_sad_sse4.asm
new file mode 100644
index 0000000..faf1768
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_sad_sse4.asm
@@ -0,0 +1,359 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+%macro PROCESS_16X2X8 1
+%if %1
+ movdqa xmm0, XMMWORD PTR [rsi]
+ movq xmm1, MMWORD PTR [rdi]
+ movq xmm3, MMWORD PTR [rdi+8]
+ movq xmm2, MMWORD PTR [rdi+16]
+ punpcklqdq xmm1, xmm3
+ punpcklqdq xmm3, xmm2
+
+ movdqa xmm2, xmm1
+ mpsadbw xmm1, xmm0, 0x0
+ mpsadbw xmm2, xmm0, 0x5
+
+ psrldq xmm0, 8
+
+ movdqa xmm4, xmm3
+ mpsadbw xmm3, xmm0, 0x0
+ mpsadbw xmm4, xmm0, 0x5
+
+ paddw xmm1, xmm2
+ paddw xmm1, xmm3
+ paddw xmm1, xmm4
+%else
+ movdqa xmm0, XMMWORD PTR [rsi]
+ movq xmm5, MMWORD PTR [rdi]
+ movq xmm3, MMWORD PTR [rdi+8]
+ movq xmm2, MMWORD PTR [rdi+16]
+ punpcklqdq xmm5, xmm3
+ punpcklqdq xmm3, xmm2
+
+ movdqa xmm2, xmm5
+ mpsadbw xmm5, xmm0, 0x0
+ mpsadbw xmm2, xmm0, 0x5
+
+ psrldq xmm0, 8
+
+ movdqa xmm4, xmm3
+ mpsadbw xmm3, xmm0, 0x0
+ mpsadbw xmm4, xmm0, 0x5
+
+ paddw xmm5, xmm2
+ paddw xmm5, xmm3
+ paddw xmm5, xmm4
+
+ paddw xmm1, xmm5
+%endif
+ movdqa xmm0, XMMWORD PTR [rsi + rax]
+ movq xmm5, MMWORD PTR [rdi+ rdx]
+ movq xmm3, MMWORD PTR [rdi+ rdx+8]
+ movq xmm2, MMWORD PTR [rdi+ rdx+16]
+ punpcklqdq xmm5, xmm3
+ punpcklqdq xmm3, xmm2
+
+ lea rsi, [rsi+rax*2]
+ lea rdi, [rdi+rdx*2]
+
+ movdqa xmm2, xmm5
+ mpsadbw xmm5, xmm0, 0x0
+ mpsadbw xmm2, xmm0, 0x5
+
+ psrldq xmm0, 8
+ movdqa xmm4, xmm3
+ mpsadbw xmm3, xmm0, 0x0
+ mpsadbw xmm4, xmm0, 0x5
+
+ paddw xmm5, xmm2
+ paddw xmm5, xmm3
+ paddw xmm5, xmm4
+
+ paddw xmm1, xmm5
+%endmacro
+
+%macro PROCESS_8X2X8 1
+%if %1
+ movq xmm0, MMWORD PTR [rsi]
+ movq xmm1, MMWORD PTR [rdi]
+ movq xmm3, MMWORD PTR [rdi+8]
+ punpcklqdq xmm1, xmm3
+
+ movdqa xmm2, xmm1
+ mpsadbw xmm1, xmm0, 0x0
+ mpsadbw xmm2, xmm0, 0x5
+ paddw xmm1, xmm2
+%else
+ movq xmm0, MMWORD PTR [rsi]
+ movq xmm5, MMWORD PTR [rdi]
+ movq xmm3, MMWORD PTR [rdi+8]
+ punpcklqdq xmm5, xmm3
+
+ movdqa xmm2, xmm5
+ mpsadbw xmm5, xmm0, 0x0
+ mpsadbw xmm2, xmm0, 0x5
+ paddw xmm5, xmm2
+
+ paddw xmm1, xmm5
+%endif
+ movq xmm0, MMWORD PTR [rsi + rax]
+ movq xmm5, MMWORD PTR [rdi+ rdx]
+ movq xmm3, MMWORD PTR [rdi+ rdx+8]
+ punpcklqdq xmm5, xmm3
+
+ lea rsi, [rsi+rax*2]
+ lea rdi, [rdi+rdx*2]
+
+ movdqa xmm2, xmm5
+ mpsadbw xmm5, xmm0, 0x0
+ mpsadbw xmm2, xmm0, 0x5
+ paddw xmm5, xmm2
+
+ paddw xmm1, xmm5
+%endmacro
+
+%macro PROCESS_4X2X8 1
+%if %1
+ movd xmm0, [rsi]
+ movq xmm1, MMWORD PTR [rdi]
+ movq xmm3, MMWORD PTR [rdi+8]
+ punpcklqdq xmm1, xmm3
+
+ mpsadbw xmm1, xmm0, 0x0
+%else
+ movd xmm0, [rsi]
+ movq xmm5, MMWORD PTR [rdi]
+ movq xmm3, MMWORD PTR [rdi+8]
+ punpcklqdq xmm5, xmm3
+
+ mpsadbw xmm5, xmm0, 0x0
+
+ paddw xmm1, xmm5
+%endif
+ movd xmm0, [rsi + rax]
+ movq xmm5, MMWORD PTR [rdi+ rdx]
+ movq xmm3, MMWORD PTR [rdi+ rdx+8]
+ punpcklqdq xmm5, xmm3
+
+ lea rsi, [rsi+rax*2]
+ lea rdi, [rdi+rdx*2]
+
+ mpsadbw xmm5, xmm0, 0x0
+
+ paddw xmm1, xmm5
+%endmacro
+
+%macro WRITE_AS_INTS 0
+ mov rdi, arg(4) ;Results
+ pxor xmm0, xmm0
+ movdqa xmm2, xmm1
+ punpcklwd xmm1, xmm0
+ punpckhwd xmm2, xmm0
+
+ movdqa [rdi], xmm1
+ movdqa [rdi + 16], xmm2
+%endmacro
+
+;void vp9_sad16x16x8_sse4(
+; const unsigned char *src_ptr,
+; int src_stride,
+; const unsigned char *ref_ptr,
+; int ref_stride,
+; unsigned short *sad_array);
+global sym(vp9_sad16x16x8_sse4) PRIVATE
+sym(vp9_sad16x16x8_sse4):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ PROCESS_16X2X8 1
+ PROCESS_16X2X8 0
+ PROCESS_16X2X8 0
+ PROCESS_16X2X8 0
+ PROCESS_16X2X8 0
+ PROCESS_16X2X8 0
+ PROCESS_16X2X8 0
+ PROCESS_16X2X8 0
+
+ WRITE_AS_INTS
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp9_sad16x8x8_sse4(
+; const unsigned char *src_ptr,
+; int src_stride,
+; const unsigned char *ref_ptr,
+; int ref_stride,
+; unsigned short *sad_array
+;);
+global sym(vp9_sad16x8x8_sse4) PRIVATE
+sym(vp9_sad16x8x8_sse4):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ PROCESS_16X2X8 1
+ PROCESS_16X2X8 0
+ PROCESS_16X2X8 0
+ PROCESS_16X2X8 0
+
+ WRITE_AS_INTS
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp9_sad8x8x8_sse4(
+; const unsigned char *src_ptr,
+; int src_stride,
+; const unsigned char *ref_ptr,
+; int ref_stride,
+; unsigned short *sad_array
+;);
+global sym(vp9_sad8x8x8_sse4) PRIVATE
+sym(vp9_sad8x8x8_sse4):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ PROCESS_8X2X8 1
+ PROCESS_8X2X8 0
+ PROCESS_8X2X8 0
+ PROCESS_8X2X8 0
+
+ WRITE_AS_INTS
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp9_sad8x16x8_sse4(
+; const unsigned char *src_ptr,
+; int src_stride,
+; const unsigned char *ref_ptr,
+; int ref_stride,
+; unsigned short *sad_array
+;);
+global sym(vp9_sad8x16x8_sse4) PRIVATE
+sym(vp9_sad8x16x8_sse4):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ PROCESS_8X2X8 1
+ PROCESS_8X2X8 0
+ PROCESS_8X2X8 0
+ PROCESS_8X2X8 0
+ PROCESS_8X2X8 0
+ PROCESS_8X2X8 0
+ PROCESS_8X2X8 0
+ PROCESS_8X2X8 0
+
+ WRITE_AS_INTS
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp9_sad4x4x8_sse4(
+; const unsigned char *src_ptr,
+; int src_stride,
+; const unsigned char *ref_ptr,
+; int ref_stride,
+; unsigned short *sad_array
+;);
+global sym(vp9_sad4x4x8_sse4) PRIVATE
+sym(vp9_sad4x4x8_sse4):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ PROCESS_4X2X8 1
+ PROCESS_4X2X8 0
+
+ WRITE_AS_INTS
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+
+
diff --git a/libvpx/vp9/encoder/x86/vp9_sad_ssse3.asm b/libvpx/vp9/encoder/x86/vp9_sad_ssse3.asm
new file mode 100644
index 0000000..0cb3542
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_sad_ssse3.asm
@@ -0,0 +1,370 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+%macro PROCESS_16X2X3 1
+%if %1
+ movdqa xmm0, XMMWORD PTR [rsi]
+ lddqu xmm5, XMMWORD PTR [rdi]
+ lddqu xmm6, XMMWORD PTR [rdi+1]
+ lddqu xmm7, XMMWORD PTR [rdi+2]
+
+ psadbw xmm5, xmm0
+ psadbw xmm6, xmm0
+ psadbw xmm7, xmm0
+%else
+ movdqa xmm0, XMMWORD PTR [rsi]
+ lddqu xmm1, XMMWORD PTR [rdi]
+ lddqu xmm2, XMMWORD PTR [rdi+1]
+ lddqu xmm3, XMMWORD PTR [rdi+2]
+
+ psadbw xmm1, xmm0
+ psadbw xmm2, xmm0
+ psadbw xmm3, xmm0
+
+ paddw xmm5, xmm1
+ paddw xmm6, xmm2
+ paddw xmm7, xmm3
+%endif
+ movdqa xmm0, XMMWORD PTR [rsi+rax]
+ lddqu xmm1, XMMWORD PTR [rdi+rdx]
+ lddqu xmm2, XMMWORD PTR [rdi+rdx+1]
+ lddqu xmm3, XMMWORD PTR [rdi+rdx+2]
+
+ lea rsi, [rsi+rax*2]
+ lea rdi, [rdi+rdx*2]
+
+ psadbw xmm1, xmm0
+ psadbw xmm2, xmm0
+ psadbw xmm3, xmm0
+
+ paddw xmm5, xmm1
+ paddw xmm6, xmm2
+ paddw xmm7, xmm3
+%endmacro
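+
+; Hedged scalar model of one PROCESS_16X2X3 pass (helper name illustrative):
+; it accumulates the SADs of two 16-byte source rows against the reference
+; at byte offsets 0, 1 and 2, one running total per offset (xmm5, xmm6 and
+; xmm7 above).
+;
+;   static void sad16x2_x3_ref(const unsigned char *src, int src_stride,
+;                              const unsigned char *ref, int ref_stride,
+;                              unsigned int acc[3]) {
+;     for (int r = 0; r < 2; ++r)
+;       for (int d = 0; d < 3; ++d)
+;         for (int c = 0; c < 16; ++c) {
+;           int diff = src[r * src_stride + c] - ref[r * ref_stride + c + d];
+;           acc[d] += diff < 0 ? -diff : diff;
+;         }
+;   }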
+
+%macro PROCESS_16X2X3_OFFSET 2
+%if %1
+ movdqa xmm0, XMMWORD PTR [rsi]
+ movdqa xmm4, XMMWORD PTR [rdi]
+ movdqa xmm7, XMMWORD PTR [rdi+16]
+
+ movdqa xmm5, xmm7
+ palignr xmm5, xmm4, %2
+
+ movdqa xmm6, xmm7
+ palignr xmm6, xmm4, (%2+1)
+
+ palignr xmm7, xmm4, (%2+2)
+
+ psadbw xmm5, xmm0
+ psadbw xmm6, xmm0
+ psadbw xmm7, xmm0
+%else
+ movdqa xmm0, XMMWORD PTR [rsi]
+ movdqa xmm4, XMMWORD PTR [rdi]
+ movdqa xmm3, XMMWORD PTR [rdi+16]
+
+ movdqa xmm1, xmm3
+ palignr xmm1, xmm4, %2
+
+ movdqa xmm2, xmm3
+ palignr xmm2, xmm4, (%2+1)
+
+ palignr xmm3, xmm4, (%2+2)
+
+ psadbw xmm1, xmm0
+ psadbw xmm2, xmm0
+ psadbw xmm3, xmm0
+
+ paddw xmm5, xmm1
+ paddw xmm6, xmm2
+ paddw xmm7, xmm3
+%endif
+ movdqa xmm0, XMMWORD PTR [rsi+rax]
+ movdqa xmm4, XMMWORD PTR [rdi+rdx]
+ movdqa xmm3, XMMWORD PTR [rdi+rdx+16]
+
+ movdqa xmm1, xmm3
+ palignr xmm1, xmm4, %2
+
+ movdqa xmm2, xmm3
+ palignr xmm2, xmm4, (%2+1)
+
+ palignr xmm3, xmm4, (%2+2)
+
+ lea rsi, [rsi+rax*2]
+ lea rdi, [rdi+rdx*2]
+
+ psadbw xmm1, xmm0
+ psadbw xmm2, xmm0
+ psadbw xmm3, xmm0
+
+ paddw xmm5, xmm1
+ paddw xmm6, xmm2
+ paddw xmm7, xmm3
+%endmacro
+
+%macro PROCESS_16X16X3_OFFSET 2
+%2_aligned_by_%1:
+
+ sub rdi, %1
+
+ PROCESS_16X2X3_OFFSET 1, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+
+ jmp %2_store_off
+
+%endmacro
+
+%macro PROCESS_16X8X3_OFFSET 2
+%2_aligned_by_%1:
+
+ sub rdi, %1
+
+ PROCESS_16X2X3_OFFSET 1, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+ PROCESS_16X2X3_OFFSET 0, %1
+
+ jmp %2_store_off
+
+%endmacro
+
+;void vp9_sad16x16x3_ssse3(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride,
+; int *results)
+global sym(vp9_sad16x16x3_ssse3) PRIVATE
+sym(vp9_sad16x16x3_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ SAVE_XMM 7
+ push rsi
+ push rdi
+ push rcx
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ mov rdx, 0xf
+ and rdx, rdi
+
+ jmp .vp9_sad16x16x3_ssse3_skiptable
+.vp9_sad16x16x3_ssse3_jumptable:
+ dd .vp9_sad16x16x3_ssse3_aligned_by_0 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_1 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_2 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_3 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_4 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_5 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_6 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_7 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_8 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_9 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_10 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_11 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_12 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_13 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_14 - .vp9_sad16x16x3_ssse3_do_jump
+ dd .vp9_sad16x16x3_ssse3_aligned_by_15 - .vp9_sad16x16x3_ssse3_do_jump
+.vp9_sad16x16x3_ssse3_skiptable:
+
+ call .vp9_sad16x16x3_ssse3_do_jump
+.vp9_sad16x16x3_ssse3_do_jump:
+ pop rcx ; get the address of do_jump
+ mov rax, .vp9_sad16x16x3_ssse3_jumptable - .vp9_sad16x16x3_ssse3_do_jump
+ add rax, rcx ; get the absolute address of vp9_sad16x16x3_ssse3_jumptable
+
+ movsxd rax, dword [rax + 4*rdx] ; get the 32 bit offset from the jumptable
+ add rcx, rax
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ jmp rcx
+
+ PROCESS_16X16X3_OFFSET 0, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 1, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 2, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 3, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 4, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 5, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 6, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 7, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 8, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 9, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 10, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 11, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 12, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 13, .vp9_sad16x16x3_ssse3
+ PROCESS_16X16X3_OFFSET 14, .vp9_sad16x16x3_ssse3
+
+.vp9_sad16x16x3_ssse3_aligned_by_15:
+ PROCESS_16X2X3 1
+ PROCESS_16X2X3 0
+ PROCESS_16X2X3 0
+ PROCESS_16X2X3 0
+ PROCESS_16X2X3 0
+ PROCESS_16X2X3 0
+ PROCESS_16X2X3 0
+ PROCESS_16X2X3 0
+
+.vp9_sad16x16x3_ssse3_store_off:
+ mov rdi, arg(4) ;Results
+
+ movq xmm0, xmm5
+ psrldq xmm5, 8
+
+ paddw xmm0, xmm5
+ movd [rdi], xmm0
+;-
+ movq xmm0, xmm6
+ psrldq xmm6, 8
+
+ paddw xmm0, xmm6
+ movd [rdi+4], xmm0
+;-
+ movq xmm0, xmm7
+ psrldq xmm7, 8
+
+ paddw xmm0, xmm7
+ movd [rdi+8], xmm0
+
+ ; begin epilog
+ pop rcx
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
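+; The prolog above dispatches on the low four address bits of ref_ptr. As a
+; hedged C sketch (using GNU C's computed goto purely for illustration; the
+; real targets are the PROCESS_16X16X3_OFFSET expansions plus the
+; aligned_by_15 fallthrough):
+;
+;   int align = (int)((uintptr_t)ref_ptr & 15);   /* rdx after the and      */
+;   goto *aligned_by[align];                      /* via the dd jumptable   */
+;                                                 /* of rel32 offsets       */
+;
+; Each aligned_by_N body first backs rdi up by N bytes so its loads are
+; 16-byte aligned, then uses palignr to synthesize the candidate rows at
+; offsets N, N+1 and N+2 from two aligned loads, avoiding unaligned loads
+; in the inner loop.
+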
+;void vp9_sad16x8x3_ssse3(
+; unsigned char *src_ptr,
+; int src_stride,
+; unsigned char *ref_ptr,
+; int ref_stride,
+; int *results)
+global sym(vp9_sad16x8x3_ssse3) PRIVATE
+sym(vp9_sad16x8x3_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ SAVE_XMM 7
+ push rsi
+ push rdi
+ push rcx
+ ; end prolog
+
+ mov rsi, arg(0) ;src_ptr
+ mov rdi, arg(2) ;ref_ptr
+
+ mov rdx, 0xf
+ and rdx, rdi
+
+ jmp .vp9_sad16x8x3_ssse3_skiptable
+.vp9_sad16x8x3_ssse3_jumptable:
+ dd .vp9_sad16x8x3_ssse3_aligned_by_0 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_1 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_2 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_3 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_4 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_5 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_6 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_7 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_8 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_9 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_10 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_11 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_12 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_13 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_14 - .vp9_sad16x8x3_ssse3_do_jump
+ dd .vp9_sad16x8x3_ssse3_aligned_by_15 - .vp9_sad16x8x3_ssse3_do_jump
+.vp9_sad16x8x3_ssse3_skiptable:
+
+ call .vp9_sad16x8x3_ssse3_do_jump
+.vp9_sad16x8x3_ssse3_do_jump:
+ pop rcx ; get the address of do_jump
+ mov rax, .vp9_sad16x8x3_ssse3_jumptable - .vp9_sad16x8x3_ssse3_do_jump
+ add rax, rcx ; get the absolute address of vp9_sad16x8x3_ssse3_jumptable
+
+ movsxd rax, dword [rax + 4*rdx] ; get the 32 bit offset from the jumptable
+ add rcx, rax
+
+ movsxd rax, dword ptr arg(1) ;src_stride
+ movsxd rdx, dword ptr arg(3) ;ref_stride
+
+ jmp rcx
+
+ PROCESS_16X8X3_OFFSET 0, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 1, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 2, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 3, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 4, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 5, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 6, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 7, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 8, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 9, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 10, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 11, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 12, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 13, .vp9_sad16x8x3_ssse3
+ PROCESS_16X8X3_OFFSET 14, .vp9_sad16x8x3_ssse3
+
+.vp9_sad16x8x3_ssse3_aligned_by_15:
+
+ PROCESS_16X2X3 1
+ PROCESS_16X2X3 0
+ PROCESS_16X2X3 0
+ PROCESS_16X2X3 0
+
+.vp9_sad16x8x3_ssse3_store_off:
+ mov rdi, arg(4) ;Results
+
+ movq xmm0, xmm5
+ psrldq xmm5, 8
+
+ paddw xmm0, xmm5
+ movd [rdi], xmm0
+;-
+ movq xmm0, xmm6
+ psrldq xmm6, 8
+
+ paddw xmm0, xmm6
+ movd [rdi+4], xmm0
+;-
+ movq xmm0, xmm7
+ psrldq xmm7, 8
+
+ paddw xmm0, xmm7
+ movd [rdi+8], xmm0
+
+ ; begin epilog
+ pop rcx
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
diff --git a/libvpx/vp9/encoder/x86/vp9_ssim_opt.asm b/libvpx/vp9/encoder/x86/vp9_ssim_opt.asm
new file mode 100644
index 0000000..455d10d
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_ssim_opt.asm
@@ -0,0 +1,216 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "vpx_ports/x86_abi_support.asm"
+
+; tabulate_ssim - sums sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr
+%macro TABULATE_SSIM 0
+ paddusw xmm15, xmm3 ; sum_s
+ paddusw xmm14, xmm4 ; sum_r
+ movdqa xmm1, xmm3
+ pmaddwd xmm1, xmm1
+ paddd xmm13, xmm1 ; sum_sq_s
+ movdqa xmm2, xmm4
+ pmaddwd xmm2, xmm2
+ paddd xmm12, xmm2 ; sum_sq_r
+ pmaddwd xmm3, xmm4
+ paddd xmm11, xmm3 ; sum_sxr
+%endmacro
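+
+; Scalar model of one TABULATE_SSIM step over eight source/reference words
+; s[i], r[i] (a sketch; note the asm accumulates sum_s and sum_r with
+; saturating word adds):
+;
+;   for (int i = 0; i < 8; ++i) {
+;     sum_s    += s[i];
+;     sum_r    += r[i];
+;     sum_sq_s += s[i] * s[i];
+;     sum_sq_r += r[i] * r[i];
+;     sum_sxr  += s[i] * r[i];
+;   }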
+
+; Sum across the register %1 starting with q words
+%macro SUM_ACROSS_Q 1
+ movdqa xmm2,%1
+ punpckldq %1,xmm0
+ punpckhdq xmm2,xmm0
+ paddq %1,xmm2
+ movdqa xmm2,%1
+ punpcklqdq %1,xmm0
+ punpckhqdq xmm2,xmm0
+ paddq %1,xmm2
+%endmacro
+
+; Sum across the register %1 starting with words
+%macro SUM_ACROSS_W 1
+ movdqa xmm1, %1
+ punpcklwd %1,xmm0
+ punpckhwd xmm1,xmm0
+ paddd %1, xmm1
+ SUM_ACROSS_Q %1
+%endmacro
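+
+; SUM_ACROSS_W (and SUM_ACROSS_Q, which it falls through to) fold a register
+; down to a single total in the low lanes; as a scalar sketch for eight
+; word lanes w[0..7]:
+;
+;   total = w[0] + w[1] + w[2] + w[3] + w[4] + w[5] + w[6] + w[7];
+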
+;void vp9_ssim_parms_16x16_sse2(
+; unsigned char *s,
+; int sp,
+; unsigned char *r,
+; int rp,
+; unsigned long *sum_s,
+; unsigned long *sum_r,
+; unsigned long *sum_sq_s,
+; unsigned long *sum_sq_r,
+; unsigned long *sum_sxr);
+;
+; TODO: Use parameter passing through a structure; the pxors are probably
+; unnecessary (the calling app will initialize to 0). Everything could fit
+; in sse2 without too much hassle, and better estimates are probably
+; possible with psadbw or pavgb. At this point this is just meant to be a
+; first pass for calculating all the parameters needed for 16x16 ssim so
+; we can play with dssim as distortion in the mode selection code.
+global sym(vp9_ssim_parms_16x16_sse2) PRIVATE
+sym(vp9_ssim_parms_16x16_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 9
+ SAVE_XMM 15
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;s
+ mov rcx, arg(1) ;sp
+ mov rdi, arg(2) ;r
+ mov rax, arg(3) ;rp
+
+ pxor xmm0, xmm0
+ pxor xmm15,xmm15 ;sum_s
+ pxor xmm14,xmm14 ;sum_r
+ pxor xmm13,xmm13 ;sum_sq_s
+ pxor xmm12,xmm12 ;sum_sq_r
+ pxor xmm11,xmm11 ;sum_sxr
+
+ mov rdx, 16 ;row counter
+.NextRow:
+
+ ;grab source and reference pixels
+ movdqu xmm5, [rsi]
+ movdqu xmm6, [rdi]
+ movdqa xmm3, xmm5
+ movdqa xmm4, xmm6
+ punpckhbw xmm3, xmm0 ; high_s
+ punpckhbw xmm4, xmm0 ; high_r
+
+ TABULATE_SSIM
+
+ movdqa xmm3, xmm5
+ movdqa xmm4, xmm6
+ punpcklbw xmm3, xmm0 ; low_s
+ punpcklbw xmm4, xmm0 ; low_r
+
+ TABULATE_SSIM
+
+ add rsi, rcx ; next s row
+ add rdi, rax ; next r row
+
+ dec rdx ; counter
+ jnz .NextRow
+
+ SUM_ACROSS_W xmm15
+ SUM_ACROSS_W xmm14
+ SUM_ACROSS_Q xmm13
+ SUM_ACROSS_Q xmm12
+ SUM_ACROSS_Q xmm11
+
+ mov rdi,arg(4)
+ movd [rdi], xmm15;
+ mov rdi,arg(5)
+ movd [rdi], xmm14;
+ mov rdi,arg(6)
+ movd [rdi], xmm13;
+ mov rdi,arg(7)
+ movd [rdi], xmm12;
+ mov rdi,arg(8)
+ movd [rdi], xmm11;
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_ssim_parms_8x8_sse2(
+; unsigned char *s,
+; int sp,
+; unsigned char *r,
+; int rp,
+; unsigned long *sum_s,
+; unsigned long *sum_r,
+; unsigned long *sum_sq_s,
+; unsigned long *sum_sq_r,
+; unsigned long *sum_sxr);
+;
+; TODO: Use parameter passing through a structure; the pxors are probably
+; unnecessary (the calling app will initialize to 0). Everything could fit
+; in sse2 without too much hassle, and better estimates are probably
+; possible with psadbw or pavgb. At this point this is just meant to be a
+; first pass for calculating all the parameters needed for 16x16 ssim so
+; we can play with dssim as distortion in the mode selection code.
+global sym(vp9_ssim_parms_8x8_sse2) PRIVATE
+sym(vp9_ssim_parms_8x8_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 9
+ SAVE_XMM 15
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;s
+ mov rcx, arg(1) ;sp
+ mov rdi, arg(2) ;r
+ mov rax, arg(3) ;rp
+
+ pxor xmm0, xmm0
+ pxor xmm15,xmm15 ;sum_s
+ pxor xmm14,xmm14 ;sum_r
+ pxor xmm13,xmm13 ;sum_sq_s
+ pxor xmm12,xmm12 ;sum_sq_r
+ pxor xmm11,xmm11 ;sum_sxr
+
+ mov rdx, 8 ;row counter
+.NextRow:
+
+ ;grab source and reference pixels
+ movq xmm3, [rsi]
+ movq xmm4, [rdi]
+ punpcklbw xmm3, xmm0 ; low_s
+ punpcklbw xmm4, xmm0 ; low_r
+
+ TABULATE_SSIM
+
+ add rsi, rcx ; next s row
+ add rdi, rax ; next r row
+
+ dec rdx ; counter
+ jnz .NextRow
+
+ SUM_ACROSS_W xmm15
+ SUM_ACROSS_W xmm14
+ SUM_ACROSS_Q xmm13
+ SUM_ACROSS_Q xmm12
+ SUM_ACROSS_Q xmm11
+
+ mov rdi,arg(4)
+ movd [rdi], xmm15;
+ mov rdi,arg(5)
+ movd [rdi], xmm14;
+ mov rdi,arg(6)
+ movd [rdi], xmm13;
+ mov rdi,arg(7)
+ movd [rdi], xmm12;
+ mov rdi,arg(8)
+ movd [rdi], xmm11;
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
diff --git a/libvpx/vp9/encoder/x86/vp9_subpel_variance_impl_sse2.asm b/libvpx/vp9/encoder/x86/vp9_subpel_variance_impl_sse2.asm
new file mode 100644
index 0000000..8a2a471
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_subpel_variance_impl_sse2.asm
@@ -0,0 +1,645 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+%define xmm_filter_shift 7
+
+;void vp9_filter_block2d_bil_var_sse2
+;(
+; unsigned char *ref_ptr,
+; int ref_pixels_per_line,
+; unsigned char *src_ptr,
+; int src_pixels_per_line,
+; unsigned int Height,
+; int xoffset,
+; int yoffset,
+; int *sum,
+; unsigned int *sumsquared
+;)
+global sym(vp9_filter_block2d_bil_var_sse2) PRIVATE
+sym(vp9_filter_block2d_bil_var_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 9
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ push rbx
+ ; end prolog
+
+ pxor xmm6, xmm6 ;
+ pxor xmm7, xmm7 ;
+
+ lea rsi, [GLOBAL(xmm_bi_rd)] ; rounding
+ movdqa xmm4, XMMWORD PTR [rsi]
+
+ lea rcx, [GLOBAL(bilinear_filters_sse2)]
+ movsxd rax, dword ptr arg(5) ; xoffset
+
+ cmp rax, 0 ; skip first_pass filter if xoffset=0
+ je filter_block2d_bil_var_sse2_sp_only
+
+ shl rax, 5 ; point to filter coeff with xoffset
+ lea rax, [rax + rcx] ; HFilter
+
+ movsxd rdx, dword ptr arg(6) ; yoffset
+
+ cmp rdx, 0 ; skip second_pass filter if yoffset=0
+ je filter_block2d_bil_var_sse2_fp_only
+
+ shl rdx, 5
+ lea rdx, [rdx + rcx] ; VFilter
+
+ mov rsi, arg(0) ;ref_ptr
+ mov rdi, arg(2) ;src_ptr
+ movsxd rcx, dword ptr arg(4) ;Height
+
+ pxor xmm0, xmm0 ;
+ movq xmm1, QWORD PTR [rsi] ;
+ movq xmm3, QWORD PTR [rsi+1] ;
+
+ punpcklbw xmm1, xmm0 ;
+ pmullw xmm1, [rax] ;
+ punpcklbw xmm3, xmm0
+ pmullw xmm3, [rax+16] ;
+
+ paddw xmm1, xmm3 ;
+ paddw xmm1, xmm4 ;
+ psraw xmm1, xmm_filter_shift ;
+ movdqa xmm5, xmm1
+
+ movsxd rbx, dword ptr arg(1) ;ref_pixels_per_line
+ lea rsi, [rsi + rbx]
+%if ABI_IS_32BIT=0
+ movsxd r9, dword ptr arg(3) ;src_pixels_per_line
+%endif
+
+filter_block2d_bil_var_sse2_loop:
+ movq xmm1, QWORD PTR [rsi] ;
+ movq xmm3, QWORD PTR [rsi+1] ;
+
+ punpcklbw xmm1, xmm0 ;
+ pmullw xmm1, [rax] ;
+ punpcklbw xmm3, xmm0 ;
+ pmullw xmm3, [rax+16] ;
+
+ paddw xmm1, xmm3 ;
+ paddw xmm1, xmm4 ;
+ psraw xmm1, xmm_filter_shift ;
+
+ movdqa xmm3, xmm5 ;
+ movdqa xmm5, xmm1 ;
+
+ pmullw xmm3, [rdx] ;
+ pmullw xmm1, [rdx+16] ;
+ paddw xmm1, xmm3 ;
+ paddw xmm1, xmm4 ;
+ psraw xmm1, xmm_filter_shift ;
+
+ movq xmm3, QWORD PTR [rdi] ;
+ punpcklbw xmm3, xmm0 ;
+
+ psubw xmm1, xmm3 ;
+ paddw xmm6, xmm1 ;
+
+ pmaddwd xmm1, xmm1 ;
+ paddd xmm7, xmm1 ;
+
+ lea rsi, [rsi + rbx] ;ref_pixels_per_line
+%if ABI_IS_32BIT
+ add rdi, dword ptr arg(3) ;src_pixels_per_line
+%else
+ lea rdi, [rdi + r9]
+%endif
+
+ sub rcx, 1 ;
+ jnz filter_block2d_bil_var_sse2_loop ;
+
+ jmp filter_block2d_bil_variance
+
+filter_block2d_bil_var_sse2_sp_only:
+ movsxd rdx, dword ptr arg(6) ; yoffset
+
+ cmp rdx, 0 ; skip all if both xoffset=0 and yoffset=0
+ je filter_block2d_bil_var_sse2_full_pixel
+
+ shl rdx, 5
+ lea rdx, [rdx + rcx] ; VFilter
+
+ mov rsi, arg(0) ;ref_ptr
+ mov rdi, arg(2) ;src_ptr
+ movsxd rcx, dword ptr arg(4) ;Height
+ movsxd rax, dword ptr arg(1) ;ref_pixels_per_line
+
+ pxor xmm0, xmm0 ;
+ movq xmm1, QWORD PTR [rsi] ;
+ punpcklbw xmm1, xmm0 ;
+
+ movsxd rbx, dword ptr arg(3) ;src_pixels_per_line
+ lea rsi, [rsi + rax]
+
+filter_block2d_bil_sp_only_loop:
+ movq xmm3, QWORD PTR [rsi] ;
+ punpcklbw xmm3, xmm0 ;
+ movdqa xmm5, xmm3
+
+ pmullw xmm1, [rdx] ;
+ pmullw xmm3, [rdx+16] ;
+ paddw xmm1, xmm3 ;
+ paddw xmm1, xmm4 ;
+ psraw xmm1, xmm_filter_shift ;
+
+ movq xmm3, QWORD PTR [rdi] ;
+ punpcklbw xmm3, xmm0 ;
+
+ psubw xmm1, xmm3 ;
+ paddw xmm6, xmm1 ;
+
+ pmaddwd xmm1, xmm1 ;
+ paddd xmm7, xmm1 ;
+
+ movdqa xmm1, xmm5 ;
+ lea rsi, [rsi + rax] ;ref_pixels_per_line
+ lea rdi, [rdi + rbx] ;src_pixels_per_line
+
+ sub rcx, 1 ;
+ jnz filter_block2d_bil_sp_only_loop ;
+
+ jmp filter_block2d_bil_variance
+
+filter_block2d_bil_var_sse2_full_pixel:
+ mov rsi, arg(0) ;ref_ptr
+ mov rdi, arg(2) ;src_ptr
+ movsxd rcx, dword ptr arg(4) ;Height
+ movsxd rax, dword ptr arg(1) ;ref_pixels_per_line
+ movsxd rbx, dword ptr arg(3) ;src_pixels_per_line
+ pxor xmm0, xmm0 ;
+
+filter_block2d_bil_full_pixel_loop:
+ movq xmm1, QWORD PTR [rsi] ;
+ punpcklbw xmm1, xmm0 ;
+
+ movq xmm2, QWORD PTR [rdi] ;
+ punpcklbw xmm2, xmm0 ;
+
+ psubw xmm1, xmm2 ;
+ paddw xmm6, xmm1 ;
+
+ pmaddwd xmm1, xmm1 ;
+ paddd xmm7, xmm1 ;
+
+ lea rsi, [rsi + rax] ;ref_pixels_per_line
+ lea rdi, [rdi + rbx] ;src_pixels_per_line
+
+ sub rcx, 1 ;
+ jnz filter_block2d_bil_full_pixel_loop ;
+
+ jmp filter_block2d_bil_variance
+
+filter_block2d_bil_var_sse2_fp_only:
+ mov rsi, arg(0) ;ref_ptr
+ mov rdi, arg(2) ;src_ptr
+ movsxd rcx, dword ptr arg(4) ;Height
+ movsxd rdx, dword ptr arg(1) ;ref_pixels_per_line
+
+ pxor xmm0, xmm0 ;
+ movsxd rbx, dword ptr arg(3) ;src_pixels_per_line
+
+filter_block2d_bil_fp_only_loop:
+ movq xmm1, QWORD PTR [rsi] ;
+ movq xmm3, QWORD PTR [rsi+1] ;
+
+ punpcklbw xmm1, xmm0 ;
+ pmullw xmm1, [rax] ;
+ punpcklbw xmm3, xmm0 ;
+ pmullw xmm3, [rax+16] ;
+
+ paddw xmm1, xmm3 ;
+ paddw xmm1, xmm4 ;
+ psraw xmm1, xmm_filter_shift ;
+
+ movq xmm3, QWORD PTR [rdi] ;
+ punpcklbw xmm3, xmm0 ;
+
+ psubw xmm1, xmm3 ;
+ paddw xmm6, xmm1 ;
+
+ pmaddwd xmm1, xmm1 ;
+ paddd xmm7, xmm1 ;
+ lea rsi, [rsi + rdx]
+ lea rdi, [rdi + rbx] ;src_pixels_per_line
+
+ sub rcx, 1 ;
+ jnz filter_block2d_bil_fp_only_loop ;
+
+ jmp filter_block2d_bil_variance
+
+filter_block2d_bil_variance:
+ movdq2q mm6, xmm6 ;
+ movdq2q mm7, xmm7 ;
+
+ psrldq xmm6, 8
+ psrldq xmm7, 8
+
+ movdq2q mm2, xmm6
+ movdq2q mm3, xmm7
+
+ paddw mm6, mm2
+ paddd mm7, mm3
+
+ pxor mm3, mm3 ;
+ pxor mm2, mm2 ;
+
+ punpcklwd mm2, mm6 ;
+ punpckhwd mm3, mm6 ;
+
+ paddd mm2, mm3 ;
+ movq mm6, mm2 ;
+
+ psrlq mm6, 32 ;
+ paddd mm2, mm6 ;
+
+ psrad mm2, 16 ;
+ movq mm4, mm7 ;
+
+ psrlq mm4, 32 ;
+ paddd mm4, mm7 ;
+
+ mov rsi, arg(7) ; sum
+ mov rdi, arg(8) ; sumsquared
+
+ movd [rsi], mm2 ; xsum
+ movd [rdi], mm4 ; xxsum
+
+ ; begin epilog
+ pop rbx
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
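+; All four code paths above share the same accumulation, shown here as a
+; hedged scalar model: e is the (optionally bilinearly filtered) reference
+; pixel minus the source pixel, summed per 8-pixel row into xmm6/xmm7 and
+; finally reduced into the two outputs.
+;
+;   int e = filtered_ref_pixel - src_pixel;
+;   *sum        += e;       /* xmm6, word lanes  */
+;   *sumsquared += e * e;   /* xmm7, dword lanes */
+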
+
+;void vp9_half_horiz_vert_variance16x_h_sse2
+;(
+; unsigned char *ref_ptr,
+; int ref_pixels_per_line,
+; unsigned char *src_ptr,
+; int src_pixels_per_line,
+; unsigned int Height,
+; int *sum,
+; unsigned int *sumsquared
+;)
+global sym(vp9_half_horiz_vert_variance16x_h_sse2) PRIVATE
+sym(vp9_half_horiz_vert_variance16x_h_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ pxor xmm6, xmm6 ; error accumulator
+        pxor            xmm7, xmm7              ; sse accumulator
+ mov rsi, arg(0) ;ref_ptr ;
+
+ mov rdi, arg(2) ;src_ptr ;
+ movsxd rcx, dword ptr arg(4) ;Height ;
+ movsxd rax, dword ptr arg(1) ;ref_pixels_per_line
+ movsxd rdx, dword ptr arg(3) ;src_pixels_per_line
+
+ pxor xmm0, xmm0 ;
+
+ movdqu xmm5, XMMWORD PTR [rsi]
+ movdqu xmm3, XMMWORD PTR [rsi+1]
+        pavgb           xmm5, xmm3              ; xmm5 = avg(xmm5,xmm3) horizontal line 1
+
+ lea rsi, [rsi + rax]
+
+.half_horiz_vert_variance16x_h_1:
+        movdqu          xmm1, XMMWORD PTR [rsi]     ;
+        movdqu          xmm2, XMMWORD PTR [rsi+1]   ;
+        pavgb           xmm1, xmm2              ; xmm1 = avg(xmm1,xmm2) horizontal line i+1
+
+        pavgb           xmm5, xmm1              ; xmm5 = vertical average of the above
+
+ movdqa xmm4, xmm5
+ punpcklbw xmm5, xmm0 ; xmm5 = words of above
+ punpckhbw xmm4, xmm0
+
+ movq xmm3, QWORD PTR [rdi] ; xmm3 = d0,d1,d2..d7
+ punpcklbw xmm3, xmm0 ; xmm3 = words of above
+ psubw xmm5, xmm3 ; xmm5 -= xmm3
+
+ movq xmm3, QWORD PTR [rdi+8]
+ punpcklbw xmm3, xmm0
+ psubw xmm4, xmm3
+
+ paddw xmm6, xmm5 ; xmm6 += accumulated column differences
+ paddw xmm6, xmm4
+ pmaddwd xmm5, xmm5 ; xmm5 *= xmm5
+ pmaddwd xmm4, xmm4
+ paddd xmm7, xmm5 ; xmm7 += accumulated square column differences
+ paddd xmm7, xmm4
+
+ movdqa xmm5, xmm1 ; save xmm1 for use on the next row
+
+ lea rsi, [rsi + rax]
+ lea rdi, [rdi + rdx]
+
+ sub rcx, 1 ;
+ jnz .half_horiz_vert_variance16x_h_1 ;
+
+ pxor xmm1, xmm1
+ pxor xmm5, xmm5
+
+ punpcklwd xmm0, xmm6
+ punpckhwd xmm1, xmm6
+ psrad xmm0, 16
+ psrad xmm1, 16
+ paddd xmm0, xmm1
+ movdqa xmm1, xmm0
+
+ movdqa xmm6, xmm7
+ punpckldq xmm6, xmm5
+ punpckhdq xmm7, xmm5
+ paddd xmm6, xmm7
+
+ punpckldq xmm0, xmm5
+ punpckhdq xmm1, xmm5
+ paddd xmm0, xmm1
+
+ movdqa xmm7, xmm6
+ movdqa xmm1, xmm0
+
+ psrldq xmm7, 8
+ psrldq xmm1, 8
+
+ paddd xmm6, xmm7
+ paddd xmm0, xmm1
+
+ mov rsi, arg(5) ;[Sum]
+ mov rdi, arg(6) ;[SSE]
+
+ movd [rsi], xmm0
+ movd [rdi], xmm6
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_half_vert_variance16x_h_sse2
+;(
+; unsigned char *ref_ptr,
+; int ref_pixels_per_line,
+; unsigned char *src_ptr,
+; int src_pixels_per_line,
+; unsigned int Height,
+; int *sum,
+; unsigned int *sumsquared
+;)
+global sym(vp9_half_vert_variance16x_h_sse2) PRIVATE
+sym(vp9_half_vert_variance16x_h_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ pxor xmm6, xmm6 ; error accumulator
+        pxor            xmm7, xmm7              ; sse accumulator
+ mov rsi, arg(0) ;ref_ptr
+
+ mov rdi, arg(2) ;src_ptr
+ movsxd rcx, dword ptr arg(4) ;Height
+ movsxd rax, dword ptr arg(1) ;ref_pixels_per_line
+ movsxd rdx, dword ptr arg(3) ;src_pixels_per_line
+
+ movdqu xmm5, XMMWORD PTR [rsi]
+ lea rsi, [rsi + rax ]
+ pxor xmm0, xmm0
+
+.half_vert_variance16x_h_1:
+ movdqu xmm3, XMMWORD PTR [rsi]
+
+        pavgb           xmm5, xmm3              ; xmm5 = avg(xmm5,xmm3)
+ movdqa xmm4, xmm5
+ punpcklbw xmm5, xmm0
+ punpckhbw xmm4, xmm0
+
+ movq xmm2, QWORD PTR [rdi]
+ punpcklbw xmm2, xmm0
+ psubw xmm5, xmm2
+ movq xmm2, QWORD PTR [rdi+8]
+ punpcklbw xmm2, xmm0
+ psubw xmm4, xmm2
+
+ paddw xmm6, xmm5 ; xmm6 += accumulated column differences
+ paddw xmm6, xmm4
+ pmaddwd xmm5, xmm5 ; xmm5 *= xmm5
+ pmaddwd xmm4, xmm4
+ paddd xmm7, xmm5 ; xmm7 += accumulated square column differences
+ paddd xmm7, xmm4
+
+ movdqa xmm5, xmm3
+
+ lea rsi, [rsi + rax]
+ lea rdi, [rdi + rdx]
+
+ sub rcx, 1
+ jnz .half_vert_variance16x_h_1
+
+ pxor xmm1, xmm1
+ pxor xmm5, xmm5
+
+ punpcklwd xmm0, xmm6
+ punpckhwd xmm1, xmm6
+ psrad xmm0, 16
+ psrad xmm1, 16
+ paddd xmm0, xmm1
+ movdqa xmm1, xmm0
+
+ movdqa xmm6, xmm7
+ punpckldq xmm6, xmm5
+ punpckhdq xmm7, xmm5
+ paddd xmm6, xmm7
+
+ punpckldq xmm0, xmm5
+ punpckhdq xmm1, xmm5
+ paddd xmm0, xmm1
+
+ movdqa xmm7, xmm6
+ movdqa xmm1, xmm0
+
+ psrldq xmm7, 8
+ psrldq xmm1, 8
+
+ paddd xmm6, xmm7
+ paddd xmm0, xmm1
+
+ mov rsi, arg(5) ;[Sum]
+ mov rdi, arg(6) ;[SSE]
+
+ movd [rsi], xmm0
+ movd [rdi], xmm6
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_half_horiz_variance16x_h_sse2
+;(
+; unsigned char *ref_ptr,
+; int ref_pixels_per_line,
+; unsigned char *src_ptr,
+; int src_pixels_per_line,
+; unsigned int Height,
+; int *sum,
+; unsigned int *sumsquared
+;)
+global sym(vp9_half_horiz_variance16x_h_sse2) PRIVATE
+sym(vp9_half_horiz_variance16x_h_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ pxor xmm6, xmm6 ; error accumulator
+        pxor            xmm7, xmm7              ; sse accumulator
+ mov rsi, arg(0) ;ref_ptr ;
+
+ mov rdi, arg(2) ;src_ptr ;
+ movsxd rcx, dword ptr arg(4) ;Height ;
+ movsxd rax, dword ptr arg(1) ;ref_pixels_per_line
+ movsxd rdx, dword ptr arg(3) ;src_pixels_per_line
+
+ pxor xmm0, xmm0 ;
+
+.half_horiz_variance16x_h_1:
+ movdqu xmm5, XMMWORD PTR [rsi] ; xmm5 = s0,s1,s2..s15
+ movdqu xmm3, XMMWORD PTR [rsi+1] ; xmm3 = s1,s2,s3..s16
+
+        pavgb           xmm5, xmm3              ; xmm5 = avg(xmm5,xmm3)
+ movdqa xmm1, xmm5
+ punpcklbw xmm5, xmm0 ; xmm5 = words of above
+ punpckhbw xmm1, xmm0
+
+ movq xmm3, QWORD PTR [rdi] ; xmm3 = d0,d1,d2..d7
+ punpcklbw xmm3, xmm0 ; xmm3 = words of above
+ movq xmm2, QWORD PTR [rdi+8]
+ punpcklbw xmm2, xmm0
+
+ psubw xmm5, xmm3 ; xmm5 -= xmm3
+ psubw xmm1, xmm2
+ paddw xmm6, xmm5 ; xmm6 += accumulated column differences
+ paddw xmm6, xmm1
+ pmaddwd xmm5, xmm5 ; xmm5 *= xmm5
+ pmaddwd xmm1, xmm1
+ paddd xmm7, xmm5 ; xmm7 += accumulated square column differences
+ paddd xmm7, xmm1
+
+ lea rsi, [rsi + rax]
+ lea rdi, [rdi + rdx]
+
+ sub rcx, 1 ;
+ jnz .half_horiz_variance16x_h_1 ;
+
+ pxor xmm1, xmm1
+ pxor xmm5, xmm5
+
+ punpcklwd xmm0, xmm6
+ punpckhwd xmm1, xmm6
+ psrad xmm0, 16
+ psrad xmm1, 16
+ paddd xmm0, xmm1
+ movdqa xmm1, xmm0
+
+ movdqa xmm6, xmm7
+ punpckldq xmm6, xmm5
+ punpckhdq xmm7, xmm5
+ paddd xmm6, xmm7
+
+ punpckldq xmm0, xmm5
+ punpckhdq xmm1, xmm5
+ paddd xmm0, xmm1
+
+ movdqa xmm7, xmm6
+ movdqa xmm1, xmm0
+
+ psrldq xmm7, 8
+ psrldq xmm1, 8
+
+ paddd xmm6, xmm7
+ paddd xmm0, xmm1
+
+ mov rsi, arg(5) ;[Sum]
+ mov rdi, arg(6) ;[SSE]
+
+ movd [rsi], xmm0
+ movd [rdi], xmm6
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+SECTION_RODATA
+; short xmm_bi_rd[8] = { 64, 64, 64, 64, 64, 64, 64, 64 };
+align 16
+xmm_bi_rd:
+ times 8 dw 64
+align 16
+bilinear_filters_sse2:
+ dw 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0
+ dw 120, 120, 120, 120, 120, 120, 120, 120, 8, 8, 8, 8, 8, 8, 8, 8
+ dw 112, 112, 112, 112, 112, 112, 112, 112, 16, 16, 16, 16, 16, 16, 16, 16
+ dw 104, 104, 104, 104, 104, 104, 104, 104, 24, 24, 24, 24, 24, 24, 24, 24
+ dw 96, 96, 96, 96, 96, 96, 96, 96, 32, 32, 32, 32, 32, 32, 32, 32
+ dw 88, 88, 88, 88, 88, 88, 88, 88, 40, 40, 40, 40, 40, 40, 40, 40
+ dw 80, 80, 80, 80, 80, 80, 80, 80, 48, 48, 48, 48, 48, 48, 48, 48
+ dw 72, 72, 72, 72, 72, 72, 72, 72, 56, 56, 56, 56, 56, 56, 56, 56
+ dw 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
+ dw 56, 56, 56, 56, 56, 56, 56, 56, 72, 72, 72, 72, 72, 72, 72, 72
+ dw 48, 48, 48, 48, 48, 48, 48, 48, 80, 80, 80, 80, 80, 80, 80, 80
+ dw 40, 40, 40, 40, 40, 40, 40, 40, 88, 88, 88, 88, 88, 88, 88, 88
+ dw 32, 32, 32, 32, 32, 32, 32, 32, 96, 96, 96, 96, 96, 96, 96, 96
+ dw 24, 24, 24, 24, 24, 24, 24, 24, 104, 104, 104, 104, 104, 104, 104, 104
+ dw 16, 16, 16, 16, 16, 16, 16, 16, 112, 112, 112, 112, 112, 112, 112, 112
+ dw 8, 8, 8, 8, 8, 8, 8, 8, 120, 120, 120, 120, 120, 120, 120, 120
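+; Each table row holds the 2-tap weights (f0, f1) for one subpel offset,
+; with f0 + f1 = 128. As a hedged scalar sketch, both filter passes above
+; compute:
+;
+;   out = (a * f0 + b * f1 + 64) >> 7;   /* xmm_bi_rd supplies the +64 */
+;
+; where a and b are the two neighboring pixels along the pass direction.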
diff --git a/libvpx/vp9/encoder/x86/vp9_subtract_mmx.asm b/libvpx/vp9/encoder/x86/vp9_subtract_mmx.asm
new file mode 100644
index 0000000..e9eda4f
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_subtract_mmx.asm
@@ -0,0 +1,432 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+;void vp9_subtract_b_mmx_impl(unsigned char *z, int src_stride,
+; short *diff, unsigned char *Predictor,
+; int pitch);
+global sym(vp9_subtract_b_mmx_impl) PRIVATE
+sym(vp9_subtract_b_mmx_impl):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ push rsi
+ push rdi
+ ; end prolog
+
+
+ mov rdi, arg(2) ;diff
+ mov rax, arg(3) ;Predictor
+ mov rsi, arg(0) ;z
+ movsxd rdx, dword ptr arg(1);src_stride;
+ movsxd rcx, dword ptr arg(4);pitch
+ pxor mm7, mm7
+
+ movd mm0, [rsi]
+ movd mm1, [rax]
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ psubw mm0, mm1
+ movq [rdi], mm0
+
+
+ movd mm0, [rsi+rdx]
+ movd mm1, [rax+rcx]
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ psubw mm0, mm1
+ movq [rdi+rcx*2],mm0
+
+
+ movd mm0, [rsi+rdx*2]
+ movd mm1, [rax+rcx*2]
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ psubw mm0, mm1
+ movq [rdi+rcx*4], mm0
+
+ lea rsi, [rsi+rdx*2]
+ lea rcx, [rcx+rcx*2]
+
+
+
+ movd mm0, [rsi+rdx]
+ movd mm1, [rax+rcx]
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ psubw mm0, mm1
+ movq [rdi+rcx*2], mm0
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_subtract_mby_mmx(short *diff, unsigned char *src, unsigned char *pred, int stride)
+global sym(vp9_subtract_mby_mmx) PRIVATE
+sym(vp9_subtract_mby_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ ; end prolog
+
+
+ mov rsi, arg(1) ;src
+ mov rdi, arg(0) ;diff
+
+ mov rax, arg(2) ;pred
+ movsxd rdx, dword ptr arg(3) ;stride
+
+ mov rcx, 16
+ pxor mm0, mm0
+
+.submby_loop:
+
+ movq mm1, [rsi]
+ movq mm3, [rax]
+
+ movq mm2, mm1
+ movq mm4, mm3
+
+ punpcklbw mm1, mm0
+ punpcklbw mm3, mm0
+
+ punpckhbw mm2, mm0
+ punpckhbw mm4, mm0
+
+ psubw mm1, mm3
+ psubw mm2, mm4
+
+ movq [rdi], mm1
+ movq [rdi+8], mm2
+
+
+ movq mm1, [rsi+8]
+ movq mm3, [rax+8]
+
+ movq mm2, mm1
+ movq mm4, mm3
+
+ punpcklbw mm1, mm0
+ punpcklbw mm3, mm0
+
+ punpckhbw mm2, mm0
+ punpckhbw mm4, mm0
+
+ psubw mm1, mm3
+ psubw mm2, mm4
+
+ movq [rdi+16], mm1
+ movq [rdi+24], mm2
+
+
+ add rdi, 32
+ add rax, 16
+
+ lea rsi, [rsi+rdx]
+
+ sub rcx, 1
+ jnz .submby_loop
+
+ pop rdi
+ pop rsi
+ ; begin epilog
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp9_subtract_mbuv_mmx(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride)
+global sym(vp9_subtract_mbuv_mmx) PRIVATE
+sym(vp9_subtract_mbuv_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ push rsi
+ push rdi
+ ; end prolog
+
+ ;short *udiff = diff + 256;
+ ;short *vdiff = diff + 320;
+ ;unsigned char *upred = pred + 256;
+ ;unsigned char *vpred = pred + 320;
+
+ ;unsigned char *z = usrc;
+ ;unsigned short *diff = udiff;
+ ;unsigned char *Predictor= upred;
+
+ mov rdi, arg(0) ;diff
+ mov rax, arg(3) ;pred
+ mov rsi, arg(1) ;z = usrc
+ add rdi, 256*2 ;diff = diff + 256 (shorts)
+ add rax, 256 ;Predictor = pred + 256
+ movsxd rdx, dword ptr arg(4) ;stride;
+ pxor mm7, mm7
+
+ movq mm0, [rsi]
+ movq mm1, [rax]
+ movq mm3, mm0
+ movq mm4, mm1
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ punpckhbw mm3, mm7
+ punpckhbw mm4, mm7
+ psubw mm0, mm1
+ psubw mm3, mm4
+ movq [rdi], mm0
+ movq [rdi+8], mm3
+
+
+ movq mm0, [rsi+rdx]
+ movq mm1, [rax+8]
+ movq mm3, mm0
+ movq mm4, mm1
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ punpckhbw mm3, mm7
+ punpckhbw mm4, mm7
+ psubw mm0, mm1
+ psubw mm3, mm4
+ movq [rdi+16], mm0
+ movq [rdi+24], mm3
+
+ movq mm0, [rsi+rdx*2]
+ movq mm1, [rax+16]
+ movq mm3, mm0
+ movq mm4, mm1
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ punpckhbw mm3, mm7
+ punpckhbw mm4, mm7
+ psubw mm0, mm1
+ psubw mm3, mm4
+ movq [rdi+32], mm0
+ movq [rdi+40], mm3
+ lea rsi, [rsi+rdx*2]
+
+
+ movq mm0, [rsi+rdx]
+ movq mm1, [rax+24]
+ movq mm3, mm0
+ movq mm4, mm1
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ punpckhbw mm3, mm7
+ punpckhbw mm4, mm7
+ psubw mm0, mm1
+ psubw mm3, mm4
+
+ movq [rdi+48], mm0
+ movq [rdi+56], mm3
+
+
+ add rdi, 64
+ add rax, 32
+ lea rsi, [rsi+rdx*2]
+
+
+ movq mm0, [rsi]
+ movq mm1, [rax]
+ movq mm3, mm0
+ movq mm4, mm1
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ punpckhbw mm3, mm7
+ punpckhbw mm4, mm7
+ psubw mm0, mm1
+ psubw mm3, mm4
+ movq [rdi], mm0
+ movq [rdi+8], mm3
+
+
+ movq mm0, [rsi+rdx]
+ movq mm1, [rax+8]
+ movq mm3, mm0
+ movq mm4, mm1
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ punpckhbw mm3, mm7
+ punpckhbw mm4, mm7
+ psubw mm0, mm1
+ psubw mm3, mm4
+ movq [rdi+16], mm0
+ movq [rdi+24], mm3
+
+ movq mm0, [rsi+rdx*2]
+ movq mm1, [rax+16]
+ movq mm3, mm0
+ movq mm4, mm1
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ punpckhbw mm3, mm7
+ punpckhbw mm4, mm7
+ psubw mm0, mm1
+ psubw mm3, mm4
+ movq [rdi+32], mm0
+ movq [rdi+40], mm3
+ lea rsi, [rsi+rdx*2]
+
+
+ movq mm0, [rsi+rdx]
+ movq mm1, [rax+24]
+ movq mm3, mm0
+ movq mm4, mm1
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ punpckhbw mm3, mm7
+ punpckhbw mm4, mm7
+ psubw mm0, mm1
+ psubw mm3, mm4
+
+ movq [rdi+48], mm0
+ movq [rdi+56], mm3
+
+ ;unsigned char *z = vsrc;
+ ;unsigned short *diff = vdiff;
+ ;unsigned char *Predictor= vpred;
+
+ mov rdi, arg(0) ;diff
+ mov rax, arg(3) ;pred
+        mov             rsi,        arg(2) ;z = vsrc
+ add rdi, 320*2 ;diff = diff + 320 (shorts)
+ add rax, 320 ;Predictor = pred + 320
+ movsxd rdx, dword ptr arg(4) ;stride;
+ pxor mm7, mm7
+
+ movq mm0, [rsi]
+ movq mm1, [rax]
+ movq mm3, mm0
+ movq mm4, mm1
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ punpckhbw mm3, mm7
+ punpckhbw mm4, mm7
+ psubw mm0, mm1
+ psubw mm3, mm4
+ movq [rdi], mm0
+ movq [rdi+8], mm3
+
+
+ movq mm0, [rsi+rdx]
+ movq mm1, [rax+8]
+ movq mm3, mm0
+ movq mm4, mm1
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ punpckhbw mm3, mm7
+ punpckhbw mm4, mm7
+ psubw mm0, mm1
+ psubw mm3, mm4
+ movq [rdi+16], mm0
+ movq [rdi+24], mm3
+
+ movq mm0, [rsi+rdx*2]
+ movq mm1, [rax+16]
+ movq mm3, mm0
+ movq mm4, mm1
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ punpckhbw mm3, mm7
+ punpckhbw mm4, mm7
+ psubw mm0, mm1
+ psubw mm3, mm4
+ movq [rdi+32], mm0
+ movq [rdi+40], mm3
+ lea rsi, [rsi+rdx*2]
+
+
+ movq mm0, [rsi+rdx]
+ movq mm1, [rax+24]
+ movq mm3, mm0
+ movq mm4, mm1
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ punpckhbw mm3, mm7
+ punpckhbw mm4, mm7
+ psubw mm0, mm1
+ psubw mm3, mm4
+
+ movq [rdi+48], mm0
+ movq [rdi+56], mm3
+
+
+ add rdi, 64
+ add rax, 32
+ lea rsi, [rsi+rdx*2]
+
+
+ movq mm0, [rsi]
+ movq mm1, [rax]
+ movq mm3, mm0
+ movq mm4, mm1
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ punpckhbw mm3, mm7
+ punpckhbw mm4, mm7
+ psubw mm0, mm1
+ psubw mm3, mm4
+ movq [rdi], mm0
+ movq [rdi+8], mm3
+
+
+ movq mm0, [rsi+rdx]
+ movq mm1, [rax+8]
+ movq mm3, mm0
+ movq mm4, mm1
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ punpckhbw mm3, mm7
+ punpckhbw mm4, mm7
+ psubw mm0, mm1
+ psubw mm3, mm4
+ movq [rdi+16], mm0
+ movq [rdi+24], mm3
+
+ movq mm0, [rsi+rdx*2]
+ movq mm1, [rax+16]
+ movq mm3, mm0
+ movq mm4, mm1
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ punpckhbw mm3, mm7
+ punpckhbw mm4, mm7
+ psubw mm0, mm1
+ psubw mm3, mm4
+ movq [rdi+32], mm0
+ movq [rdi+40], mm3
+ lea rsi, [rsi+rdx*2]
+
+
+ movq mm0, [rsi+rdx]
+ movq mm1, [rax+24]
+ movq mm3, mm0
+ movq mm4, mm1
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ punpckhbw mm3, mm7
+ punpckhbw mm4, mm7
+ psubw mm0, mm1
+ psubw mm3, mm4
+
+ movq [rdi+48], mm0
+ movq [rdi+56], mm3
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
diff --git a/libvpx/vp9/encoder/x86/vp9_subtract_sse2.asm b/libvpx/vp9/encoder/x86/vp9_subtract_sse2.asm
new file mode 100644
index 0000000..739d948
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_subtract_sse2.asm
@@ -0,0 +1,356 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+;void vp9_subtract_b_sse2_impl(unsigned char *z, int src_stride,
+; short *diff, unsigned char *Predictor,
+; int pitch);
+global sym(vp9_subtract_b_sse2_impl) PRIVATE
+sym(vp9_subtract_b_sse2_impl):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rdi, arg(2) ;diff
+ mov rax, arg(3) ;Predictor
+ mov rsi, arg(0) ;z
+ movsxd rdx, dword ptr arg(1);src_stride;
+ movsxd rcx, dword ptr arg(4);pitch
+ pxor mm7, mm7
+
+ movd mm0, [rsi]
+ movd mm1, [rax]
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ psubw mm0, mm1
+ movq MMWORD PTR [rdi], mm0
+
+ movd mm0, [rsi+rdx]
+ movd mm1, [rax+rcx]
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ psubw mm0, mm1
+ movq MMWORD PTR [rdi+rcx*2], mm0
+
+ movd mm0, [rsi+rdx*2]
+ movd mm1, [rax+rcx*2]
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ psubw mm0, mm1
+ movq MMWORD PTR [rdi+rcx*4], mm0
+
+ lea rsi, [rsi+rdx*2]
+ lea rcx, [rcx+rcx*2]
+
+ movd mm0, [rsi+rdx]
+ movd mm1, [rax+rcx]
+ punpcklbw mm0, mm7
+ punpcklbw mm1, mm7
+ psubw mm0, mm1
+ movq MMWORD PTR [rdi+rcx*2], mm0
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp9_subtract_mby_sse2(short *diff, unsigned char *src, unsigned char *pred, int stride)
+global sym(vp9_subtract_mby_sse2) PRIVATE
+sym(vp9_subtract_mby_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(1) ;src
+ mov rdi, arg(0) ;diff
+
+ mov rax, arg(2) ;pred
+ movsxd rdx, dword ptr arg(3) ;stride
+
+        mov             rcx, 8      ; process two lines at a time
+
+.submby_loop:
+ movdqa xmm0, XMMWORD PTR [rsi] ; src
+ movdqa xmm1, XMMWORD PTR [rax] ; pred
+
+ movdqa xmm2, xmm0
+ psubb xmm0, xmm1
+
+ pxor xmm1, [GLOBAL(t80)] ;convert to signed values
+ pxor xmm2, [GLOBAL(t80)]
+ pcmpgtb xmm1, xmm2 ; obtain sign information
+
+ movdqa xmm2, xmm0
+ movdqa xmm3, xmm1
+ punpcklbw xmm0, xmm1 ; put sign back to subtraction
+ punpckhbw xmm2, xmm3 ; put sign back to subtraction
+
+ movdqa XMMWORD PTR [rdi], xmm0
+ movdqa XMMWORD PTR [rdi +16], xmm2
+
+ movdqa xmm4, XMMWORD PTR [rsi + rdx]
+ movdqa xmm5, XMMWORD PTR [rax + 16]
+
+ movdqa xmm6, xmm4
+ psubb xmm4, xmm5
+
+ pxor xmm5, [GLOBAL(t80)] ;convert to signed values
+ pxor xmm6, [GLOBAL(t80)]
+ pcmpgtb xmm5, xmm6 ; obtain sign information
+
+ movdqa xmm6, xmm4
+ movdqa xmm7, xmm5
+ punpcklbw xmm4, xmm5 ; put sign back to subtraction
+ punpckhbw xmm6, xmm7 ; put sign back to subtraction
+
+ movdqa XMMWORD PTR [rdi +32], xmm4
+ movdqa XMMWORD PTR [rdi +48], xmm6
+
+ add rdi, 64
+ add rax, 32
+ lea rsi, [rsi+rdx*2]
+
+ sub rcx, 1
+ jnz .submby_loop
+
+ pop rdi
+ pop rsi
+ ; begin epilog
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
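+
+; The psubb / pxor 0x80 / pcmpgtb sequence above computes a widening signed
+; difference of unsigned bytes without unpacking first. Hedged scalar model
+; for one pixel:
+;
+;   int d   = src[i] - pred[i];       /* true difference, -255..255        */
+;   int neg = pred[i] > src[i];       /* pcmpgtb after the 0x80 bias turns */
+;                                     /* the signed compare into this      */
+;                                     /* unsigned one                      */
+;   diff[i] = (short)(((neg ? 0xff : 0x00) << 8) | (d & 0xff));   /* == d  */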
+
+;void vp9_subtract_mbuv_sse2(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride)
+global sym(vp9_subtract_mbuv_sse2) PRIVATE
+sym(vp9_subtract_mbuv_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rdi, arg(0) ;diff
+ mov rax, arg(3) ;pred
+ mov rsi, arg(1) ;z = usrc
+ add rdi, 256*2 ;diff = diff + 256 (shorts)
+ add rax, 256 ;Predictor = pred + 256
+ movsxd rdx, dword ptr arg(4) ;stride;
+ lea rcx, [rdx + rdx*2]
+
+ ;u
+ ;line 0 1
+ movq xmm0, MMWORD PTR [rsi] ; src
+ movq xmm2, MMWORD PTR [rsi+rdx]
+ movdqa xmm1, XMMWORD PTR [rax] ; pred
+ punpcklqdq xmm0, xmm2
+
+ movdqa xmm2, xmm0
+ psubb xmm0, xmm1 ; subtraction with sign missed
+
+ pxor xmm1, [GLOBAL(t80)] ;convert to signed values
+ pxor xmm2, [GLOBAL(t80)]
+ pcmpgtb xmm1, xmm2 ; obtain sign information
+
+ movdqa xmm2, xmm0
+ movdqa xmm3, xmm1
+ punpcklbw xmm0, xmm1 ; put sign back to subtraction
+ punpckhbw xmm2, xmm3 ; put sign back to subtraction
+
+ movdqa XMMWORD PTR [rdi], xmm0
+ movdqa XMMWORD PTR [rdi +16], xmm2
+
+ ;line 2 3
+ movq xmm0, MMWORD PTR [rsi+rdx*2] ; src
+ movq xmm2, MMWORD PTR [rsi+rcx]
+ movdqa xmm1, XMMWORD PTR [rax+16] ; pred
+ punpcklqdq xmm0, xmm2
+
+ movdqa xmm2, xmm0
+ psubb xmm0, xmm1 ; subtraction with sign missed
+
+ pxor xmm1, [GLOBAL(t80)] ;convert to signed values
+ pxor xmm2, [GLOBAL(t80)]
+ pcmpgtb xmm1, xmm2 ; obtain sign information
+
+ movdqa xmm2, xmm0
+ movdqa xmm3, xmm1
+ punpcklbw xmm0, xmm1 ; put sign back to subtraction
+ punpckhbw xmm2, xmm3 ; put sign back to subtraction
+
+ movdqa XMMWORD PTR [rdi + 32], xmm0
+ movdqa XMMWORD PTR [rdi + 48], xmm2
+
+ ;line 4 5
+ lea rsi, [rsi + rdx*4]
+
+ movq xmm0, MMWORD PTR [rsi] ; src
+ movq xmm2, MMWORD PTR [rsi+rdx]
+ movdqa xmm1, XMMWORD PTR [rax + 32] ; pred
+ punpcklqdq xmm0, xmm2
+
+ movdqa xmm2, xmm0
+ psubb xmm0, xmm1 ; subtraction with sign missed
+
+ pxor xmm1, [GLOBAL(t80)] ;convert to signed values
+ pxor xmm2, [GLOBAL(t80)]
+ pcmpgtb xmm1, xmm2 ; obtain sign information
+
+ movdqa xmm2, xmm0
+ movdqa xmm3, xmm1
+ punpcklbw xmm0, xmm1 ; put sign back to subtraction
+ punpckhbw xmm2, xmm3 ; put sign back to subtraction
+
+ movdqa XMMWORD PTR [rdi + 64], xmm0
+ movdqa XMMWORD PTR [rdi + 80], xmm2
+
+ ;line 6 7
+ movq xmm0, MMWORD PTR [rsi+rdx*2] ; src
+ movq xmm2, MMWORD PTR [rsi+rcx]
+ movdqa xmm1, XMMWORD PTR [rax+ 48] ; pred
+ punpcklqdq xmm0, xmm2
+
+ movdqa xmm2, xmm0
+ psubb xmm0, xmm1 ; subtraction with sign missed
+
+ pxor xmm1, [GLOBAL(t80)] ;convert to signed values
+ pxor xmm2, [GLOBAL(t80)]
+ pcmpgtb xmm1, xmm2 ; obtain sign information
+
+ movdqa xmm2, xmm0
+ movdqa xmm3, xmm1
+ punpcklbw xmm0, xmm1 ; put sign back to subtraction
+ punpckhbw xmm2, xmm3 ; put sign back to subtraction
+
+ movdqa XMMWORD PTR [rdi + 96], xmm0
+ movdqa XMMWORD PTR [rdi + 112], xmm2
+
+ ;v
+ mov rsi, arg(2) ;z = vsrc
+ add rdi, 64*2 ;diff = diff + 320 (shorts)
+ add rax, 64 ;Predictor = pred + 320
+
+ ;line 0 1
+ movq xmm0, MMWORD PTR [rsi] ; src
+ movq xmm2, MMWORD PTR [rsi+rdx]
+ movdqa xmm1, XMMWORD PTR [rax] ; pred
+ punpcklqdq xmm0, xmm2
+
+ movdqa xmm2, xmm0
+ psubb xmm0, xmm1 ; subtraction with sign missed
+
+ pxor xmm1, [GLOBAL(t80)] ;convert to signed values
+ pxor xmm2, [GLOBAL(t80)]
+ pcmpgtb xmm1, xmm2 ; obtain sign information
+
+ movdqa xmm2, xmm0
+ movdqa xmm3, xmm1
+ punpcklbw xmm0, xmm1 ; put sign back to subtraction
+ punpckhbw xmm2, xmm3 ; put sign back to subtraction
+
+ movdqa XMMWORD PTR [rdi], xmm0
+ movdqa XMMWORD PTR [rdi +16], xmm2
+
+ ;line 2 3
+ movq xmm0, MMWORD PTR [rsi+rdx*2] ; src
+ movq xmm2, MMWORD PTR [rsi+rcx]
+ movdqa xmm1, XMMWORD PTR [rax+16] ; pred
+ punpcklqdq xmm0, xmm2
+
+ movdqa xmm2, xmm0
+ psubb xmm0, xmm1 ; subtraction with sign missed
+
+ pxor xmm1, [GLOBAL(t80)] ;convert to signed values
+ pxor xmm2, [GLOBAL(t80)]
+ pcmpgtb xmm1, xmm2 ; obtain sign information
+
+ movdqa xmm2, xmm0
+ movdqa xmm3, xmm1
+ punpcklbw xmm0, xmm1 ; put sign back to subtraction
+ punpckhbw xmm2, xmm3 ; put sign back to subtraction
+
+ movdqa XMMWORD PTR [rdi + 32], xmm0
+ movdqa XMMWORD PTR [rdi + 48], xmm2
+
+ ;line 4 5
+ lea rsi, [rsi + rdx*4]
+
+ movq xmm0, MMWORD PTR [rsi] ; src
+ movq xmm2, MMWORD PTR [rsi+rdx]
+ movdqa xmm1, XMMWORD PTR [rax + 32] ; pred
+ punpcklqdq xmm0, xmm2
+
+ movdqa xmm2, xmm0
+ psubb xmm0, xmm1 ; subtraction with sign missed
+
+ pxor xmm1, [GLOBAL(t80)] ;convert to signed values
+ pxor xmm2, [GLOBAL(t80)]
+ pcmpgtb xmm1, xmm2 ; obtain sign information
+
+ movdqa xmm2, xmm0
+ movdqa xmm3, xmm1
+ punpcklbw xmm0, xmm1 ; put sign back to subtraction
+ punpckhbw xmm2, xmm3 ; put sign back to subtraction
+
+ movdqa XMMWORD PTR [rdi + 64], xmm0
+ movdqa XMMWORD PTR [rdi + 80], xmm2
+
+ ;line 6 7
+ movq xmm0, MMWORD PTR [rsi+rdx*2] ; src
+ movq xmm2, MMWORD PTR [rsi+rcx]
+ movdqa xmm1, XMMWORD PTR [rax+ 48] ; pred
+ punpcklqdq xmm0, xmm2
+
+ movdqa xmm2, xmm0
+ psubb xmm0, xmm1 ; subtraction with sign missed
+
+ pxor xmm1, [GLOBAL(t80)] ;convert to signed values
+ pxor xmm2, [GLOBAL(t80)]
+ pcmpgtb xmm1, xmm2 ; obtain sign information
+
+ movdqa xmm2, xmm0
+ movdqa xmm3, xmm1
+ punpcklbw xmm0, xmm1 ; put sign back to subtraction
+ punpckhbw xmm2, xmm3 ; put sign back to subtraction
+
+ movdqa XMMWORD PTR [rdi + 96], xmm0
+ movdqa XMMWORD PTR [rdi + 112], xmm2
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+SECTION_RODATA
+align 16
+t80:
+ times 16 db 0x80
diff --git a/libvpx/vp9/encoder/x86/vp9_temporal_filter_apply_sse2.asm b/libvpx/vp9/encoder/x86/vp9_temporal_filter_apply_sse2.asm
new file mode 100644
index 0000000..d2d13b3
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_temporal_filter_apply_sse2.asm
@@ -0,0 +1,207 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+; void vp9_temporal_filter_apply_sse2 | arg
+; (unsigned char *frame1, | 0
+; unsigned int stride, | 1
+; unsigned char *frame2, | 2
+; unsigned int block_size, | 3
+; int strength, | 4
+; int filter_weight, | 5
+; unsigned int *accumulator, | 6
+; unsigned short *count) | 7
+global sym(vp9_temporal_filter_apply_sse2) PRIVATE
+sym(vp9_temporal_filter_apply_sse2):
+
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 8
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ALIGN_STACK 16, rax
+ %define block_size 0
+ %define strength 16
+ %define filter_weight 32
+ %define rounding_bit 48
+ %define rbp_backup 64
+ %define stack_size 80
+ sub rsp, stack_size
+ mov [rsp + rbp_backup], rbp
+ ; end prolog
+
+ mov rdx, arg(3)
+ mov [rsp + block_size], rdx
+ movd xmm6, arg(4)
+ movdqa [rsp + strength], xmm6 ; where strength is used, all 16 bytes are read
+
+ ; calculate the rounding bit outside the loop
+ ; 0x8000 >> (16 - strength)
+ mov rdx, 16
+ sub rdx, arg(4) ; 16 - strength
+ movq xmm4, rdx ; can't use rdx w/ shift
+ movdqa xmm5, [GLOBAL(_const_top_bit)]
+ psrlw xmm5, xmm4
+ movdqa [rsp + rounding_bit], xmm5
+
+ mov rsi, arg(0) ; src/frame1
+ mov rdx, arg(2) ; predictor frame
+ mov rdi, arg(6) ; accumulator
+ mov rax, arg(7) ; count
+
+ ; dup the filter weight and store for later
+ movd xmm0, arg(5) ; filter_weight
+ pshuflw xmm0, xmm0, 0
+ punpcklwd xmm0, xmm0
+ movdqa [rsp + filter_weight], xmm0
+
+ mov rbp, arg(1) ; stride
+ pxor xmm7, xmm7 ; zero for extraction
+
+ lea rcx, [rdx + 16*16*1]
+ cmp dword ptr [rsp + block_size], 8
+ jne .temporal_filter_apply_load_16
+ lea rcx, [rdx + 8*8*1]
+
+.temporal_filter_apply_load_8:
+ movq xmm0, [rsi] ; first row
+ lea rsi, [rsi + rbp] ; += stride
+ punpcklbw xmm0, xmm7 ; src[ 0- 7]
+ movq xmm1, [rsi] ; second row
+ lea rsi, [rsi + rbp] ; += stride
+ punpcklbw xmm1, xmm7 ; src[ 8-15]
+ jmp .temporal_filter_apply_load_finished
+
+.temporal_filter_apply_load_16:
+ movdqa xmm0, [rsi] ; src (frame1)
+ lea rsi, [rsi + rbp] ; += stride
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, xmm7 ; src[ 0- 7]
+ punpckhbw xmm1, xmm7 ; src[ 8-15]
+
+.temporal_filter_apply_load_finished:
+ movdqa xmm2, [rdx] ; predictor (frame2)
+ movdqa xmm3, xmm2
+ punpcklbw xmm2, xmm7 ; pred[ 0- 7]
+ punpckhbw xmm3, xmm7 ; pred[ 8-15]
+
+ ; modifier = src_byte - pixel_value
+ psubw xmm0, xmm2 ; src - pred[ 0- 7]
+ psubw xmm1, xmm3 ; src - pred[ 8-15]
+
+ ; modifier *= modifier
+        pmullw          xmm0, xmm0              ; modifier[ 0- 7]^2
+        pmullw          xmm1, xmm1              ; modifier[ 8-15]^2
+
+ ; modifier *= 3
+ pmullw xmm0, [GLOBAL(_const_3w)]
+ pmullw xmm1, [GLOBAL(_const_3w)]
+
+        ; modifier += 0x8000 >> (16 - strength)
+ paddw xmm0, [rsp + rounding_bit]
+ paddw xmm1, [rsp + rounding_bit]
+
+ ; modifier >>= strength
+ psrlw xmm0, [rsp + strength]
+ psrlw xmm1, [rsp + strength]
+
+ ; modifier = 16 - modifier
+ ; saturation takes care of modifier > 16
+ movdqa xmm3, [GLOBAL(_const_16w)]
+ movdqa xmm2, [GLOBAL(_const_16w)]
+ psubusw xmm3, xmm1
+ psubusw xmm2, xmm0
+
+ ; modifier *= filter_weight
+ pmullw xmm2, [rsp + filter_weight]
+ pmullw xmm3, [rsp + filter_weight]
+
+ ; count
+ movdqa xmm4, [rax]
+ movdqa xmm5, [rax+16]
+ ; += modifier
+ paddw xmm4, xmm2
+ paddw xmm5, xmm3
+ ; write back
+ movdqa [rax], xmm4
+ movdqa [rax+16], xmm5
+ lea rax, [rax + 16*2] ; count += 16*(sizeof(short))
+
+ ; load and extract the predictor up to shorts
+ pxor xmm7, xmm7
+ movdqa xmm0, [rdx]
+ lea rdx, [rdx + 16*1] ; pred += 16*(sizeof(char))
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, xmm7 ; pred[ 0- 7]
+ punpckhbw xmm1, xmm7 ; pred[ 8-15]
+
+ ; modifier *= pixel_value
+ pmullw xmm0, xmm2
+ pmullw xmm1, xmm3
+
+ ; expand to double words
+ movdqa xmm2, xmm0
+ punpcklwd xmm0, xmm7 ; [ 0- 3]
+ punpckhwd xmm2, xmm7 ; [ 4- 7]
+ movdqa xmm3, xmm1
+ punpcklwd xmm1, xmm7 ; [ 8-11]
+ punpckhwd xmm3, xmm7 ; [12-15]
+
+ ; accumulator
+ movdqa xmm4, [rdi]
+ movdqa xmm5, [rdi+16]
+ movdqa xmm6, [rdi+32]
+ movdqa xmm7, [rdi+48]
+ ; += modifier
+ paddd xmm4, xmm0
+ paddd xmm5, xmm2
+ paddd xmm6, xmm1
+ paddd xmm7, xmm3
+ ; write back
+ movdqa [rdi], xmm4
+ movdqa [rdi+16], xmm5
+ movdqa [rdi+32], xmm6
+ movdqa [rdi+48], xmm7
+ lea rdi, [rdi + 16*4] ; accumulator += 16*(sizeof(int))
+
+ cmp rdx, rcx
+ je .temporal_filter_apply_epilog
+ pxor xmm7, xmm7 ; zero for extraction
+ cmp dword ptr [rsp + block_size], 16
+ je .temporal_filter_apply_load_16
+ jmp .temporal_filter_apply_load_8
+
+.temporal_filter_apply_epilog:
+ ; begin epilog
+ mov rbp, [rsp + rbp_backup]
+ add rsp, stack_size
+ pop rsp
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+SECTION_RODATA
+align 16
+_const_3w:
+ times 8 dw 3
+align 16
+_const_top_bit:
+ times 8 dw 1<<15
+align 16
+_const_16w:
+ times 8 dw 16
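+
+; Hedged scalar model of the per-pixel update implemented above (variable
+; names illustrative):
+;
+;   int diff = frame1[i] - frame2[i];
+;   int mod  = diff * diff * 3;
+;   mod     += 0x8000 >> (16 - strength);     /* rounding_bit            */
+;   mod    >>= strength;
+;   mod      = 16 - (mod > 16 ? 16 : mod);    /* psubusw saturates at 0  */
+;   mod     *= filter_weight;
+;   count[i]       += mod;                    /* 16-bit accumulate       */
+;   accumulator[i] += mod * frame2[i];        /* 32-bit accumulate       */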
diff --git a/libvpx/vp9/encoder/x86/vp9_variance_impl_mmx.asm b/libvpx/vp9/encoder/x86/vp9_variance_impl_mmx.asm
new file mode 100644
index 0000000..9f140c9
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_variance_impl_mmx.asm
@@ -0,0 +1,851 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+;unsigned int vp9_get_mb_ss_mmx( short *src_ptr )
+global sym(vp9_get_mb_ss_mmx) PRIVATE
+sym(vp9_get_mb_ss_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ sub rsp, 8
+ ; end prolog
+
+ mov rax, arg(0) ;src_ptr
+ mov rcx, 16
+ pxor mm4, mm4
+
+.NEXTROW:
+ movq mm0, [rax]
+ movq mm1, [rax+8]
+ movq mm2, [rax+16]
+ movq mm3, [rax+24]
+ pmaddwd mm0, mm0
+ pmaddwd mm1, mm1
+ pmaddwd mm2, mm2
+ pmaddwd mm3, mm3
+
+ paddd mm4, mm0
+ paddd mm4, mm1
+ paddd mm4, mm2
+ paddd mm4, mm3
+
+ add rax, 32
+ dec rcx
+ ja .NEXTROW
+ movq QWORD PTR [rsp], mm4
+
+ ;return sum[0]+sum[1];
+ movsxd rax, dword ptr [rsp]
+ movsxd rcx, dword ptr [rsp+4]
+ add rax, rcx
+
+
+ ; begin epilog
+ add rsp, 8
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
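+; Reference model (a sketch) of the routine above: the sum of squares of
+; the 256 shorts in a 16x16 difference block, accumulated in 32 bits.
+;
+;   unsigned int get_mb_ss_ref(const short *src) {
+;     unsigned int ss = 0;
+;     for (int i = 0; i < 256; ++i)
+;       ss += (unsigned int)(src[i] * src[i]);
+;     return ss;
+;   }
+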
+
+;unsigned int vp9_get8x8var_mmx
+;(
+; unsigned char *src_ptr,
+; int source_stride,
+; unsigned char *ref_ptr,
+; int recon_stride,
+; unsigned int *SSE,
+; int *Sum
+;)
+global sym(vp9_get8x8var_mmx) PRIVATE
+sym(vp9_get8x8var_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ push rsi
+ push rdi
+ push rbx
+ sub rsp, 16
+ ; end prolog
+
+
+        pxor            mm5, mm5                ; Blank mm5
+        pxor            mm6, mm6                ; Blank mm6
+        pxor            mm7, mm7                ; Blank mm7
+
+ mov rax, arg(0) ;[src_ptr] ; Load base addresses
+ mov rbx, arg(2) ;[ref_ptr]
+ movsxd rcx, dword ptr arg(1) ;[source_stride]
+ movsxd rdx, dword ptr arg(3) ;[recon_stride]
+
+ ; Row 1
+ movq mm0, [rax] ; Copy eight bytes to mm0
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ movq mm2, mm0 ; Take copies
+ movq mm3, mm1 ; Take copies
+
+        punpcklbw       mm0, mm6                ; unpack to higher precision
+        punpcklbw       mm1, mm6
+        punpckhbw       mm2, mm6                ; unpack to higher precision
+ punpckhbw mm3, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ psubsw mm2, mm3 ; A-B (high order) to MM2
+
+ paddw mm5, mm0 ; accumulate differences in mm5
+ paddw mm5, mm2 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ pmaddwd mm2, mm2 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+ paddd mm7, mm2 ; accumulate in mm7
+
+
+ ; Row 2
+ movq mm0, [rax] ; Copy eight bytes to mm0
+ movq mm2, mm0 ; Take copies
+ movq mm3, mm1 ; Take copies
+
+        punpcklbw       mm0, mm6                    ; unpack to higher precision
+        punpcklbw       mm1, mm6
+        punpckhbw       mm2, mm6                    ; unpack to higher precision
+ punpckhbw mm3, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ psubsw mm2, mm3 ; A-B (high order) to MM2
+
+ paddw mm5, mm0 ; accumulate differences in mm5
+ paddw mm5, mm2 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ pmaddwd mm2, mm2 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+ paddd mm7, mm2 ; accumulate in mm7
+
+ ; Row 3
+ movq mm0, [rax] ; Copy eight bytes to mm0
+ movq mm2, mm0 ; Take copies
+ movq mm3, mm1 ; Take copies
+
+        punpcklbw       mm0, mm6                    ; unpack to higher precision
+        punpcklbw       mm1, mm6
+        punpckhbw       mm2, mm6                    ; unpack to higher precision
+ punpckhbw mm3, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ psubsw mm2, mm3 ; A-B (high order) to MM2
+
+ paddw mm5, mm0 ; accumulate differences in mm5
+ paddw mm5, mm2 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ pmaddwd mm2, mm2 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+ paddd mm7, mm2 ; accumulate in mm7
+
+ ; Row 4
+ movq mm0, [rax] ; Copy eight bytes to mm0
+ movq mm2, mm0 ; Take copies
+ movq mm3, mm1 ; Take copies
+
+        punpcklbw       mm0, mm6                    ; unpack to higher precision
+        punpcklbw       mm1, mm6
+        punpckhbw       mm2, mm6                    ; unpack to higher precision
+ punpckhbw mm3, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ psubsw mm2, mm3 ; A-B (high order) to MM2
+
+ paddw mm5, mm0 ; accumulate differences in mm5
+ paddw mm5, mm2 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ pmaddwd mm2, mm2 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+ paddd mm7, mm2 ; accumulate in mm7
+
+ ; Row 5
+ movq mm0, [rax] ; Copy eight bytes to mm0
+ movq mm2, mm0 ; Take copies
+ movq mm3, mm1 ; Take copies
+
+        punpcklbw       mm0, mm6                    ; unpack to higher precision
+        punpcklbw       mm1, mm6
+        punpckhbw       mm2, mm6                    ; unpack to higher precision
+ punpckhbw mm3, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ psubsw mm2, mm3 ; A-B (high order) to MM2
+
+ paddw mm5, mm0 ; accumulate differences in mm5
+ paddw mm5, mm2 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ pmaddwd mm2, mm2 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ ; movq mm4, [rbx + rdx]
+ paddd mm7, mm0 ; accumulate in mm7
+ paddd mm7, mm2 ; accumulate in mm7
+
+ ; Row 6
+ movq mm0, [rax] ; Copy eight bytes to mm0
+ movq mm2, mm0 ; Take copies
+ movq mm3, mm1 ; Take copies
+
+        punpcklbw       mm0, mm6                    ; unpack to higher precision
+        punpcklbw       mm1, mm6
+        punpckhbw       mm2, mm6                    ; unpack to higher precision
+ punpckhbw mm3, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ psubsw mm2, mm3 ; A-B (high order) to MM2
+
+ paddw mm5, mm0 ; accumulate differences in mm5
+ paddw mm5, mm2 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ pmaddwd mm2, mm2 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+ paddd mm7, mm2 ; accumulate in mm7
+
+ ; Row 7
+ movq mm0, [rax] ; Copy eight bytes to mm0
+ movq mm2, mm0 ; Take copies
+ movq mm3, mm1 ; Take copies
+
+        punpcklbw       mm0, mm6                    ; unpack to higher precision
+        punpcklbw       mm1, mm6
+        punpckhbw       mm2, mm6                    ; unpack to higher precision
+ punpckhbw mm3, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ psubsw mm2, mm3 ; A-B (high order) to MM2
+
+ paddw mm5, mm0 ; accumulate differences in mm5
+ paddw mm5, mm2 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ pmaddwd mm2, mm2 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+ paddd mm7, mm2 ; accumulate in mm7
+
+ ; Row 8
+ movq mm0, [rax] ; Copy eight bytes to mm0
+ movq mm2, mm0 ; Take copies
+ movq mm3, mm1 ; Take copies
+
+        punpcklbw       mm0, mm6                    ; unpack to higher precision
+        punpcklbw       mm1, mm6
+        punpckhbw       mm2, mm6                    ; unpack to higher precision
+ punpckhbw mm3, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ psubsw mm2, mm3 ; A-B (high order) to MM2
+
+ paddw mm5, mm0 ; accumulate differences in mm5
+ paddw mm5, mm2 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ pmaddwd mm2, mm2 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ paddd mm7, mm0 ; accumulate in mm7
+ paddd mm7, mm2 ; accumulate in mm7
+
+ ; Now accumulate the final results.
+ movq QWORD PTR [rsp+8], mm5 ; copy back accumulated results into normal memory
+ movq QWORD PTR [rsp], mm7 ; copy back accumulated results into normal memory
+ movsx rdx, WORD PTR [rsp+8]
+ movsx rcx, WORD PTR [rsp+10]
+ movsx rbx, WORD PTR [rsp+12]
+ movsx rax, WORD PTR [rsp+14]
+ add rdx, rcx
+ add rbx, rax
+ add rdx, rbx ;XSum
+ movsxd rax, DWORD PTR [rsp]
+ movsxd rcx, DWORD PTR [rsp+4]
+ add rax, rcx ;XXSum
+ mov rsi, arg(4) ;SSE
+ mov rdi, arg(5) ;Sum
+ mov dword ptr [rsi], eax
+ mov dword ptr [rdi], edx
+ xor rax, rax ; return 0
+
+
+ ; begin epilog
+ add rsp, 16
+ pop rbx
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
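+; An illustrative C sketch of the routine above (assumed name, not part of
+; this patch): it accumulates the sum and the sum of squares of the 8x8
+; src/ref differences into *Sum and *SSE.
+;
+;   void get8x8var_c(const unsigned char *src, int src_stride,
+;                    const unsigned char *ref, int ref_stride,
+;                    unsigned int *sse, int *sum) {
+;     int i, j, diff;
+;     *sse = 0; *sum = 0;
+;     for (i = 0; i < 8; i++, src += src_stride, ref += ref_stride)
+;       for (j = 0; j < 8; j++) {
+;         diff = src[j] - ref[j];
+;         *sum += diff;
+;         *sse += diff * diff;
+;       }
+;   }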
+
+;unsigned int
+;vp9_get4x4var_mmx
+;(
+; unsigned char *src_ptr,
+; int source_stride,
+; unsigned char *ref_ptr,
+; int recon_stride,
+; unsigned int *SSE,
+; int *Sum
+;)
+global sym(vp9_get4x4var_mmx) PRIVATE
+sym(vp9_get4x4var_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ push rsi
+ push rdi
+ push rbx
+ sub rsp, 16
+ ; end prolog
+
+
+        pxor            mm5, mm5                    ; Blank mm5
+        pxor            mm6, mm6                    ; Blank mm6
+        pxor            mm7, mm7                    ; Blank mm7
+
+ mov rax, arg(0) ;[src_ptr] ; Load base addresses
+ mov rbx, arg(2) ;[ref_ptr]
+ movsxd rcx, dword ptr arg(1) ;[source_stride]
+ movsxd rdx, dword ptr arg(3) ;[recon_stride]
+
+ ; Row 1
+ movq mm0, [rax] ; Copy eight bytes to mm0
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+        punpcklbw       mm0, mm6                    ; unpack to higher precision
+ punpcklbw mm1, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ paddw mm5, mm0 ; accumulate differences in mm5
+ pmaddwd mm0, mm0 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+
+
+ ; Row 2
+ movq mm0, [rax] ; Copy eight bytes to mm0
+        punpcklbw       mm0, mm6                    ; unpack to higher precision
+ punpcklbw mm1, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ paddw mm5, mm0 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+
+ ; Row 3
+ movq mm0, [rax] ; Copy eight bytes to mm0
+        punpcklbw       mm0, mm6                    ; unpack to higher precision
+ punpcklbw mm1, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ paddw mm5, mm0 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+ movq mm1, [rbx] ; Copy eight bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+
+ ; Row 4
+ movq mm0, [rax] ; Copy eight bytes to mm0
+
+        punpcklbw       mm0, mm6                    ; unpack to higher precision
+ punpcklbw mm1, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+
+ paddw mm5, mm0 ; accumulate differences in mm5
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ paddd mm7, mm0 ; accumulate in mm7
+
+
+ ; Now accumulate the final results.
+ movq QWORD PTR [rsp+8], mm5 ; copy back accumulated results into normal memory
+ movq QWORD PTR [rsp], mm7 ; copy back accumulated results into normal memory
+ movsx rdx, WORD PTR [rsp+8]
+ movsx rcx, WORD PTR [rsp+10]
+ movsx rbx, WORD PTR [rsp+12]
+ movsx rax, WORD PTR [rsp+14]
+ add rdx, rcx
+ add rbx, rax
+ add rdx, rbx ;XSum
+ movsxd rax, DWORD PTR [rsp]
+ movsxd rcx, DWORD PTR [rsp+4]
+ add rax, rcx ;XXSum
+ mov rsi, arg(4) ;SSE
+ mov rdi, arg(5) ;Sum
+ mov dword ptr [rsi], eax
+ mov dword ptr [rdi], edx
+ xor rax, rax ; return 0
+
+
+ ; begin epilog
+ add rsp, 16
+ pop rbx
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+
+;unsigned int
+;vp9_get4x4sse_cs_mmx
+;(
+; unsigned char *src_ptr,
+; int source_stride,
+; unsigned char *ref_ptr,
+; int recon_stride
+;)
+global sym(vp9_get4x4sse_cs_mmx) PRIVATE
+sym(vp9_get4x4sse_cs_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 4
+ push rsi
+ push rdi
+ push rbx
+ ; end prolog
+
+
+        pxor            mm6, mm6                    ; Blank mm6
+        pxor            mm7, mm7                    ; Blank mm7
+
+ mov rax, arg(0) ;[src_ptr] ; Load base addresses
+ mov rbx, arg(2) ;[ref_ptr]
+ movsxd rcx, dword ptr arg(1) ;[source_stride]
+ movsxd rdx, dword ptr arg(3) ;[recon_stride]
+ ; Row 1
+        movd            mm0, [rax]                  ; Copy four bytes to mm0
+        movd            mm1, [rbx]                  ; Copy four bytes to mm1
+        punpcklbw       mm0, mm6                    ; unpack to higher precision
+ punpcklbw mm1, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ pmaddwd mm0, mm0 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+        movd            mm1, [rbx]                  ; Copy four bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+
+ ; Row 2
+        movd            mm0, [rax]                  ; Copy four bytes to mm0
+        punpcklbw       mm0, mm6                    ; unpack to higher precision
+ punpcklbw mm1, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ pmaddwd mm0, mm0 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+        movd            mm1, [rbx]                  ; Copy four bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+
+ ; Row 3
+        movd            mm0, [rax]                  ; Copy four bytes to mm0
+        punpcklbw       mm1, mm6
+        punpcklbw       mm0, mm6                    ; unpack to higher precision
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+
+ pmaddwd mm0, mm0 ; square and accumulate
+ add rbx,rdx ; Inc pointer into ref data
+ add rax,rcx ; Inc pointer into the new data
+        movd            mm1, [rbx]                  ; Copy four bytes to mm1
+ paddd mm7, mm0 ; accumulate in mm7
+
+ ; Row 4
+        movd            mm0, [rax]                  ; Copy four bytes to mm0
+        punpcklbw       mm0, mm6                    ; unpack to higher precision
+ punpcklbw mm1, mm6
+ psubsw mm0, mm1 ; A-B (low order) to MM0
+ pmaddwd mm0, mm0 ; square and accumulate
+ paddd mm7, mm0 ; accumulate in mm7
+
+ movq mm0, mm7 ;
+ psrlq mm7, 32
+
+ paddd mm0, mm7
+ movq rax, mm0
+
+
+ ; begin epilog
+ pop rbx
+ pop rdi
+ pop rsi
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+%define mmx_filter_shift 7
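+
+; Rounding sketch for the bilinear filters below: each output pixel is
+;   out = (a * HFilter[0] + b * HFilter[1] + 64) >> mmx_filter_shift
+; where the two taps always sum to 128 (= 1 << 7), so adding mmx_bi_rd (64)
+; before the shift gives a round-to-nearest weighted average of a and b.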
+
+;void vp9_filter_block2d_bil4x4_var_mmx
+;(
+; unsigned char *ref_ptr,
+; int ref_pixels_per_line,
+; unsigned char *src_ptr,
+; int src_pixels_per_line,
+; unsigned short *HFilter,
+; unsigned short *VFilter,
+; int *sum,
+; unsigned int *sumsquared
+;)
+global sym(vp9_filter_block2d_bil4x4_var_mmx) PRIVATE
+sym(vp9_filter_block2d_bil4x4_var_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 8
+ GET_GOT rbx
+ push rsi
+ push rdi
+ sub rsp, 16
+ ; end prolog
+
+
+ pxor mm6, mm6 ;
+ pxor mm7, mm7 ;
+
+ mov rax, arg(4) ;HFilter ;
+ mov rdx, arg(5) ;VFilter ;
+
+ mov rsi, arg(0) ;ref_ptr ;
+ mov rdi, arg(2) ;src_ptr ;
+
+ mov rcx, 4 ;
+ pxor mm0, mm0 ;
+
+ movd mm1, [rsi] ;
+ movd mm3, [rsi+1] ;
+
+ punpcklbw mm1, mm0 ;
+ pmullw mm1, [rax] ;
+
+ punpcklbw mm3, mm0 ;
+ pmullw mm3, [rax+8] ;
+
+ paddw mm1, mm3 ;
+ paddw mm1, [GLOBAL(mmx_bi_rd)] ;
+
+ psraw mm1, mmx_filter_shift ;
+ movq mm5, mm1
+
+%if ABI_IS_32BIT
+ add rsi, dword ptr arg(1) ;ref_pixels_per_line ;
+%else
+ movsxd r8, dword ptr arg(1) ;ref_pixels_per_line ;
+ add rsi, r8
+%endif
+
+.filter_block2d_bil4x4_var_mmx_loop:
+
+ movd mm1, [rsi] ;
+ movd mm3, [rsi+1] ;
+
+ punpcklbw mm1, mm0 ;
+ pmullw mm1, [rax] ;
+
+ punpcklbw mm3, mm0 ;
+ pmullw mm3, [rax+8] ;
+
+ paddw mm1, mm3 ;
+ paddw mm1, [GLOBAL(mmx_bi_rd)] ;
+
+ psraw mm1, mmx_filter_shift ;
+ movq mm3, mm5 ;
+
+ movq mm5, mm1 ;
+ pmullw mm3, [rdx] ;
+
+ pmullw mm1, [rdx+8] ;
+ paddw mm1, mm3 ;
+
+
+ paddw mm1, [GLOBAL(mmx_bi_rd)] ;
+ psraw mm1, mmx_filter_shift ;
+
+ movd mm3, [rdi] ;
+ punpcklbw mm3, mm0 ;
+
+ psubw mm1, mm3 ;
+ paddw mm6, mm1 ;
+
+ pmaddwd mm1, mm1 ;
+ paddd mm7, mm1 ;
+
+%if ABI_IS_32BIT
+ add rsi, dword ptr arg(1) ;ref_pixels_per_line ;
+ add rdi, dword ptr arg(3) ;src_pixels_per_line ;
+%else
+ movsxd r8, dword ptr arg(1) ;ref_pixels_per_line
+ movsxd r9, dword ptr arg(3) ;src_pixels_per_line
+ add rsi, r8
+ add rdi, r9
+%endif
+ sub rcx, 1 ;
+ jnz .filter_block2d_bil4x4_var_mmx_loop ;
+
+
+ pxor mm3, mm3 ;
+ pxor mm2, mm2 ;
+
+ punpcklwd mm2, mm6 ;
+ punpckhwd mm3, mm6 ;
+
+ paddd mm2, mm3 ;
+ movq mm6, mm2 ;
+
+ psrlq mm6, 32 ;
+ paddd mm2, mm6 ;
+
+ psrad mm2, 16 ;
+ movq mm4, mm7 ;
+
+ psrlq mm4, 32 ;
+ paddd mm4, mm7 ;
+
+ mov rdi, arg(6) ;sum
+ mov rsi, arg(7) ;sumsquared
+
+ movd dword ptr [rdi], mm2 ;
+ movd dword ptr [rsi], mm4 ;
+
+
+
+ ; begin epilog
+ add rsp, 16
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+
+
+;void vp9_filter_block2d_bil_var_mmx
+;(
+; unsigned char *ref_ptr,
+; int ref_pixels_per_line,
+; unsigned char *src_ptr,
+; int src_pixels_per_line,
+; unsigned int Height,
+; unsigned short *HFilter,
+; unsigned short *VFilter,
+; int *sum,
+; unsigned int *sumsquared
+;)
+global sym(vp9_filter_block2d_bil_var_mmx) PRIVATE
+sym(vp9_filter_block2d_bil_var_mmx):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 9
+ GET_GOT rbx
+ push rsi
+ push rdi
+ sub rsp, 16
+ ; end prolog
+
+ pxor mm6, mm6 ;
+ pxor mm7, mm7 ;
+ mov rax, arg(5) ;HFilter ;
+
+ mov rdx, arg(6) ;VFilter ;
+ mov rsi, arg(0) ;ref_ptr ;
+
+ mov rdi, arg(2) ;src_ptr ;
+ movsxd rcx, dword ptr arg(4) ;Height ;
+
+ pxor mm0, mm0 ;
+ movq mm1, [rsi] ;
+
+ movq mm3, [rsi+1] ;
+ movq mm2, mm1 ;
+
+ movq mm4, mm3 ;
+ punpcklbw mm1, mm0 ;
+
+ punpckhbw mm2, mm0 ;
+ pmullw mm1, [rax] ;
+
+ pmullw mm2, [rax] ;
+ punpcklbw mm3, mm0 ;
+
+ punpckhbw mm4, mm0 ;
+ pmullw mm3, [rax+8] ;
+
+ pmullw mm4, [rax+8] ;
+ paddw mm1, mm3 ;
+
+ paddw mm2, mm4 ;
+ paddw mm1, [GLOBAL(mmx_bi_rd)] ;
+
+ psraw mm1, mmx_filter_shift ;
+ paddw mm2, [GLOBAL(mmx_bi_rd)] ;
+
+ psraw mm2, mmx_filter_shift ;
+ movq mm5, mm1
+
+ packuswb mm5, mm2 ;
+%if ABI_IS_32BIT
+ add rsi, dword ptr arg(1) ;ref_pixels_per_line
+%else
+ movsxd r8, dword ptr arg(1) ;ref_pixels_per_line
+ add rsi, r8
+%endif
+
+.filter_block2d_bil_var_mmx_loop:
+
+ movq mm1, [rsi] ;
+ movq mm3, [rsi+1] ;
+
+ movq mm2, mm1 ;
+ movq mm4, mm3 ;
+
+ punpcklbw mm1, mm0 ;
+ punpckhbw mm2, mm0 ;
+
+ pmullw mm1, [rax] ;
+ pmullw mm2, [rax] ;
+
+ punpcklbw mm3, mm0 ;
+ punpckhbw mm4, mm0 ;
+
+ pmullw mm3, [rax+8] ;
+ pmullw mm4, [rax+8] ;
+
+ paddw mm1, mm3 ;
+ paddw mm2, mm4 ;
+
+ paddw mm1, [GLOBAL(mmx_bi_rd)] ;
+ psraw mm1, mmx_filter_shift ;
+
+ paddw mm2, [GLOBAL(mmx_bi_rd)] ;
+ psraw mm2, mmx_filter_shift ;
+
+ movq mm3, mm5 ;
+ movq mm4, mm5 ;
+
+ punpcklbw mm3, mm0 ;
+ punpckhbw mm4, mm0 ;
+
+ movq mm5, mm1 ;
+ packuswb mm5, mm2 ;
+
+ pmullw mm3, [rdx] ;
+ pmullw mm4, [rdx] ;
+
+ pmullw mm1, [rdx+8] ;
+ pmullw mm2, [rdx+8] ;
+
+ paddw mm1, mm3 ;
+ paddw mm2, mm4 ;
+
+ paddw mm1, [GLOBAL(mmx_bi_rd)] ;
+ paddw mm2, [GLOBAL(mmx_bi_rd)] ;
+
+ psraw mm1, mmx_filter_shift ;
+ psraw mm2, mmx_filter_shift ;
+
+ movq mm3, [rdi] ;
+ movq mm4, mm3 ;
+
+ punpcklbw mm3, mm0 ;
+ punpckhbw mm4, mm0 ;
+
+ psubw mm1, mm3 ;
+ psubw mm2, mm4 ;
+
+ paddw mm6, mm1 ;
+ pmaddwd mm1, mm1 ;
+
+ paddw mm6, mm2 ;
+ pmaddwd mm2, mm2 ;
+
+ paddd mm7, mm1 ;
+ paddd mm7, mm2 ;
+
+%if ABI_IS_32BIT
+ add rsi, dword ptr arg(1) ;ref_pixels_per_line ;
+ add rdi, dword ptr arg(3) ;src_pixels_per_line ;
+%else
+ movsxd r8, dword ptr arg(1) ;ref_pixels_per_line ;
+ movsxd r9, dword ptr arg(3) ;src_pixels_per_line ;
+ add rsi, r8
+ add rdi, r9
+%endif
+ sub rcx, 1 ;
+ jnz .filter_block2d_bil_var_mmx_loop ;
+
+
+ pxor mm3, mm3 ;
+ pxor mm2, mm2 ;
+
+ punpcklwd mm2, mm6 ;
+ punpckhwd mm3, mm6 ;
+
+ paddd mm2, mm3 ;
+ movq mm6, mm2 ;
+
+ psrlq mm6, 32 ;
+ paddd mm2, mm6 ;
+
+ psrad mm2, 16 ;
+ movq mm4, mm7 ;
+
+ psrlq mm4, 32 ;
+ paddd mm4, mm7 ;
+
+ mov rdi, arg(7) ;sum
+ mov rsi, arg(8) ;sumsquared
+
+ movd dword ptr [rdi], mm2 ;
+ movd dword ptr [rsi], mm4 ;
+
+ ; begin epilog
+ add rsp, 16
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+SECTION_RODATA
+;short mmx_bi_rd[4] = { 64, 64, 64, 64};
+align 16
+mmx_bi_rd:
+ times 4 dw 64
diff --git a/libvpx/vp9/encoder/x86/vp9_variance_impl_sse2.asm b/libvpx/vp9/encoder/x86/vp9_variance_impl_sse2.asm
new file mode 100644
index 0000000..896dd18
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_variance_impl_sse2.asm
@@ -0,0 +1,761 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+%define xmm_filter_shift 7
+
+;unsigned int vp9_get_mb_ss_sse2
+;(
+; short *src_ptr
+;)
+global sym(vp9_get_mb_ss_sse2) PRIVATE
+sym(vp9_get_mb_ss_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 1
+ GET_GOT rbx
+ push rsi
+ push rdi
+ sub rsp, 16
+ ; end prolog
+
+
+ mov rax, arg(0) ;[src_ptr]
+ mov rcx, 8
+ pxor xmm4, xmm4
+
+.NEXTROW:
+ movdqa xmm0, [rax]
+ movdqa xmm1, [rax+16]
+ movdqa xmm2, [rax+32]
+ movdqa xmm3, [rax+48]
+ pmaddwd xmm0, xmm0
+ pmaddwd xmm1, xmm1
+ pmaddwd xmm2, xmm2
+ pmaddwd xmm3, xmm3
+
+ paddd xmm0, xmm1
+ paddd xmm2, xmm3
+ paddd xmm4, xmm0
+ paddd xmm4, xmm2
+
+ add rax, 0x40
+ dec rcx
+ ja .NEXTROW
+
+ movdqa xmm3,xmm4
+ psrldq xmm4,8
+ paddd xmm4,xmm3
+ movdqa xmm3,xmm4
+ psrldq xmm4,4
+ paddd xmm4,xmm3
+ movq rax,xmm4
+
+
+ ; begin epilog
+ add rsp, 16
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;unsigned int vp9_get16x16var_sse2
+;(
+; unsigned char * src_ptr,
+; int source_stride,
+; unsigned char * ref_ptr,
+; int recon_stride,
+; unsigned int * SSE,
+; int * Sum
+;)
+global sym(vp9_get16x16var_sse2) PRIVATE
+sym(vp9_get16x16var_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ push rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rsi, arg(0) ;[src_ptr]
+ mov rdi, arg(2) ;[ref_ptr]
+
+ movsxd rax, DWORD PTR arg(1) ;[source_stride]
+ movsxd rdx, DWORD PTR arg(3) ;[recon_stride]
+
+ ; Prefetch data
+ lea rcx, [rax+rax*2]
+ prefetcht0 [rsi]
+ prefetcht0 [rsi+rax]
+ prefetcht0 [rsi+rax*2]
+ prefetcht0 [rsi+rcx]
+ lea rbx, [rsi+rax*4]
+ prefetcht0 [rbx]
+ prefetcht0 [rbx+rax]
+ prefetcht0 [rbx+rax*2]
+ prefetcht0 [rbx+rcx]
+
+ lea rcx, [rdx+rdx*2]
+ prefetcht0 [rdi]
+ prefetcht0 [rdi+rdx]
+ prefetcht0 [rdi+rdx*2]
+ prefetcht0 [rdi+rcx]
+ lea rbx, [rdi+rdx*4]
+ prefetcht0 [rbx]
+ prefetcht0 [rbx+rdx]
+ prefetcht0 [rbx+rdx*2]
+ prefetcht0 [rbx+rcx]
+
+ pxor xmm0, xmm0 ; clear xmm0 for unpack
+ pxor xmm7, xmm7 ; clear xmm7 for accumulating diffs
+
+ pxor xmm6, xmm6 ; clear xmm6 for accumulating sse
+ mov rcx, 16
+
+.var16loop:
+ movdqu xmm1, XMMWORD PTR [rsi]
+ movdqu xmm2, XMMWORD PTR [rdi]
+
+ prefetcht0 [rsi+rax*8]
+ prefetcht0 [rdi+rdx*8]
+
+ movdqa xmm3, xmm1
+ movdqa xmm4, xmm2
+
+
+ punpcklbw xmm1, xmm0
+ punpckhbw xmm3, xmm0
+
+ punpcklbw xmm2, xmm0
+ punpckhbw xmm4, xmm0
+
+
+ psubw xmm1, xmm2
+ psubw xmm3, xmm4
+
+ paddw xmm7, xmm1
+ pmaddwd xmm1, xmm1
+
+ paddw xmm7, xmm3
+ pmaddwd xmm3, xmm3
+
+ paddd xmm6, xmm1
+ paddd xmm6, xmm3
+
+ add rsi, rax
+ add rdi, rdx
+
+ sub rcx, 1
+ jnz .var16loop
+
+
+ movdqa xmm1, xmm6
+ pxor xmm6, xmm6
+
+ pxor xmm5, xmm5
+ punpcklwd xmm6, xmm7
+
+ punpckhwd xmm5, xmm7
+ psrad xmm5, 16
+
+ psrad xmm6, 16
+ paddd xmm6, xmm5
+
+ movdqa xmm2, xmm1
+ punpckldq xmm1, xmm0
+
+ punpckhdq xmm2, xmm0
+ movdqa xmm7, xmm6
+
+ paddd xmm1, xmm2
+ punpckldq xmm6, xmm0
+
+ punpckhdq xmm7, xmm0
+ paddd xmm6, xmm7
+
+ movdqa xmm2, xmm1
+ movdqa xmm7, xmm6
+
+ psrldq xmm1, 8
+ psrldq xmm6, 8
+
+ paddd xmm7, xmm6
+ paddd xmm1, xmm2
+
+ mov rax, arg(5) ;[Sum]
+ mov rdi, arg(4) ;[SSE]
+
+ movd DWORD PTR [rax], xmm7
+ movd DWORD PTR [rdi], xmm1
+
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ pop rbx
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+
+
+;unsigned int vp9_get8x8var_sse2
+;(
+; unsigned char * src_ptr,
+; int source_stride,
+; unsigned char * ref_ptr,
+; int recon_stride,
+; unsigned int * SSE,
+; int * Sum
+;)
+global sym(vp9_get8x8var_sse2) PRIVATE
+sym(vp9_get8x8var_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ sub rsp, 16
+ ; end prolog
+
+ mov rsi, arg(0) ;[src_ptr]
+ mov rdi, arg(2) ;[ref_ptr]
+
+ movsxd rax, DWORD PTR arg(1) ;[source_stride]
+ movsxd rdx, DWORD PTR arg(3) ;[recon_stride]
+
+ pxor xmm0, xmm0 ; clear xmm0 for unpack
+ pxor xmm7, xmm7 ; clear xmm7 for accumulating diffs
+
+ movq xmm1, QWORD PTR [rsi]
+ movq xmm2, QWORD PTR [rdi]
+
+ punpcklbw xmm1, xmm0
+ punpcklbw xmm2, xmm0
+
+ psubsw xmm1, xmm2
+ paddw xmm7, xmm1
+
+ pmaddwd xmm1, xmm1
+
+ movq xmm2, QWORD PTR[rsi + rax]
+ movq xmm3, QWORD PTR[rdi + rdx]
+
+ punpcklbw xmm2, xmm0
+ punpcklbw xmm3, xmm0
+
+ psubsw xmm2, xmm3
+ paddw xmm7, xmm2
+
+ pmaddwd xmm2, xmm2
+ paddd xmm1, xmm2
+
+
+ movq xmm2, QWORD PTR[rsi + rax * 2]
+ movq xmm3, QWORD PTR[rdi + rdx * 2]
+
+ punpcklbw xmm2, xmm0
+ punpcklbw xmm3, xmm0
+
+ psubsw xmm2, xmm3
+ paddw xmm7, xmm2
+
+ pmaddwd xmm2, xmm2
+ paddd xmm1, xmm2
+
+
+ lea rsi, [rsi + rax * 2]
+ lea rdi, [rdi + rdx * 2]
+ movq xmm2, QWORD PTR[rsi + rax]
+ movq xmm3, QWORD PTR[rdi + rdx]
+
+ punpcklbw xmm2, xmm0
+ punpcklbw xmm3, xmm0
+
+ psubsw xmm2, xmm3
+ paddw xmm7, xmm2
+
+ pmaddwd xmm2, xmm2
+ paddd xmm1, xmm2
+
+ movq xmm2, QWORD PTR[rsi + rax *2]
+ movq xmm3, QWORD PTR[rdi + rdx *2]
+
+ punpcklbw xmm2, xmm0
+ punpcklbw xmm3, xmm0
+
+ psubsw xmm2, xmm3
+ paddw xmm7, xmm2
+
+ pmaddwd xmm2, xmm2
+ paddd xmm1, xmm2
+
+
+ lea rsi, [rsi + rax * 2]
+ lea rdi, [rdi + rdx * 2]
+
+
+ movq xmm2, QWORD PTR[rsi + rax]
+ movq xmm3, QWORD PTR[rdi + rdx]
+
+ punpcklbw xmm2, xmm0
+ punpcklbw xmm3, xmm0
+
+ psubsw xmm2, xmm3
+ paddw xmm7, xmm2
+
+ pmaddwd xmm2, xmm2
+ paddd xmm1, xmm2
+
+ movq xmm2, QWORD PTR[rsi + rax *2]
+ movq xmm3, QWORD PTR[rdi + rdx *2]
+
+ punpcklbw xmm2, xmm0
+ punpcklbw xmm3, xmm0
+
+ psubsw xmm2, xmm3
+ paddw xmm7, xmm2
+
+ pmaddwd xmm2, xmm2
+ paddd xmm1, xmm2
+
+
+ lea rsi, [rsi + rax * 2]
+ lea rdi, [rdi + rdx * 2]
+
+ movq xmm2, QWORD PTR[rsi + rax]
+ movq xmm3, QWORD PTR[rdi + rdx]
+
+ punpcklbw xmm2, xmm0
+ punpcklbw xmm3, xmm0
+
+ psubsw xmm2, xmm3
+ paddw xmm7, xmm2
+
+ pmaddwd xmm2, xmm2
+ paddd xmm1, xmm2
+
+
+ movdqa xmm6, xmm7
+ punpcklwd xmm6, xmm0
+
+ punpckhwd xmm7, xmm0
+ movdqa xmm2, xmm1
+
+ paddw xmm6, xmm7
+ punpckldq xmm1, xmm0
+
+ punpckhdq xmm2, xmm0
+ movdqa xmm7, xmm6
+
+ paddd xmm1, xmm2
+ punpckldq xmm6, xmm0
+
+ punpckhdq xmm7, xmm0
+ paddw xmm6, xmm7
+
+ movdqa xmm2, xmm1
+ movdqa xmm7, xmm6
+
+ psrldq xmm1, 8
+ psrldq xmm6, 8
+
+ paddw xmm7, xmm6
+ paddd xmm1, xmm2
+
+ mov rax, arg(5) ;[Sum]
+ mov rdi, arg(4) ;[SSE]
+
+ movq rdx, xmm7
+ movsx rcx, dx
+
+ mov dword ptr [rax], ecx
+ movd DWORD PTR [rdi], xmm1
+
+ ; begin epilog
+ add rsp, 16
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_half_horiz_vert_variance8x_h_sse2
+;(
+; unsigned char *ref_ptr,
+; int ref_pixels_per_line,
+; unsigned char *src_ptr,
+; int src_pixels_per_line,
+; unsigned int Height,
+; int *sum,
+; unsigned int *sumsquared
+;)
+global sym(vp9_half_horiz_vert_variance8x_h_sse2) PRIVATE
+sym(vp9_half_horiz_vert_variance8x_h_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+%if ABI_IS_32BIT=0
+ movsxd r8, dword ptr arg(1) ;ref_pixels_per_line
+ movsxd r9, dword ptr arg(3) ;src_pixels_per_line
+%endif
+
+ pxor xmm6, xmm6 ; error accumulator
+        pxor            xmm7, xmm7                  ; sse accumulator
+ mov rsi, arg(0) ;ref_ptr ;
+
+ mov rdi, arg(2) ;src_ptr ;
+ movsxd rcx, dword ptr arg(4) ;Height ;
+ movsxd rax, dword ptr arg(1) ;ref_pixels_per_line
+
+ pxor xmm0, xmm0 ;
+
+ movq xmm5, QWORD PTR [rsi] ; xmm5 = s0,s1,s2..s8
+ movq xmm3, QWORD PTR [rsi+1] ; xmm3 = s1,s2,s3..s9
+        pavgb           xmm5, xmm3                  ; xmm5 = avg(xmm5,xmm3) horizontal line 1
+
+%if ABI_IS_32BIT
+ add rsi, dword ptr arg(1) ;ref_pixels_per_line ; next source
+%else
+ add rsi, r8
+%endif
+
+.half_horiz_vert_variance8x_h_1:
+
+ movq xmm1, QWORD PTR [rsi] ;
+ movq xmm2, QWORD PTR [rsi+1] ;
+        pavgb           xmm1, xmm2                  ; xmm1 = avg(xmm1,xmm2) horizontal line i+1
+
+        pavgb           xmm5, xmm1                  ; xmm5 = vertical average of the above
+ punpcklbw xmm5, xmm0 ; xmm5 = words of above
+
+ movq xmm3, QWORD PTR [rdi] ; xmm3 = d0,d1,d2..d8
+ punpcklbw xmm3, xmm0 ; xmm3 = words of above
+
+ psubw xmm5, xmm3 ; xmm5 -= xmm3
+ paddw xmm6, xmm5 ; xmm6 += accumulated column differences
+ pmaddwd xmm5, xmm5 ; xmm5 *= xmm5
+ paddd xmm7, xmm5 ; xmm7 += accumulated square column differences
+
+ movdqa xmm5, xmm1 ; save xmm1 for use on the next row
+
+%if ABI_IS_32BIT
+ add esi, dword ptr arg(1) ;ref_pixels_per_line ; next source
+ add edi, dword ptr arg(3) ;src_pixels_per_line ; next destination
+%else
+ add rsi, r8
+ add rdi, r9
+%endif
+
+ sub rcx, 1 ;
+ jnz .half_horiz_vert_variance8x_h_1 ;
+
+ movdq2q mm6, xmm6 ;
+ movdq2q mm7, xmm7 ;
+
+ psrldq xmm6, 8
+ psrldq xmm7, 8
+
+ movdq2q mm2, xmm6
+ movdq2q mm3, xmm7
+
+ paddw mm6, mm2
+ paddd mm7, mm3
+
+ pxor mm3, mm3 ;
+ pxor mm2, mm2 ;
+
+ punpcklwd mm2, mm6 ;
+ punpckhwd mm3, mm6 ;
+
+ paddd mm2, mm3 ;
+ movq mm6, mm2 ;
+
+ psrlq mm6, 32 ;
+ paddd mm2, mm6 ;
+
+ psrad mm2, 16 ;
+ movq mm4, mm7 ;
+
+ psrlq mm4, 32 ;
+ paddd mm4, mm7 ;
+
+ mov rsi, arg(5) ; sum
+ mov rdi, arg(6) ; sumsquared
+
+ movd [rsi], mm2 ;
+ movd [rdi], mm4 ;
+
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+;void vp9_half_vert_variance8x_h_sse2
+;(
+; unsigned char *ref_ptr,
+; int ref_pixels_per_line,
+; unsigned char *src_ptr,
+; int src_pixels_per_line,
+; unsigned int Height,
+; int *sum,
+; unsigned int *sumsquared
+;)
+global sym(vp9_half_vert_variance8x_h_sse2) PRIVATE
+sym(vp9_half_vert_variance8x_h_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+%if ABI_IS_32BIT=0
+ movsxd r8, dword ptr arg(1) ;ref_pixels_per_line
+ movsxd r9, dword ptr arg(3) ;src_pixels_per_line
+%endif
+
+ pxor xmm6, xmm6 ; error accumulator
+        pxor            xmm7, xmm7                  ; sse accumulator
+ mov rsi, arg(0) ;ref_ptr ;
+
+ mov rdi, arg(2) ;src_ptr ;
+ movsxd rcx, dword ptr arg(4) ;Height ;
+ movsxd rax, dword ptr arg(1) ;ref_pixels_per_line
+
+ pxor xmm0, xmm0 ;
+.half_vert_variance8x_h_1:
+ movq xmm5, QWORD PTR [rsi] ; xmm5 = s0,s1,s2..s8
+        movq            xmm3, QWORD PTR [rsi+rax]   ; xmm3 = s0,s1,s2..s8 of the row below
+
+        pavgb           xmm5, xmm3                  ; xmm5 = avg(xmm5,xmm3)
+ punpcklbw xmm5, xmm0 ; xmm5 = words of above
+
+ movq xmm3, QWORD PTR [rdi] ; xmm3 = d0,d1,d2..d8
+ punpcklbw xmm3, xmm0 ; xmm3 = words of above
+
+ psubw xmm5, xmm3 ; xmm5 -= xmm3
+ paddw xmm6, xmm5 ; xmm6 += accumulated column differences
+ pmaddwd xmm5, xmm5 ; xmm5 *= xmm5
+ paddd xmm7, xmm5 ; xmm7 += accumulated square column differences
+
+%if ABI_IS_32BIT
+ add esi, dword ptr arg(1) ;ref_pixels_per_line ; next source
+ add edi, dword ptr arg(3) ;src_pixels_per_line ; next destination
+%else
+ add rsi, r8
+ add rdi, r9
+%endif
+
+ sub rcx, 1 ;
+ jnz .half_vert_variance8x_h_1 ;
+
+ movdq2q mm6, xmm6 ;
+ movdq2q mm7, xmm7 ;
+
+ psrldq xmm6, 8
+ psrldq xmm7, 8
+
+ movdq2q mm2, xmm6
+ movdq2q mm3, xmm7
+
+ paddw mm6, mm2
+ paddd mm7, mm3
+
+ pxor mm3, mm3 ;
+ pxor mm2, mm2 ;
+
+ punpcklwd mm2, mm6 ;
+ punpckhwd mm3, mm6 ;
+
+ paddd mm2, mm3 ;
+ movq mm6, mm2 ;
+
+ psrlq mm6, 32 ;
+ paddd mm2, mm6 ;
+
+ psrad mm2, 16 ;
+ movq mm4, mm7 ;
+
+ psrlq mm4, 32 ;
+ paddd mm4, mm7 ;
+
+ mov rsi, arg(5) ; sum
+ mov rdi, arg(6) ; sumsquared
+
+ movd [rsi], mm2 ;
+ movd [rdi], mm4 ;
+
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+;void vp9_half_horiz_variance8x_h_sse2
+;(
+; unsigned char *ref_ptr,
+; int ref_pixels_per_line,
+; unsigned char *src_ptr,
+; int src_pixels_per_line,
+; unsigned int Height,
+; int *sum,
+; unsigned int *sumsquared
+;)
+global sym(vp9_half_horiz_variance8x_h_sse2) PRIVATE
+sym(vp9_half_horiz_variance8x_h_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 7
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+%if ABI_IS_32BIT=0
+ movsxd r8, dword ptr arg(1) ;ref_pixels_per_line
+ movsxd r9, dword ptr arg(3) ;src_pixels_per_line
+%endif
+
+ pxor xmm6, xmm6 ; error accumulator
+        pxor            xmm7, xmm7                  ; sse accumulator
+ mov rsi, arg(0) ;ref_ptr ;
+
+ mov rdi, arg(2) ;src_ptr ;
+ movsxd rcx, dword ptr arg(4) ;Height ;
+
+ pxor xmm0, xmm0 ;
+.half_horiz_variance8x_h_1:
+ movq xmm5, QWORD PTR [rsi] ; xmm5 = s0,s1,s2..s8
+ movq xmm3, QWORD PTR [rsi+1] ; xmm3 = s1,s2,s3..s9
+
+        pavgb           xmm5, xmm3                  ; xmm5 = avg(xmm5,xmm3)
+ punpcklbw xmm5, xmm0 ; xmm5 = words of above
+
+ movq xmm3, QWORD PTR [rdi] ; xmm3 = d0,d1,d2..d8
+ punpcklbw xmm3, xmm0 ; xmm3 = words of above
+
+ psubw xmm5, xmm3 ; xmm5 -= xmm3
+ paddw xmm6, xmm5 ; xmm6 += accumulated column differences
+ pmaddwd xmm5, xmm5 ; xmm5 *= xmm5
+ paddd xmm7, xmm5 ; xmm7 += accumulated square column differences
+
+%if ABI_IS_32BIT
+ add esi, dword ptr arg(1) ;ref_pixels_per_line ; next source
+ add edi, dword ptr arg(3) ;src_pixels_per_line ; next destination
+%else
+ add rsi, r8
+ add rdi, r9
+%endif
+ sub rcx, 1 ;
+ jnz .half_horiz_variance8x_h_1 ;
+
+ movdq2q mm6, xmm6 ;
+ movdq2q mm7, xmm7 ;
+
+ psrldq xmm6, 8
+ psrldq xmm7, 8
+
+ movdq2q mm2, xmm6
+ movdq2q mm3, xmm7
+
+ paddw mm6, mm2
+ paddd mm7, mm3
+
+ pxor mm3, mm3 ;
+ pxor mm2, mm2 ;
+
+ punpcklwd mm2, mm6 ;
+ punpckhwd mm3, mm6 ;
+
+ paddd mm2, mm3 ;
+ movq mm6, mm2 ;
+
+ psrlq mm6, 32 ;
+ paddd mm2, mm6 ;
+
+ psrad mm2, 16 ;
+ movq mm4, mm7 ;
+
+ psrlq mm4, 32 ;
+ paddd mm4, mm7 ;
+
+ mov rsi, arg(5) ; sum
+ mov rdi, arg(6) ; sumsquared
+
+ movd [rsi], mm2 ;
+ movd [rdi], mm4 ;
+
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+SECTION_RODATA
+; short xmm_bi_rd[8] = { 64, 64, 64, 64,64, 64, 64, 64};
+align 16
+xmm_bi_rd:
+ times 8 dw 64
+align 16
+bilinear_filters_sse2:
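+; each 32-byte row holds one filter: the first tap replicated 8 times,
+; then the second tap replicated 8 times; the two taps sum to 128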
+ dw 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0
+ dw 120, 120, 120, 120, 120, 120, 120, 120, 8, 8, 8, 8, 8, 8, 8, 8
+ dw 112, 112, 112, 112, 112, 112, 112, 112, 16, 16, 16, 16, 16, 16, 16, 16
+ dw 104, 104, 104, 104, 104, 104, 104, 104, 24, 24, 24, 24, 24, 24, 24, 24
+ dw 96, 96, 96, 96, 96, 96, 96, 96, 32, 32, 32, 32, 32, 32, 32, 32
+ dw 88, 88, 88, 88, 88, 88, 88, 88, 40, 40, 40, 40, 40, 40, 40, 40
+ dw 80, 80, 80, 80, 80, 80, 80, 80, 48, 48, 48, 48, 48, 48, 48, 48
+ dw 72, 72, 72, 72, 72, 72, 72, 72, 56, 56, 56, 56, 56, 56, 56, 56
+ dw 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
+ dw 56, 56, 56, 56, 56, 56, 56, 56, 72, 72, 72, 72, 72, 72, 72, 72
+ dw 48, 48, 48, 48, 48, 48, 48, 48, 80, 80, 80, 80, 80, 80, 80, 80
+ dw 40, 40, 40, 40, 40, 40, 40, 40, 88, 88, 88, 88, 88, 88, 88, 88
+ dw 32, 32, 32, 32, 32, 32, 32, 32, 96, 96, 96, 96, 96, 96, 96, 96
+ dw 24, 24, 24, 24, 24, 24, 24, 24, 104, 104, 104, 104, 104, 104, 104, 104
+ dw 16, 16, 16, 16, 16, 16, 16, 16, 112, 112, 112, 112, 112, 112, 112, 112
+ dw 8, 8, 8, 8, 8, 8, 8, 8, 120, 120, 120, 120, 120, 120, 120, 120
diff --git a/libvpx/vp9/encoder/x86/vp9_variance_impl_ssse3.asm b/libvpx/vp9/encoder/x86/vp9_variance_impl_ssse3.asm
new file mode 100644
index 0000000..98a4a16
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_variance_impl_ssse3.asm
@@ -0,0 +1,372 @@
+;
+; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+%define xmm_filter_shift 7
+
+
+;void vp9_filter_block2d_bil_var_ssse3
+;(
+; unsigned char *ref_ptr,
+; int ref_pixels_per_line,
+; unsigned char *src_ptr,
+; int src_pixels_per_line,
+; unsigned int Height,
+; int xoffset,
+; int yoffset,
+; int *sum,
+;  unsigned int *sumsquared
+;)
+;Note: The filter coefficient at offset=0 is 128. Since the second operand of
+;pmaddubsw holds signed bytes, we must calculate the zero offset separately.
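+;For example, at xoffset=0 the tap pair in bilinear_filters_ssse3 is
+;(128, 0); pmaddubsw treats its second operand as int8 (range -128..127),
+;so 128 cannot be encoded there, and the offset=0 cases branch to the
+;single-pass and full-pixel paths below instead.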
+global sym(vp9_filter_block2d_bil_var_ssse3) PRIVATE
+sym(vp9_filter_block2d_bil_var_ssse3):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 9
+ SAVE_XMM 7
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ pxor xmm6, xmm6
+ pxor xmm7, xmm7
+
+ lea rcx, [GLOBAL(bilinear_filters_ssse3)]
+ movsxd rax, dword ptr arg(5) ; xoffset
+
+ cmp rax, 0 ; skip first_pass filter if xoffset=0
+ je .filter_block2d_bil_var_ssse3_sp_only
+
+ shl rax, 4 ; point to filter coeff with xoffset
+ lea rax, [rax + rcx] ; HFilter
+
+ movsxd rdx, dword ptr arg(6) ; yoffset
+
+ cmp rdx, 0 ; skip second_pass filter if yoffset=0
+ je .filter_block2d_bil_var_ssse3_fp_only
+
+ shl rdx, 4
+ lea rdx, [rdx + rcx] ; VFilter
+
+ mov rsi, arg(0) ;ref_ptr
+ mov rdi, arg(2) ;src_ptr
+ movsxd rcx, dword ptr arg(4) ;Height
+
+ movdqu xmm0, XMMWORD PTR [rsi]
+ movdqu xmm1, XMMWORD PTR [rsi+1]
+ movdqa xmm2, xmm0
+
+ punpcklbw xmm0, xmm1
+ punpckhbw xmm2, xmm1
+ pmaddubsw xmm0, [rax]
+ pmaddubsw xmm2, [rax]
+
+ paddw xmm0, [GLOBAL(xmm_bi_rd)]
+ paddw xmm2, [GLOBAL(xmm_bi_rd)]
+ psraw xmm0, xmm_filter_shift
+ psraw xmm2, xmm_filter_shift
+
+ packuswb xmm0, xmm2
+
+%if ABI_IS_32BIT
+ add rsi, dword ptr arg(1) ;ref_pixels_per_line
+%else
+ movsxd r8, dword ptr arg(1) ;ref_pixels_per_line
+ movsxd r9, dword ptr arg(3) ;src_pixels_per_line
+ lea rsi, [rsi + r8]
+%endif
+
+.filter_block2d_bil_var_ssse3_loop:
+ movdqu xmm1, XMMWORD PTR [rsi]
+ movdqu xmm2, XMMWORD PTR [rsi+1]
+ movdqa xmm3, xmm1
+
+ punpcklbw xmm1, xmm2
+ punpckhbw xmm3, xmm2
+ pmaddubsw xmm1, [rax]
+ pmaddubsw xmm3, [rax]
+
+ paddw xmm1, [GLOBAL(xmm_bi_rd)]
+ paddw xmm3, [GLOBAL(xmm_bi_rd)]
+ psraw xmm1, xmm_filter_shift
+ psraw xmm3, xmm_filter_shift
+ packuswb xmm1, xmm3
+
+ movdqa xmm2, xmm0
+ movdqa xmm0, xmm1
+ movdqa xmm3, xmm2
+
+ punpcklbw xmm2, xmm1
+ punpckhbw xmm3, xmm1
+ pmaddubsw xmm2, [rdx]
+ pmaddubsw xmm3, [rdx]
+
+ paddw xmm2, [GLOBAL(xmm_bi_rd)]
+ paddw xmm3, [GLOBAL(xmm_bi_rd)]
+ psraw xmm2, xmm_filter_shift
+ psraw xmm3, xmm_filter_shift
+
+ movq xmm1, QWORD PTR [rdi]
+ pxor xmm4, xmm4
+ punpcklbw xmm1, xmm4
+ movq xmm5, QWORD PTR [rdi+8]
+ punpcklbw xmm5, xmm4
+
+ psubw xmm2, xmm1
+ psubw xmm3, xmm5
+ paddw xmm6, xmm2
+ paddw xmm6, xmm3
+ pmaddwd xmm2, xmm2
+ pmaddwd xmm3, xmm3
+ paddd xmm7, xmm2
+ paddd xmm7, xmm3
+
+%if ABI_IS_32BIT
+ add rsi, dword ptr arg(1) ;ref_pixels_per_line
+ add rdi, dword ptr arg(3) ;src_pixels_per_line
+%else
+ lea rsi, [rsi + r8]
+ lea rdi, [rdi + r9]
+%endif
+
+ sub rcx, 1
+ jnz .filter_block2d_bil_var_ssse3_loop
+
+ jmp .filter_block2d_bil_variance
+
+.filter_block2d_bil_var_ssse3_sp_only:
+ movsxd rdx, dword ptr arg(6) ; yoffset
+
+    cmp         rdx, 0                  ; both xoffset=0 and yoffset=0
+ je .filter_block2d_bil_var_ssse3_full_pixel
+
+ shl rdx, 4
+ lea rdx, [rdx + rcx] ; VFilter
+
+ mov rsi, arg(0) ;ref_ptr
+ mov rdi, arg(2) ;src_ptr
+ movsxd rcx, dword ptr arg(4) ;Height
+ movsxd rax, dword ptr arg(1) ;ref_pixels_per_line
+
+ movdqu xmm1, XMMWORD PTR [rsi]
+ movdqa xmm0, xmm1
+
+%if ABI_IS_32BIT=0
+ movsxd r9, dword ptr arg(3) ;src_pixels_per_line
+%endif
+
+ lea rsi, [rsi + rax]
+
+.filter_block2d_bil_sp_only_loop:
+ movdqu xmm3, XMMWORD PTR [rsi]
+ movdqa xmm2, xmm1
+ movdqa xmm0, xmm3
+
+ punpcklbw xmm1, xmm3
+ punpckhbw xmm2, xmm3
+ pmaddubsw xmm1, [rdx]
+ pmaddubsw xmm2, [rdx]
+
+ paddw xmm1, [GLOBAL(xmm_bi_rd)]
+ paddw xmm2, [GLOBAL(xmm_bi_rd)]
+ psraw xmm1, xmm_filter_shift
+ psraw xmm2, xmm_filter_shift
+
+ movq xmm3, QWORD PTR [rdi]
+ pxor xmm4, xmm4
+ punpcklbw xmm3, xmm4
+ movq xmm5, QWORD PTR [rdi+8]
+ punpcklbw xmm5, xmm4
+
+ psubw xmm1, xmm3
+ psubw xmm2, xmm5
+ paddw xmm6, xmm1
+ paddw xmm6, xmm2
+ pmaddwd xmm1, xmm1
+ pmaddwd xmm2, xmm2
+ paddd xmm7, xmm1
+ paddd xmm7, xmm2
+
+ movdqa xmm1, xmm0
+ lea rsi, [rsi + rax] ;ref_pixels_per_line
+
+%if ABI_IS_32BIT
+ add rdi, dword ptr arg(3) ;src_pixels_per_line
+%else
+ lea rdi, [rdi + r9]
+%endif
+
+ sub rcx, 1
+ jnz .filter_block2d_bil_sp_only_loop
+
+ jmp .filter_block2d_bil_variance
+
+.filter_block2d_bil_var_ssse3_full_pixel:
+ mov rsi, arg(0) ;ref_ptr
+ mov rdi, arg(2) ;src_ptr
+ movsxd rcx, dword ptr arg(4) ;Height
+ movsxd rax, dword ptr arg(1) ;ref_pixels_per_line
+ movsxd rdx, dword ptr arg(3) ;src_pixels_per_line
+ pxor xmm0, xmm0
+
+.filter_block2d_bil_full_pixel_loop:
+ movq xmm1, QWORD PTR [rsi]
+ punpcklbw xmm1, xmm0
+ movq xmm2, QWORD PTR [rsi+8]
+ punpcklbw xmm2, xmm0
+
+ movq xmm3, QWORD PTR [rdi]
+ punpcklbw xmm3, xmm0
+ movq xmm4, QWORD PTR [rdi+8]
+ punpcklbw xmm4, xmm0
+
+ psubw xmm1, xmm3
+ psubw xmm2, xmm4
+ paddw xmm6, xmm1
+ paddw xmm6, xmm2
+ pmaddwd xmm1, xmm1
+ pmaddwd xmm2, xmm2
+ paddd xmm7, xmm1
+ paddd xmm7, xmm2
+
+ lea rsi, [rsi + rax] ;ref_pixels_per_line
+ lea rdi, [rdi + rdx] ;src_pixels_per_line
+ sub rcx, 1
+ jnz .filter_block2d_bil_full_pixel_loop
+
+ jmp .filter_block2d_bil_variance
+
+.filter_block2d_bil_var_ssse3_fp_only:
+ mov rsi, arg(0) ;ref_ptr
+ mov rdi, arg(2) ;src_ptr
+ movsxd rcx, dword ptr arg(4) ;Height
+ movsxd rdx, dword ptr arg(1) ;ref_pixels_per_line
+
+ pxor xmm0, xmm0
+
+%if ABI_IS_32BIT=0
+ movsxd r9, dword ptr arg(3) ;src_pixels_per_line
+%endif
+
+.filter_block2d_bil_fp_only_loop:
+ movdqu xmm1, XMMWORD PTR [rsi]
+ movdqu xmm2, XMMWORD PTR [rsi+1]
+ movdqa xmm3, xmm1
+
+ punpcklbw xmm1, xmm2
+ punpckhbw xmm3, xmm2
+ pmaddubsw xmm1, [rax]
+ pmaddubsw xmm3, [rax]
+
+ paddw xmm1, [GLOBAL(xmm_bi_rd)]
+ paddw xmm3, [GLOBAL(xmm_bi_rd)]
+ psraw xmm1, xmm_filter_shift
+ psraw xmm3, xmm_filter_shift
+
+    movq        xmm2, QWORD PTR [rdi]
+ pxor xmm4, xmm4
+ punpcklbw xmm2, xmm4
+ movq xmm5, QWORD PTR [rdi+8]
+ punpcklbw xmm5, xmm4
+
+ psubw xmm1, xmm2
+ psubw xmm3, xmm5
+ paddw xmm6, xmm1
+ paddw xmm6, xmm3
+ pmaddwd xmm1, xmm1
+ pmaddwd xmm3, xmm3
+ paddd xmm7, xmm1
+ paddd xmm7, xmm3
+
+ lea rsi, [rsi + rdx]
+%if ABI_IS_32BIT
+ add rdi, dword ptr arg(3) ;src_pixels_per_line
+%else
+ lea rdi, [rdi + r9]
+%endif
+
+ sub rcx, 1
+ jnz .filter_block2d_bil_fp_only_loop
+
+ jmp .filter_block2d_bil_variance
+
+.filter_block2d_bil_variance:
+ pxor xmm0, xmm0
+ pxor xmm1, xmm1
+ pxor xmm5, xmm5
+
+ punpcklwd xmm0, xmm6
+ punpckhwd xmm1, xmm6
+ psrad xmm0, 16
+ psrad xmm1, 16
+ paddd xmm0, xmm1
+ movdqa xmm1, xmm0
+
+ movdqa xmm6, xmm7
+ punpckldq xmm6, xmm5
+ punpckhdq xmm7, xmm5
+ paddd xmm6, xmm7
+
+ punpckldq xmm0, xmm5
+ punpckhdq xmm1, xmm5
+ paddd xmm0, xmm1
+
+ movdqa xmm7, xmm6
+ movdqa xmm1, xmm0
+
+ psrldq xmm7, 8
+ psrldq xmm1, 8
+
+ paddd xmm6, xmm7
+ paddd xmm0, xmm1
+
+ mov rsi, arg(7) ;[Sum]
+ mov rdi, arg(8) ;[SSE]
+
+ movd [rsi], xmm0
+ movd [rdi], xmm6
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+
+SECTION_RODATA
+align 16
+xmm_bi_rd:
+ times 8 dw 64
+align 16
+bilinear_filters_ssse3:
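+; byte taps interleaved as (first, second) pairs for pmaddubsw;
+; each pair sums to 128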
+ times 8 db 128, 0
+ times 8 db 120, 8
+ times 8 db 112, 16
+ times 8 db 104, 24
+ times 8 db 96, 32
+ times 8 db 88, 40
+ times 8 db 80, 48
+ times 8 db 72, 56
+ times 8 db 64, 64
+ times 8 db 56, 72
+ times 8 db 48, 80
+ times 8 db 40, 88
+ times 8 db 32, 96
+ times 8 db 24, 104
+ times 8 db 16, 112
+ times 8 db 8, 120
diff --git a/libvpx/vp9/encoder/x86/vp9_variance_mmx.c b/libvpx/vp9/encoder/x86/vp9_variance_mmx.c
new file mode 100644
index 0000000..bad1cfa
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_variance_mmx.c
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp9/encoder/vp9_variance.h"
+#include "vp9/common/vp9_pragmas.h"
+#include "vpx_ports/mem.h"
+
+extern void filter_block1d_h6_mmx
+(
+ const unsigned char *src_ptr,
+ unsigned short *output_ptr,
+ unsigned int src_pixels_per_line,
+ unsigned int pixel_step,
+ unsigned int output_height,
+ unsigned int output_width,
+ short *vp7_filter
+);
+extern void filter_block1d_v6_mmx
+(
+ const short *src_ptr,
+ unsigned char *output_ptr,
+ unsigned int pixels_per_line,
+ unsigned int pixel_step,
+ unsigned int output_height,
+ unsigned int output_width,
+ short *vp7_filter
+);
+
+extern unsigned int vp9_get_mb_ss_mmx(const short *src_ptr);
+extern unsigned int vp9_get8x8var_mmx
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *SSE,
+ int *Sum
+);
+extern unsigned int vp9_get4x4var_mmx
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *SSE,
+ int *Sum
+);
+extern void vp9_filter_block2d_bil4x4_var_mmx
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ const short *HFilter,
+ const short *VFilter,
+ int *sum,
+ unsigned int *sumsquared
+);
+extern void vp9_filter_block2d_bil_var_mmx
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ const short *HFilter,
+ const short *VFilter,
+ int *sum,
+ unsigned int *sumsquared
+);
+
+
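+/* All of the variance wrappers below follow the same identity,
+ *
+ *   variance = SSE - Sum^2 / N,
+ *
+ * where N is the pixel count of the block; the right shift implements the
+ * division by N (>> 4 for 4x4, >> 6 for 8x8, >> 7 for 16x8 and 8x16,
+ * >> 8 for 16x16).
+ */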
+unsigned int vp9_variance4x4_mmx(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ vp9_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 4));
+
+}
+
+unsigned int vp9_variance8x8_mmx(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
+ *sse = var;
+
+ return (var - (((unsigned int)avg * avg) >> 6));
+
+}
+
+unsigned int vp9_mse16x16_mmx(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int sse0, sse1, sse2, sse3, var;
+ int sum0, sum1, sum2, sum3;
+
+
+ vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
+ vp9_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
+ vp9_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
+ vp9_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);
+
+ var = sse0 + sse1 + sse2 + sse3;
+ *sse = var;
+ return var;
+}
+
+
+unsigned int vp9_variance16x16_mmx(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int sse0, sse1, sse2, sse3, var;
+ int sum0, sum1, sum2, sum3, avg;
+
+
+ vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
+ vp9_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
+ vp9_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
+ vp9_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);
+
+ var = sse0 + sse1 + sse2 + sse3;
+ avg = sum0 + sum1 + sum2 + sum3;
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 8));
+}
+
+unsigned int vp9_variance16x8_mmx(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int sse0, sse1, var;
+ int sum0, sum1, avg;
+
+ vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
+ vp9_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
+
+ var = sse0 + sse1;
+ avg = sum0 + sum1;
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 7));
+
+}
+
+
+unsigned int vp9_variance8x16_mmx(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int sse0, sse1, var;
+ int sum0, sum1, avg;
+
+ vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
+ vp9_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse1, &sum1);
+
+ var = sse0 + sse1;
+ avg = sum0 + sum1;
+ *sse = var;
+
+ return (var - (((unsigned int)avg * avg) >> 7));
+
+}
+
+DECLARE_ALIGNED(16, extern const short, vp9_bilinear_filters_mmx[16][8]);
+
+unsigned int vp9_sub_pixel_variance4x4_mmx
+(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse)
+
+{
+ int xsum;
+ unsigned int xxsum;
+ vp9_filter_block2d_bil4x4_var_mmx(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line,
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
+ &xsum, &xxsum
+ );
+ *sse = xxsum;
+ return (xxsum - (((unsigned int)xsum * xsum) >> 4));
+}
+
+
+unsigned int vp9_sub_pixel_variance8x8_mmx
+(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+
+ int xsum;
+ unsigned int xxsum;
+ vp9_filter_block2d_bil_var_mmx(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
+ &xsum, &xxsum
+ );
+ *sse = xxsum;
+ return (xxsum - (((unsigned int)xsum * xsum) >> 6));
+}
+
+unsigned int vp9_sub_pixel_variance16x16_mmx
+(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+
+ int xsum0, xsum1;
+ unsigned int xxsum0, xxsum1;
+
+ vp9_filter_block2d_bil_var_mmx(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
+ &xsum0, &xxsum0
+ );
+
+ vp9_filter_block2d_bil_var_mmx(
+ src_ptr + 8, src_pixels_per_line,
+ dst_ptr + 8, dst_pixels_per_line, 16,
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
+ &xsum1, &xxsum1
+ );
+
+ xsum0 += xsum1;
+ xxsum0 += xxsum1;
+
+ *sse = xxsum0;
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
+
+
+}
+
+unsigned int vp9_sub_pixel_mse16x16_mmx(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ vp9_sub_pixel_variance16x16_mmx(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
+ return *sse;
+}
+
+unsigned int vp9_sub_pixel_variance16x8_mmx
+(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ int xsum0, xsum1;
+ unsigned int xxsum0, xxsum1;
+
+
+ vp9_filter_block2d_bil_var_mmx(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
+ &xsum0, &xxsum0
+ );
+
+
+ vp9_filter_block2d_bil_var_mmx(
+ src_ptr + 8, src_pixels_per_line,
+ dst_ptr + 8, dst_pixels_per_line, 8,
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
+ &xsum1, &xxsum1
+ );
+
+ xsum0 += xsum1;
+ xxsum0 += xxsum1;
+
+ *sse = xxsum0;
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 7));
+}
+
+unsigned int vp9_sub_pixel_variance8x16_mmx
+(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ int xsum;
+ unsigned int xxsum;
+ vp9_filter_block2d_bil_var_mmx(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
+ &xsum, &xxsum
+ );
+ *sse = xxsum;
+ return (xxsum - (((unsigned int)xsum * xsum) >> 7));
+}
+
+
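+/* The half-pixel variants below reuse the sub-pixel path with offset 8,
+ * which selects the centre (64, 64) row of the bilinear filter table,
+ * i.e. an exact half-pel average of the two neighboring pixels. */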
+unsigned int vp9_variance_halfpixvar16x16_h_mmx(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 8, 0,
+ ref_ptr, recon_stride, sse);
+}
+
+
+unsigned int vp9_variance_halfpixvar16x16_v_mmx(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 0, 8,
+ ref_ptr, recon_stride, sse);
+}
+
+
+unsigned int vp9_variance_halfpixvar16x16_hv_mmx(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ return vp9_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 8, 8,
+ ref_ptr, recon_stride, sse);
+}
diff --git a/libvpx/vp9/encoder/x86/vp9_variance_sse2.c b/libvpx/vp9/encoder/x86/vp9_variance_sse2.c
new file mode 100644
index 0000000..67ca925
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_variance_sse2.c
@@ -0,0 +1,773 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp9/encoder/vp9_variance.h"
+#include "vp9/common/vp9_pragmas.h"
+#include "vpx_ports/mem.h"
+
+#define HALFNDX 8
+
+extern void filter_block1d_h6_mmx(const unsigned char *src_ptr, unsigned short *output_ptr, unsigned int src_pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *vp7_filter);
+extern void filter_block1d_v6_mmx(const short *src_ptr, unsigned char *output_ptr, unsigned int pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *vp7_filter);
+extern void filter_block1d8_h6_sse2(const unsigned char *src_ptr, unsigned short *output_ptr, unsigned int src_pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *vp7_filter);
+extern void filter_block1d8_v6_sse2(const short *src_ptr, unsigned char *output_ptr, unsigned int pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *vp7_filter);
+
+extern void vp9_filter_block2d_bil4x4_var_mmx
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ const short *HFilter,
+ const short *VFilter,
+ int *sum,
+ unsigned int *sumsquared
+);
+
+extern unsigned int vp9_get4x4var_mmx
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *SSE,
+ int *Sum
+);
+
+unsigned int vp9_get_mb_ss_sse2
+(
+ const short *src_ptr
+);
+unsigned int vp9_get16x16var_sse2
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *SSE,
+ int *Sum
+);
+unsigned int vp9_get8x8var_sse2
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *SSE,
+ int *Sum
+);
+void vp9_filter_block2d_bil_var_sse2
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int xoffset,
+ int yoffset,
+ int *sum,
+ unsigned int *sumsquared
+);
+void vp9_half_horiz_vert_variance8x_h_sse2
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
+);
+void vp9_half_horiz_vert_variance16x_h_sse2
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
+);
+void vp9_half_horiz_variance8x_h_sse2
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
+);
+void vp9_half_horiz_variance16x_h_sse2
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
+);
+void vp9_half_vert_variance8x_h_sse2
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
+);
+void vp9_half_vert_variance16x_h_sse2
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
+);
+
+DECLARE_ALIGNED(16, extern const short, vp9_bilinear_filters_mmx[16][8]);
+
+typedef unsigned int (*get_var_sse2) (
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *SSE,
+ int *Sum
+);
+
+static void variance_sse2(const unsigned char *src_ptr, int source_stride,
+ const unsigned char *ref_ptr, int recon_stride,
+ int w, int h, unsigned int *sse, int *sum,
+ get_var_sse2 var_fn, int block_size) {
+ unsigned int sse0;
+ int sum0;
+ int i, j;
+
+ *sse = 0;
+ *sum = 0;
+
+ for (i = 0; i < h; i += block_size) {
+ for (j = 0; j < w; j += block_size) {
+ var_fn(src_ptr + source_stride * i + j, source_stride,
+ ref_ptr + recon_stride * i + j, recon_stride, &sse0, &sum0);
+ *sse += sse0;
+ *sum += sum0;
+ }
+ }
+}
+
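+/* Usage sketch (mirrors vp9_variance32x32_sse2 below): tile a 32x32 block
+ * with four 16x16 calls and then apply variance = SSE - Sum^2 / 1024:
+ *
+ *   unsigned int sse;
+ *   int sum;
+ *   variance_sse2(src, src_stride, ref, ref_stride, 32, 32,
+ *                 &sse, &sum, vp9_get16x16var_sse2, 16);
+ */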
+unsigned int vp9_variance4x4_sse2(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 4, 4,
+ &var, &avg, vp9_get4x4var_mmx, 4);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 4));
+}
+
+unsigned int vp9_variance8x4_sse2(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 8, 4,
+ &var, &avg, vp9_get4x4var_mmx, 4);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 5));
+}
+
+unsigned int vp9_variance4x8_sse2(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 4, 8,
+ &var, &avg, vp9_get4x4var_mmx, 4);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 5));
+}
+
+unsigned int vp9_variance8x8_sse2
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 8, 8,
+ &var, &avg, vp9_get8x8var_sse2, 8);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 6));
+}
+
+unsigned int vp9_variance16x8_sse2
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 16, 8,
+ &var, &avg, vp9_get8x8var_sse2, 8);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 7));
+}
+
+unsigned int vp9_variance8x16_sse2
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 8, 16,
+ &var, &avg, vp9_get8x8var_sse2, 8);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 7));
+}
+
+unsigned int vp9_variance16x16_sse2
+(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16,
+ &var, &avg, vp9_get16x16var_sse2, 16);
+ *sse = var;
+ return (var - (((unsigned int)avg * avg) >> 8));
+}
+
+unsigned int vp9_mse16x16_wmt(
+ const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+
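+ /* MSE keeps the mean term: it is simply the summed squared difference, so
+ * the sum computed by the kernel is discarded. */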
+ unsigned int sse0;
+ int sum0;
+ vp9_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0,
+ &sum0);
+ *sse = sse0;
+ return sse0;
+}
+
+unsigned int vp9_variance32x32_sse2(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 32, 32,
+ &var, &avg, vp9_get16x16var_sse2, 16);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 10));
+}
+
+unsigned int vp9_variance32x16_sse2(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 32, 16,
+ &var, &avg, vp9_get16x16var_sse2, 16);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 9));
+}
+
+unsigned int vp9_variance16x32_sse2(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 16, 32,
+ &var, &avg, vp9_get16x16var_sse2, 16);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 9));
+}
+
+unsigned int vp9_variance64x64_sse2(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 64, 64,
+ &var, &avg, vp9_get16x16var_sse2, 16);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 12));
+}
+
+unsigned int vp9_variance64x32_sse2(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 64, 32,
+ &var, &avg, vp9_get16x16var_sse2, 16);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 11));
+}
+
+unsigned int vp9_variance32x64_sse2(const uint8_t *src_ptr,
+ int source_stride,
+ const uint8_t *ref_ptr,
+ int recon_stride,
+ unsigned int *sse) {
+ unsigned int var;
+ int avg;
+
+ variance_sse2(src_ptr, source_stride, ref_ptr, recon_stride, 32, 64,
+ &var, &avg, vp9_get16x16var_sse2, 16);
+ *sse = var;
+ return (var - (((int64_t)avg * avg) >> 11));
+}
+
+unsigned int vp9_sub_pixel_variance4x4_wmt
+(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ int xsum;
+ unsigned int xxsum;
+ vp9_filter_block2d_bil4x4_var_mmx(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line,
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
+ &xsum, &xxsum
+ );
+ *sse = xxsum;
+ return (xxsum - (((unsigned int)xsum * xsum) >> 4));
+}
+
+
+unsigned int vp9_sub_pixel_variance8x8_wmt
+(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ int xsum;
+ unsigned int xxsum;
+
+ if (xoffset == HALFNDX && yoffset == 0) {
+ vp9_half_horiz_variance8x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ &xsum, &xxsum);
+ } else if (xoffset == 0 && yoffset == HALFNDX) {
+ vp9_half_vert_variance8x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ &xsum, &xxsum);
+ } else if (xoffset == HALFNDX && yoffset == HALFNDX) {
+ vp9_half_horiz_vert_variance8x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ &xsum, &xxsum);
+ } else {
+ vp9_filter_block2d_bil_var_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ xoffset, yoffset,
+ &xsum, &xxsum);
+ }
+
+ *sse = xxsum;
+ return (xxsum - (((unsigned int)xsum * xsum) >> 6));
+}
+
+static void sub_pixel_variance16x16_sse2(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse, int *avg) {
+ int xsum0, xsum1;
+ unsigned int xxsum0, xxsum1;
+
+ // Note: these branches could be avoided if the caller invoked the
+ // appropriate specialized kernel directly; HALFNDX selects the cheaper
+ // dedicated half-pel kernels from the 16-step subpel range.
+ if (xoffset == HALFNDX && yoffset == 0) {
+ vp9_half_horiz_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+ } else if (xoffset == 0 && yoffset == HALFNDX) {
+ vp9_half_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+ } else if (xoffset == HALFNDX && yoffset == HALFNDX) {
+ vp9_half_horiz_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+ } else {
+ vp9_filter_block2d_bil_var_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ xoffset, yoffset,
+ &xsum0, &xxsum0
+ );
+
+ vp9_filter_block2d_bil_var_sse2(
+ src_ptr + 8, src_pixels_per_line,
+ dst_ptr + 8, dst_pixels_per_line, 16,
+ xoffset, yoffset,
+ &xsum1, &xxsum1
+ );
+ xsum0 += xsum1;
+ xxsum0 += xxsum1;
+ }
+
+ *sse = xxsum0;
+ *avg = xsum0;
+}
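+
+/* Note: vp9_filter_block2d_bil_var_sse2 filters an 8-pixel-wide column, so
+ * the generic branch above covers the 16-pixel width with two calls at
+ * horizontal offsets 0 and +8, while the "...16x_h" half-pel kernels handle
+ * the full width in one pass. */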
+
+unsigned int vp9_sub_pixel_variance16x16_sse2(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse_ptr) {
+ int avg;
+ unsigned int sse;
+
+ sub_pixel_variance16x16_sse2(src_ptr, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr, dst_pixels_per_line,
+ &sse, &avg);
+ *sse_ptr = sse;
+
+ return (sse - (((unsigned int) avg * avg) >> 8));
+}
+
+unsigned int vp9_sub_pixel_variance32x32_sse2(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse_ptr) {
+ int avg0, avg1, avg2, avg3;
+ unsigned int sse0, sse1, sse2, sse3;
+
+ sub_pixel_variance16x16_sse2(src_ptr, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr, dst_pixels_per_line,
+ &sse0, &avg0);
+ sub_pixel_variance16x16_sse2(src_ptr + 16, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr + 16, dst_pixels_per_line,
+ &sse1, &avg1);
+ src_ptr += 16 * src_pixels_per_line;
+ dst_ptr += 16 * dst_pixels_per_line;
+ sub_pixel_variance16x16_sse2(src_ptr, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr, dst_pixels_per_line,
+ &sse2, &avg2);
+ sub_pixel_variance16x16_sse2(src_ptr + 16, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr + 16, dst_pixels_per_line,
+ &sse3, &avg3);
+ sse0 += sse1 + sse2 + sse3;
+ avg0 += avg1 + avg2 + avg3;
+ *sse_ptr = sse0;
+
+ return (sse0 - (((unsigned int) avg0 * avg0) >> 10));
+}
+
+unsigned int vp9_sub_pixel_variance64x64_sse2(const uint8_t *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse_ptr) {
+ int avg0, avg1, avg2, avg3, avg4;
+ unsigned int sse0, sse1, sse2, sse3, sse4;
+
+ sub_pixel_variance16x16_sse2(src_ptr, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr, dst_pixels_per_line,
+ &sse0, &avg0);
+ sub_pixel_variance16x16_sse2(src_ptr + 16, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr + 16, dst_pixels_per_line,
+ &sse1, &avg1);
+ sub_pixel_variance16x16_sse2(src_ptr + 32, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr + 32, dst_pixels_per_line,
+ &sse2, &avg2);
+ sub_pixel_variance16x16_sse2(src_ptr + 48, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr + 48, dst_pixels_per_line,
+ &sse3, &avg3);
+ src_ptr += 16 * src_pixels_per_line;
+ dst_ptr += 16 * dst_pixels_per_line;
+ avg0 += avg1 + avg2 + avg3;
+ sse0 += sse1 + sse2 + sse3;
+ sub_pixel_variance16x16_sse2(src_ptr, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr, dst_pixels_per_line,
+ &sse1, &avg1);
+ sub_pixel_variance16x16_sse2(src_ptr + 16, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr + 16, dst_pixels_per_line,
+ &sse2, &avg2);
+ sub_pixel_variance16x16_sse2(src_ptr + 32, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr + 32, dst_pixels_per_line,
+ &sse3, &avg3);
+ sub_pixel_variance16x16_sse2(src_ptr + 48, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr + 48, dst_pixels_per_line,
+ &sse4, &avg4);
+ src_ptr += 16 * src_pixels_per_line;
+ dst_ptr += 16 * dst_pixels_per_line;
+ avg0 += avg1 + avg2 + avg3 + avg4;
+ sse0 += sse1 + sse2 + sse3 + sse4;
+ sub_pixel_variance16x16_sse2(src_ptr, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr, dst_pixels_per_line,
+ &sse1, &avg1);
+ sub_pixel_variance16x16_sse2(src_ptr + 16, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr + 16, dst_pixels_per_line,
+ &sse2, &avg2);
+ sub_pixel_variance16x16_sse2(src_ptr + 32, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr + 32, dst_pixels_per_line,
+ &sse3, &avg3);
+ sub_pixel_variance16x16_sse2(src_ptr + 48, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr + 48, dst_pixels_per_line,
+ &sse4, &avg4);
+ src_ptr += 16 * src_pixels_per_line;
+ dst_ptr += 16 * dst_pixels_per_line;
+ avg0 += avg1 + avg2 + avg3 + avg4;
+ sse0 += sse1 + sse2 + sse3 + sse4;
+ sub_pixel_variance16x16_sse2(src_ptr, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr, dst_pixels_per_line,
+ &sse1, &avg1);
+ sub_pixel_variance16x16_sse2(src_ptr + 16, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr + 16, dst_pixels_per_line,
+ &sse2, &avg2);
+ sub_pixel_variance16x16_sse2(src_ptr + 32, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr + 32, dst_pixels_per_line,
+ &sse3, &avg3);
+ sub_pixel_variance16x16_sse2(src_ptr + 48, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr + 48, dst_pixels_per_line,
+ &sse4, &avg4);
+ avg0 += avg1 + avg2 + avg3 + avg4;
+ sse0 += sse1 + sse2 + sse3 + sse4;
+ *sse_ptr = sse0;
+
+ return (sse0 - (((unsigned int) avg0 * avg0) >> 12));
+}
+
+unsigned int vp9_sub_pixel_mse16x16_sse2(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ vp9_sub_pixel_variance16x16_sse2(src_ptr, src_pixels_per_line, xoffset,
+ yoffset, dst_ptr, dst_pixels_per_line, sse);
+ return *sse;
+}
+
+unsigned int vp9_sub_pixel_variance16x8_wmt
+(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ int xsum0, xsum1;
+ unsigned int xxsum0, xxsum1;
+
+ if (xoffset == HALFNDX && yoffset == 0) {
+ vp9_half_horiz_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ &xsum0, &xxsum0);
+ } else if (xoffset == 0 && yoffset == HALFNDX) {
+ vp9_half_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ &xsum0, &xxsum0);
+ } else if (xoffset == HALFNDX && yoffset == HALFNDX) {
+ vp9_half_horiz_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ &xsum0, &xxsum0);
+ } else {
+ vp9_filter_block2d_bil_var_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ xoffset, yoffset,
+ &xsum0, &xxsum0);
+
+ vp9_filter_block2d_bil_var_sse2(
+ src_ptr + 8, src_pixels_per_line,
+ dst_ptr + 8, dst_pixels_per_line, 8,
+ xoffset, yoffset,
+ &xsum1, &xxsum1);
+ xsum0 += xsum1;
+ xxsum0 += xxsum1;
+ }
+
+ *sse = xxsum0;
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 7));
+}
+
+unsigned int vp9_sub_pixel_variance8x16_wmt
+(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ int xsum;
+ unsigned int xxsum;
+
+ if (xoffset == HALFNDX && yoffset == 0) {
+ vp9_half_horiz_variance8x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum, &xxsum);
+ } else if (xoffset == 0 && yoffset == HALFNDX) {
+ vp9_half_vert_variance8x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum, &xxsum);
+ } else if (xoffset == HALFNDX && yoffset == HALFNDX) {
+ vp9_half_horiz_vert_variance8x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum, &xxsum);
+ } else {
+ vp9_filter_block2d_bil_var_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ xoffset, yoffset,
+ &xsum, &xxsum);
+ }
+
+ *sse = xxsum;
+ return (xxsum - (((unsigned int)xsum * xsum) >> 7));
+}
+
+
+unsigned int vp9_variance_halfpixvar16x16_h_wmt(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ int xsum0;
+ unsigned int xxsum0;
+
+ vp9_half_horiz_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+
+ *sse = xxsum0;
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
+}
+
+
+unsigned int vp9_variance_halfpixvar16x16_v_wmt(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ int xsum0;
+ unsigned int xxsum0;
+ vp9_half_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+
+ *sse = xxsum0;
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
+}
+
+
+unsigned int vp9_variance_halfpixvar16x16_hv_wmt(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse) {
+ int xsum0;
+ unsigned int xxsum0;
+
+ vp9_half_horiz_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+
+ *sse = xxsum0;
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
+}
diff --git a/libvpx/vp9/encoder/x86/vp9_variance_ssse3.c b/libvpx/vp9/encoder/x86/vp9_variance_ssse3.c
new file mode 100644
index 0000000..882acad
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_variance_ssse3.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+#include "vp9/encoder/vp9_variance.h"
+#include "vp9/common/vp9_pragmas.h"
+#include "vpx_ports/mem.h"
+
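+/* Offset 8 is the half-pel position in the 16-step (1/16-pel) bilinear
+ * subpel range, for which dedicated fast kernels exist. */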
+#define HALFNDX 8
+
+extern void vp9_half_horiz_vert_variance16x_h_sse2
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
+);
+extern void vp9_half_horiz_variance16x_h_sse2
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
+);
+extern void vp9_half_vert_variance16x_h_sse2
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int *sum,
+ unsigned int *sumsquared
+);
+extern void vp9_filter_block2d_bil_var_ssse3
+(
+ const unsigned char *ref_ptr,
+ int ref_pixels_per_line,
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ unsigned int Height,
+ int xoffset,
+ int yoffset,
+ int *sum,
+ unsigned int *sumsquared
+);
+
+unsigned int vp9_sub_pixel_variance16x16_ssse3
+(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ int xsum0;
+ unsigned int xxsum0;
+
+ // Note: these branches could be avoided if the caller invoked the
+ // appropriate specialized kernel directly.
+ if (xoffset == HALFNDX && yoffset == 0) {
+ vp9_half_horiz_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+ } else if (xoffset == 0 && yoffset == HALFNDX) {
+ vp9_half_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+ } else if (xoffset == HALFNDX && yoffset == HALFNDX) {
+ vp9_half_horiz_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ &xsum0, &xxsum0);
+ } else {
+ vp9_filter_block2d_bil_var_ssse3(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 16,
+ xoffset, yoffset,
+ &xsum0, &xxsum0);
+ }
+
+ *sse = xxsum0;
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
+}
+
+unsigned int vp9_sub_pixel_variance16x8_ssse3
+(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+) {
+ int xsum0;
+ unsigned int xxsum0;
+
+ if (xoffset == HALFNDX && yoffset == 0) {
+ vp9_half_horiz_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ &xsum0, &xxsum0);
+ } else if (xoffset == 0 && yoffset == HALFNDX) {
+ vp9_half_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ &xsum0, &xxsum0);
+ } else if (xoffset == HALFNDX && yoffset == HALFNDX) {
+ vp9_half_horiz_vert_variance16x_h_sse2(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ &xsum0, &xxsum0);
+ } else {
+ vp9_filter_block2d_bil_var_ssse3(
+ src_ptr, src_pixels_per_line,
+ dst_ptr, dst_pixels_per_line, 8,
+ xoffset, yoffset,
+ &xsum0, &xxsum0);
+ }
+
+ *sse = xxsum0;
+ return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 7));
+}
diff --git a/libvpx/vp9/encoder/x86/vp9_x86_csystemdependent.c b/libvpx/vp9/encoder/x86/vp9_x86_csystemdependent.c
new file mode 100644
index 0000000..6016e14
--- /dev/null
+++ b/libvpx/vp9/encoder/x86/vp9_x86_csystemdependent.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "./vpx_config.h"
+#include "vpx_ports/x86.h"
+#include "vp9/encoder/vp9_variance.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vp9/encoder/x86/vp9_dct_mmx.h"
+
+// TODO(jimbankoski): consider rewriting the C versions to take the same
+// arguments rather than going through these pointer conversions.
+#if 0 && HAVE_MMX
+void vp9_short_fdct8x4_mmx(short *input, short *output, int pitch) {
+ vp9_short_fdct4x4_mmx(input, output, pitch);
+ vp9_short_fdct4x4_mmx(input + 4, output + 16, pitch);
+}
+
+void vp9_subtract_b_mmx_impl(unsigned char *z, int src_stride,
+ short *diff, unsigned char *predictor,
+ int pitch);
+void vp9_subtract_b_mmx(BLOCK *be, BLOCKD *bd, int pitch) {
+ unsigned char *z = *(be->base_src) + be->src;
+ unsigned int src_stride = be->src_stride;
+ short *diff = &be->src_diff[0];
+ unsigned char *predictor = *(bd->base_dst) + bd->dst;
+ // TODO(jingning): The prototype function in c has been changed. Need to
+ // modify the mmx and sse versions.
+ vp9_subtract_b_mmx_impl(z, src_stride, diff, predictor, pitch);
+}
+
+#endif
+
+#if 0 && HAVE_SSE2
+void vp9_subtract_b_sse2_impl(unsigned char *z, int src_stride,
+ short *diff, unsigned char *predictor,
+ int pitch);
+void vp9_subtract_b_sse2(BLOCK *be, BLOCKD *bd, int pitch) {
+ unsigned char *z = *(be->base_src) + be->src;
+ unsigned int src_stride = be->src_stride;
+ short *diff = &be->src_diff[0];
+ unsigned char *predictor = *(bd->base_dst) + bd->dst;
+ // TODO(jingning): The prototype function in c has been changed. Need to
+ // modify the mmx and sse versions.
+ vp9_subtract_b_sse2_impl(z, src_stride, diff, predictor, pitch);
+}
+
+#endif
diff --git a/libvpx/vp9/exports_dec b/libvpx/vp9/exports_dec
new file mode 100644
index 0000000..0a61fde
--- /dev/null
+++ b/libvpx/vp9/exports_dec
@@ -0,0 +1,2 @@
+data vpx_codec_vp9_dx_algo
+text vpx_codec_vp9_dx
diff --git a/libvpx/vp9/exports_enc b/libvpx/vp9/exports_enc
new file mode 100644
index 0000000..25156e8
--- /dev/null
+++ b/libvpx/vp9/exports_enc
@@ -0,0 +1,4 @@
+data vpx_codec_vp9_cx_algo
+text vpx_codec_vp9_cx
+data vpx_codec_vp9x_cx_algo
+text vpx_codec_vp9x_cx
diff --git a/libvpx/vp9/vp9_common.mk b/libvpx/vp9/vp9_common.mk
new file mode 100644
index 0000000..7a74833
--- /dev/null
+++ b/libvpx/vp9/vp9_common.mk
@@ -0,0 +1,106 @@
+##
+## Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+VP9_COMMON_SRCS-yes += vp9_common.mk
+VP9_COMMON_SRCS-yes += vp9_iface_common.h
+VP9_COMMON_SRCS-yes += common/vp9_pragmas.h
+VP9_COMMON_SRCS-yes += common/vp9_ppflags.h
+VP9_COMMON_SRCS-yes += common/vp9_onyx.h
+VP9_COMMON_SRCS-yes += common/vp9_alloccommon.c
+VP9_COMMON_SRCS-yes += common/vp9_asm_com_offsets.c
+VP9_COMMON_SRCS-yes += common/vp9_convolve.c
+VP9_COMMON_SRCS-yes += common/vp9_convolve.h
+VP9_COMMON_SRCS-yes += common/vp9_debugmodes.c
+VP9_COMMON_SRCS-yes += common/vp9_default_coef_probs.h
+VP9_COMMON_SRCS-yes += common/vp9_entropy.c
+VP9_COMMON_SRCS-yes += common/vp9_entropymode.c
+VP9_COMMON_SRCS-yes += common/vp9_entropymv.c
+VP9_COMMON_SRCS-yes += common/vp9_extend.c
+VP9_COMMON_SRCS-yes += common/vp9_filter.c
+VP9_COMMON_SRCS-yes += common/vp9_filter.h
+VP9_COMMON_SRCS-yes += common/vp9_findnearmv.c
+VP9_COMMON_SRCS-yes += common/generic/vp9_systemdependent.c
+VP9_COMMON_SRCS-yes += common/vp9_idct.c
+VP9_COMMON_SRCS-yes += common/vp9_alloccommon.h
+VP9_COMMON_SRCS-yes += common/vp9_blockd.h
+VP9_COMMON_SRCS-yes += common/vp9_common.h
+VP9_COMMON_SRCS-yes += common/vp9_entropy.h
+VP9_COMMON_SRCS-yes += common/vp9_entropymode.h
+VP9_COMMON_SRCS-yes += common/vp9_entropymv.h
+VP9_COMMON_SRCS-yes += common/vp9_enums.h
+VP9_COMMON_SRCS-yes += common/vp9_extend.h
+VP9_COMMON_SRCS-yes += common/vp9_findnearmv.h
+VP9_COMMON_SRCS-yes += common/vp9_idct.h
+VP9_COMMON_SRCS-yes += common/vp9_loopfilter.h
+VP9_COMMON_SRCS-yes += common/vp9_modecont.h
+VP9_COMMON_SRCS-yes += common/vp9_mv.h
+VP9_COMMON_SRCS-yes += common/vp9_onyxc_int.h
+VP9_COMMON_SRCS-yes += common/vp9_pred_common.h
+VP9_COMMON_SRCS-yes += common/vp9_pred_common.c
+VP9_COMMON_SRCS-yes += common/vp9_quant_common.h
+VP9_COMMON_SRCS-yes += common/vp9_reconinter.h
+VP9_COMMON_SRCS-yes += common/vp9_reconintra.h
+VP9_COMMON_SRCS-yes += common/vp9_rtcd.c
+VP9_COMMON_SRCS-yes += common/vp9_rtcd_defs.sh
+VP9_COMMON_SRCS-yes += common/vp9_sadmxn.h
+VP9_COMMON_SRCS-yes += common/vp9_subpelvar.h
+VP9_COMMON_SRCS-yes += common/vp9_seg_common.h
+VP9_COMMON_SRCS-yes += common/vp9_seg_common.c
+VP9_COMMON_SRCS-yes += common/vp9_systemdependent.h
+VP9_COMMON_SRCS-yes += common/vp9_textblit.h
+VP9_COMMON_SRCS-yes += common/vp9_tile_common.h
+VP9_COMMON_SRCS-yes += common/vp9_tile_common.c
+VP9_COMMON_SRCS-yes += common/vp9_treecoder.h
+VP9_COMMON_SRCS-yes += common/vp9_loopfilter.c
+VP9_COMMON_SRCS-yes += common/vp9_loopfilter_filters.c
+VP9_COMMON_SRCS-yes += common/vp9_mbpitch.c
+VP9_COMMON_SRCS-yes += common/vp9_modecont.c
+VP9_COMMON_SRCS-yes += common/vp9_modecontext.c
+VP9_COMMON_SRCS-yes += common/vp9_mvref_common.c
+VP9_COMMON_SRCS-yes += common/vp9_mvref_common.h
+VP9_COMMON_SRCS-yes += common/vp9_quant_common.c
+VP9_COMMON_SRCS-yes += common/vp9_reconinter.c
+VP9_COMMON_SRCS-yes += common/vp9_reconintra.c
+VP9_COMMON_SRCS-$(CONFIG_POSTPROC_VISUALIZER) += common/vp9_textblit.c
+VP9_COMMON_SRCS-yes += common/vp9_treecoder.c
+VP9_COMMON_SRCS-$(CONFIG_IMPLICIT_SEGMENTATION) += common/vp9_implicit_segmentation.c
+
+VP9_COMMON_SRCS-$(ARCH_X86)$(ARCH_X86_64) += common/x86/vp9_loopfilter_x86.h
+VP9_COMMON_SRCS-$(ARCH_X86)$(ARCH_X86_64) += common/x86/vp9_postproc_x86.h
+VP9_COMMON_SRCS-$(ARCH_X86)$(ARCH_X86_64) += common/x86/vp9_asm_stubs.c
+VP9_COMMON_SRCS-$(ARCH_X86)$(ARCH_X86_64) += common/x86/vp9_loopfilter_intrin_sse2.c
+VP9_COMMON_SRCS-$(CONFIG_POSTPROC) += common/vp9_postproc.h
+VP9_COMMON_SRCS-$(CONFIG_POSTPROC) += common/vp9_postproc.c
+VP9_COMMON_SRCS-$(HAVE_MMX) += common/x86/vp9_iwalsh_mmx.asm
+VP9_COMMON_SRCS-$(HAVE_MMX) += common/x86/vp9_recon_mmx.asm
+VP9_COMMON_SRCS-$(HAVE_MMX) += common/x86/vp9_loopfilter_mmx.asm
+VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_iwalsh_sse2.asm
+VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_loopfilter_sse2.asm
+VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_recon_sse2.asm
+VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_recon_wrapper_sse2.c
+VP9_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/vp9_subpixel_8t_ssse3.asm
+ifeq ($(CONFIG_POSTPROC),yes)
+VP9_COMMON_SRCS-$(HAVE_MMX) += common/x86/vp9_postproc_mmx.asm
+VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_postproc_sse2.asm
+endif
+
+# common (c)
+ifeq ($(CONFIG_CSM),yes)
+VP9_COMMON_SRCS-yes += common/vp9_maskingmv.c
+VP9_COMMON_SRCS-$(HAVE_SSE3) += common/x86/vp9_mask_sse3.asm
+endif
+
+VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_idct_intrin_sse2.c
+VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_sadmxn_sse2.c
+
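+# Generate the assembly-offsets file and the vp9_rtcd.h run-time CPU
+# dispatch header from their source definitions (see build/make/rtcd.sh).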
+$(eval $(call asm_offsets_template,\
+ vp9_asm_com_offsets.asm, $(VP9_PREFIX)common/vp9_asm_com_offsets.c))
+
+$(eval $(call rtcd_h_template,vp9_rtcd,vp9/common/vp9_rtcd_defs.sh))
diff --git a/libvpx/vp9/vp9_cx_iface.c b/libvpx/vp9/vp9_cx_iface.c
new file mode 100644
index 0000000..e5b5089
--- /dev/null
+++ b/libvpx/vp9/vp9_cx_iface.c
@@ -0,0 +1,1153 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vpx/vpx_codec.h"
+#include "vpx/internal/vpx_codec_internal.h"
+#include "vpx_version.h"
+#include "vp9/encoder/vp9_onyx_int.h"
+#include "vpx/vp8cx.h"
+#include "vp9/encoder/vp9_firstpass.h"
+#include "vp9/common/vp9_onyx.h"
+#include "vp9/vp9_iface_common.h"
+#include <stdlib.h>
+#include <string.h>
+
+struct vp9_extracfg {
+ struct vpx_codec_pkt_list *pkt_list;
+ int cpu_used; /* available cpu percentage, in units of 1/16 */
+ unsigned int enable_auto_alt_ref; /* allow the encoder to substitute alternate reference frames */
+ unsigned int noise_sensitivity;
+ unsigned int Sharpness;
+ unsigned int static_thresh;
+ unsigned int tile_columns;
+ unsigned int tile_rows;
+ unsigned int arnr_max_frames; /* alt_ref Noise Reduction Max Frame Count */
+ unsigned int arnr_strength; /* alt_ref Noise Reduction Strength */
+ unsigned int arnr_type; /* alt_ref filter type */
+ unsigned int experimental;
+ vp8e_tuning tuning;
+ unsigned int cq_level; /* constrained quality level */
+ unsigned int rc_max_intra_bitrate_pct;
+ unsigned int lossless;
+ unsigned int frame_parallel_decoding_mode;
+};
+
+struct extraconfig_map {
+ int usage;
+ struct vp9_extracfg cfg;
+};
+
+static const struct extraconfig_map extracfg_map[] = {
+ {
+ 0,
+ {
+ NULL,
+ 0, /* cpu_used */
+ 0, /* enable_auto_alt_ref */
+ 0, /* noise_sensitivity */
+ 0, /* Sharpness */
+ 0, /* static_thresh */
+ 0, /* tile_columns */
+ 0, /* tile_rows */
+ 0, /* arnr_max_frames */
+ 3, /* arnr_strength */
+ 3, /* arnr_type */
+ 0, /* experimental mode */
+ 0, /* tuning */
+ 10, /* cq_level */
+ 0, /* rc_max_intra_bitrate_pct */
+ 0, /* lossless */
+ 0, /* frame_parallel_decoding_mode */
+ }
+ }
+};
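+
+/* vp9e_common_init() scans this table for an entry whose usage matches
+ * g_usage; the usage-0 entry doubles as both the default configuration and
+ * the search sentinel. */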
+
+struct vpx_codec_alg_priv {
+ vpx_codec_priv_t base;
+ vpx_codec_enc_cfg_t cfg;
+ struct vp9_extracfg vp8_cfg;
+ VP9_CONFIG oxcf;
+ VP9_PTR cpi;
+ unsigned char *cx_data;
+ unsigned int cx_data_sz;
+ unsigned char *pending_cx_data;
+ unsigned int pending_cx_data_sz;
+ int pending_frame_count;
+ uint32_t pending_frame_sizes[8];
+ uint32_t pending_frame_magnitude;
+ vpx_image_t preview_img;
+ vp8_postproc_cfg_t preview_ppcfg;
+ vpx_codec_pkt_list_decl(64) pkt_list; // sized to accommodate the maximum number of lagged frames allowed
+ unsigned int fixed_kf_cntr;
+};
+
+
+static vpx_codec_err_t
+update_error_state(vpx_codec_alg_priv_t *ctx,
+ const struct vpx_internal_error_info *error) {
+ vpx_codec_err_t res;
+
+ if ((res = error->error_code))
+ ctx->base.err_detail = error->has_detail
+ ? error->detail
+ : NULL;
+
+ return res;
+}
+
+
+#undef ERROR
+#define ERROR(str) do {\
+ ctx->base.err_detail = str;\
+ return VPX_CODEC_INVALID_PARAM;\
+ } while(0)
+
+#define RANGE_CHECK(p,memb,lo,hi) do {\
+ if(!(((p)->memb == lo || (p)->memb > (lo)) && (p)->memb <= hi)) \
+ ERROR(#memb " out of range ["#lo".."#hi"]");\
+ } while(0)
+
+#define RANGE_CHECK_HI(p,memb,hi) do {\
+ if(!((p)->memb <= (hi))) \
+ ERROR(#memb " out of range [.."#hi"]");\
+ } while(0)
+
+#define RANGE_CHECK_LO(p,memb,lo) do {\
+ if(!((p)->memb >= (lo))) \
+ ERROR(#memb " out of range ["#lo"..]");\
+ } while(0)
+
+#define RANGE_CHECK_BOOL(p,memb) do {\
+ if(!!((p)->memb) != (p)->memb) ERROR(#memb " expected boolean");\
+ } while(0)
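+
+/* Each check above expands, via ERROR(), to an early
+ * "return VPX_CODEC_INVALID_PARAM", so these macros can only be used inside
+ * functions that return vpx_codec_err_t and have a local ctx. */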
+
+static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
+ const vpx_codec_enc_cfg_t *cfg,
+ const struct vp9_extracfg *vp8_cfg) {
+ RANGE_CHECK(cfg, g_w, 1, 65535); /* 16 bits available */
+ RANGE_CHECK(cfg, g_h, 1, 65535); /* 16 bits available */
+ RANGE_CHECK(cfg, g_timebase.den, 1, 1000000000);
+ RANGE_CHECK(cfg, g_timebase.num, 1, cfg->g_timebase.den);
+ RANGE_CHECK_HI(cfg, g_profile, 3);
+
+ RANGE_CHECK_HI(cfg, rc_max_quantizer, 63);
+ RANGE_CHECK_HI(cfg, rc_min_quantizer, cfg->rc_max_quantizer);
+ RANGE_CHECK_BOOL(vp8_cfg, lossless);
+ if (vp8_cfg->lossless) {
+ RANGE_CHECK_HI(cfg, rc_max_quantizer, 0);
+ RANGE_CHECK_HI(cfg, rc_min_quantizer, 0);
+ }
+
+ RANGE_CHECK_HI(cfg, g_threads, 64);
+ RANGE_CHECK_HI(cfg, g_lag_in_frames, MAX_LAG_BUFFERS);
+ RANGE_CHECK(cfg, rc_end_usage, VPX_VBR, VPX_CQ);
+ RANGE_CHECK_HI(cfg, rc_undershoot_pct, 1000);
+ RANGE_CHECK_HI(cfg, rc_overshoot_pct, 1000);
+ RANGE_CHECK_HI(cfg, rc_2pass_vbr_bias_pct, 100);
+ RANGE_CHECK(cfg, kf_mode, VPX_KF_DISABLED, VPX_KF_AUTO);
+ // RANGE_CHECK_BOOL(cfg, g_delete_firstpassfile);
+ RANGE_CHECK_BOOL(cfg, rc_resize_allowed);
+ RANGE_CHECK_HI(cfg, rc_dropframe_thresh, 100);
+ RANGE_CHECK_HI(cfg, rc_resize_up_thresh, 100);
+ RANGE_CHECK_HI(cfg, rc_resize_down_thresh, 100);
+ RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_LAST_PASS);
+
+ /* VP9 does not support a lower bound on the keyframe interval in
+ * automatic keyframe placement mode.
+ */
+ if (cfg->kf_mode != VPX_KF_DISABLED && cfg->kf_min_dist != cfg->kf_max_dist
+ && cfg->kf_min_dist > 0)
+ ERROR("kf_min_dist not supported in auto mode, use 0 "
+ "or kf_max_dist instead.");
+
+ RANGE_CHECK_BOOL(vp8_cfg, enable_auto_alt_ref);
+ RANGE_CHECK(vp8_cfg, cpu_used, -16, 16);
+
+ RANGE_CHECK_HI(vp8_cfg, noise_sensitivity, 6);
+
+ RANGE_CHECK(vp8_cfg, tile_columns, 0, 6);
+ RANGE_CHECK(vp8_cfg, tile_rows, 0, 2);
+ RANGE_CHECK_HI(vp8_cfg, Sharpness, 7);
+ RANGE_CHECK(vp8_cfg, arnr_max_frames, 0, 15);
+ RANGE_CHECK_HI(vp8_cfg, arnr_strength, 6);
+ RANGE_CHECK(vp8_cfg, arnr_type, 1, 3);
+ RANGE_CHECK(vp8_cfg, cq_level, 0, 63);
+
+ if (cfg->g_pass == VPX_RC_LAST_PASS) {
+ size_t packet_sz = sizeof(FIRSTPASS_STATS);
+ int n_packets = (int)(cfg->rc_twopass_stats_in.sz / packet_sz);
+ FIRSTPASS_STATS *stats;
+
+ if (!cfg->rc_twopass_stats_in.buf)
+ ERROR("rc_twopass_stats_in.buf not set.");
+
+ if (cfg->rc_twopass_stats_in.sz % packet_sz)
+ ERROR("rc_twopass_stats_in.sz indicates truncated packet.");
+
+ if (cfg->rc_twopass_stats_in.sz < 2 * packet_sz)
+ ERROR("rc_twopass_stats_in requires at least two packets.");
+
+ stats = (void *)((char *)cfg->rc_twopass_stats_in.buf
+ + (n_packets - 1) * packet_sz);
+
+ if ((int)(stats->count + 0.5) != n_packets - 1)
+ ERROR("rc_twopass_stats_in missing EOS stats packet");
+ }
+
+ return VPX_CODEC_OK;
+}
+
+
+static vpx_codec_err_t validate_img(vpx_codec_alg_priv_t *ctx,
+ const vpx_image_t *img) {
+ switch (img->fmt) {
+ case VPX_IMG_FMT_YV12:
+ case VPX_IMG_FMT_I420:
+ case VPX_IMG_FMT_I422:
+ case VPX_IMG_FMT_I444:
+ break;
+ default:
+ ERROR("Invalid image format. Only YV12, I420, I422, I444 images are "
+ "supported.");
+ }
+
+ if ((img->d_w != ctx->cfg.g_w) || (img->d_h != ctx->cfg.g_h))
+ ERROR("Image size must match encoder init configuration size");
+
+ return VPX_CODEC_OK;
+}
+
+
+static vpx_codec_err_t set_vp9e_config(VP9_CONFIG *oxcf,
+ vpx_codec_enc_cfg_t cfg,
+ struct vp9_extracfg vp8_cfg) {
+ oxcf->version = cfg.g_profile | (vp8_cfg.experimental ? 0x4 : 0);
+ oxcf->width = cfg.g_w;
+ oxcf->height = cfg.g_h;
+ /* Derive the frame rate from the timebase; if it looks implausible
+ * (> 180 fps), fall back to 30. */
+ oxcf->frame_rate = (double)(cfg.g_timebase.den) / (double)(cfg.g_timebase.num);
+
+ if (oxcf->frame_rate > 180) {
+ oxcf->frame_rate = 30;
+ }
+
+ switch (cfg.g_pass) {
+ case VPX_RC_ONE_PASS:
+ oxcf->Mode = MODE_BESTQUALITY;
+ break;
+ case VPX_RC_FIRST_PASS:
+ oxcf->Mode = MODE_FIRSTPASS;
+ break;
+ case VPX_RC_LAST_PASS:
+ oxcf->Mode = MODE_SECONDPASS_BEST;
+ break;
+ }
+
+ if (cfg.g_pass == VPX_RC_FIRST_PASS) {
+ oxcf->allow_lag = 0;
+ oxcf->lag_in_frames = 0;
+ } else {
+ oxcf->allow_lag = (cfg.g_lag_in_frames) > 0;
+ oxcf->lag_in_frames = cfg.g_lag_in_frames;
+ }
+
+ // Only VBR is supported for now: the CBR code has been deprecated for the
+ // experimental phase, and CQ mode is not yet tested.
+ oxcf->end_usage = USAGE_LOCAL_FILE_PLAYBACK;
+ /*if (cfg.rc_end_usage == VPX_CQ)
+ oxcf->end_usage = USAGE_CONSTRAINED_QUALITY;
+ else
+ oxcf->end_usage = USAGE_LOCAL_FILE_PLAYBACK;*/
+
+ oxcf->target_bandwidth = cfg.rc_target_bitrate;
+ oxcf->rc_max_intra_bitrate_pct = vp8_cfg.rc_max_intra_bitrate_pct;
+
+ oxcf->best_allowed_q = cfg.rc_min_quantizer;
+ oxcf->worst_allowed_q = cfg.rc_max_quantizer;
+ oxcf->cq_level = vp8_cfg.cq_level;
+ oxcf->fixed_q = -1;
+
+ oxcf->under_shoot_pct = cfg.rc_undershoot_pct;
+ oxcf->over_shoot_pct = cfg.rc_overshoot_pct;
+
+ oxcf->maximum_buffer_size = cfg.rc_buf_sz;
+ oxcf->starting_buffer_level = cfg.rc_buf_initial_sz;
+ oxcf->optimal_buffer_level = cfg.rc_buf_optimal_sz;
+
+ oxcf->two_pass_vbrbias = cfg.rc_2pass_vbr_bias_pct;
+ oxcf->two_pass_vbrmin_section = cfg.rc_2pass_vbr_minsection_pct;
+ oxcf->two_pass_vbrmax_section = cfg.rc_2pass_vbr_maxsection_pct;
+
+ oxcf->auto_key = cfg.kf_mode == VPX_KF_AUTO
+ && cfg.kf_min_dist != cfg.kf_max_dist;
+ // oxcf->kf_min_dist = cfg.kf_min_dis;
+ oxcf->key_freq = cfg.kf_max_dist;
+
+ // oxcf->delete_first_pass_file = cfg.g_delete_firstpassfile;
+ // strcpy(oxcf->first_pass_file, cfg.g_firstpass_file);
+
+ oxcf->cpu_used = vp8_cfg.cpu_used;
+ oxcf->encode_breakout = vp8_cfg.static_thresh;
+ oxcf->play_alternate = vp8_cfg.enable_auto_alt_ref;
+ oxcf->noise_sensitivity = vp8_cfg.noise_sensitivity;
+ oxcf->Sharpness = vp8_cfg.Sharpness;
+
+ oxcf->two_pass_stats_in = cfg.rc_twopass_stats_in;
+ oxcf->output_pkt_list = vp8_cfg.pkt_list;
+
+ oxcf->arnr_max_frames = vp8_cfg.arnr_max_frames;
+ oxcf->arnr_strength = vp8_cfg.arnr_strength;
+ oxcf->arnr_type = vp8_cfg.arnr_type;
+
+ oxcf->tuning = vp8_cfg.tuning;
+
+ oxcf->tile_columns = vp8_cfg.tile_columns;
+ oxcf->tile_rows = vp8_cfg.tile_rows;
+
+ oxcf->lossless = vp8_cfg.lossless;
+
+ oxcf->error_resilient_mode = cfg.g_error_resilient;
+ oxcf->frame_parallel_decoding_mode = vp8_cfg.frame_parallel_decoding_mode;
+ /*
+ printf("Current VP9 Settings: \n");
+ printf("target_bandwidth: %d\n", oxcf->target_bandwidth);
+ printf("noise_sensitivity: %d\n", oxcf->noise_sensitivity);
+ printf("Sharpness: %d\n", oxcf->Sharpness);
+ printf("cpu_used: %d\n", oxcf->cpu_used);
+ printf("Mode: %d\n", oxcf->Mode);
+ // printf("delete_first_pass_file: %d\n", oxcf->delete_first_pass_file);
+ printf("auto_key: %d\n", oxcf->auto_key);
+ printf("key_freq: %d\n", oxcf->key_freq);
+ printf("end_usage: %d\n", oxcf->end_usage);
+ printf("under_shoot_pct: %d\n", oxcf->under_shoot_pct);
+ printf("over_shoot_pct: %d\n", oxcf->over_shoot_pct);
+ printf("starting_buffer_level: %d\n", oxcf->starting_buffer_level);
+ printf("optimal_buffer_level: %d\n", oxcf->optimal_buffer_level);
+ printf("maximum_buffer_size: %d\n", oxcf->maximum_buffer_size);
+ printf("fixed_q: %d\n", oxcf->fixed_q);
+ printf("worst_allowed_q: %d\n", oxcf->worst_allowed_q);
+ printf("best_allowed_q: %d\n", oxcf->best_allowed_q);
+ printf("two_pass_vbrbias: %d\n", oxcf->two_pass_vbrbias);
+ printf("two_pass_vbrmin_section: %d\n", oxcf->two_pass_vbrmin_section);
+ printf("two_pass_vbrmax_section: %d\n", oxcf->two_pass_vbrmax_section);
+ printf("allow_lag: %d\n", oxcf->allow_lag);
+ printf("lag_in_frames: %d\n", oxcf->lag_in_frames);
+ printf("play_alternate: %d\n", oxcf->play_alternate);
+ printf("Version: %d\n", oxcf->Version);
+ printf("encode_breakout: %d\n", oxcf->encode_breakout);
+ printf("error resilient: %d\n", oxcf->error_resilient_mode);
+ printf("frame parallel detokenization: %d\n",
+ oxcf->frame_parallel_decoding_mode);
+ */
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t vp9e_set_config(vpx_codec_alg_priv_t *ctx,
+ const vpx_codec_enc_cfg_t *cfg) {
+ vpx_codec_err_t res;
+
+ if ((cfg->g_w != ctx->cfg.g_w) || (cfg->g_h != ctx->cfg.g_h))
+ ERROR("Cannot change width or height after initialization");
+
+ /* Prevent increasing lag_in_frames. This check is stricter than it needs
+ * to be -- the limit is not increasing past the first lag_in_frames
+ * value, but we don't track the initial config, only the last successful
+ * config.
+ */
+ if ((cfg->g_lag_in_frames > ctx->cfg.g_lag_in_frames))
+ ERROR("Cannot increase lag_in_frames");
+
+ res = validate_config(ctx, cfg, &ctx->vp8_cfg);
+
+ if (!res) {
+ ctx->cfg = *cfg;
+ set_vp9e_config(&ctx->oxcf, ctx->cfg, ctx->vp8_cfg);
+ vp9_change_config(ctx->cpi, &ctx->oxcf);
+ }
+
+ return res;
+}
+
+
+int vp9_reverse_trans(int q);
+
+
+static vpx_codec_err_t get_param(vpx_codec_alg_priv_t *ctx,
+ int ctrl_id,
+ va_list args) {
+ void *arg = va_arg(args, void *);
+
+#define MAP(id, var) case id: *(RECAST(id, arg)) = var; break
+
+ if (!arg)
+ return VPX_CODEC_INVALID_PARAM;
+
+ switch (ctrl_id) {
+ MAP(VP8E_GET_LAST_QUANTIZER, vp9_get_quantizer(ctx->cpi));
+ MAP(VP8E_GET_LAST_QUANTIZER_64,
+ vp9_reverse_trans(vp9_get_quantizer(ctx->cpi)));
+ }
+
+ return VPX_CODEC_OK;
+#undef MAP
+}
+
+
+static vpx_codec_err_t set_param(vpx_codec_alg_priv_t *ctx,
+ int ctrl_id,
+ va_list args) {
+ vpx_codec_err_t res = VPX_CODEC_OK;
+ struct vp9_extracfg xcfg = ctx->vp8_cfg;
+
+#define MAP(id, var) case id: var = CAST(id, args); break;
+
+ switch (ctrl_id) {
+ MAP(VP8E_SET_CPUUSED, xcfg.cpu_used);
+ MAP(VP8E_SET_ENABLEAUTOALTREF, xcfg.enable_auto_alt_ref);
+ MAP(VP8E_SET_NOISE_SENSITIVITY, xcfg.noise_sensitivity);
+ MAP(VP8E_SET_SHARPNESS, xcfg.Sharpness);
+ MAP(VP8E_SET_STATIC_THRESHOLD, xcfg.static_thresh);
+ MAP(VP9E_SET_TILE_COLUMNS, xcfg.tile_columns);
+ MAP(VP9E_SET_TILE_ROWS, xcfg.tile_rows);
+
+ MAP(VP8E_SET_ARNR_MAXFRAMES, xcfg.arnr_max_frames);
+ MAP(VP8E_SET_ARNR_STRENGTH, xcfg.arnr_strength);
+ MAP(VP8E_SET_ARNR_TYPE, xcfg.arnr_type);
+ MAP(VP8E_SET_TUNING, xcfg.tuning);
+ MAP(VP8E_SET_CQ_LEVEL, xcfg.cq_level);
+ MAP(VP8E_SET_MAX_INTRA_BITRATE_PCT, xcfg.rc_max_intra_bitrate_pct);
+ MAP(VP9E_SET_LOSSLESS, xcfg.lossless);
+ MAP(VP9E_SET_FRAME_PARALLEL_DECODING, xcfg.frame_parallel_decoding_mode);
+ }
+
+ res = validate_config(ctx, &ctx->cfg, &xcfg);
+
+ if (!res) {
+ ctx->vp8_cfg = xcfg;
+ set_vp9e_config(&ctx->oxcf, ctx->cfg, ctx->vp8_cfg);
+ vp9_change_config(ctx->cpi, &ctx->oxcf);
+ }
+
+ return res;
+#undef MAP
+}
+
+
+static vpx_codec_err_t vp9e_common_init(vpx_codec_ctx_t *ctx,
+ int experimental) {
+ vpx_codec_err_t res = VPX_CODEC_OK;
+ struct vpx_codec_alg_priv *priv;
+ vpx_codec_enc_cfg_t *cfg;
+ unsigned int i;
+
+ VP9_PTR optr;
+
+ if (!ctx->priv) {
+ priv = calloc(1, sizeof(struct vpx_codec_alg_priv));
+
+ if (!priv) {
+ return VPX_CODEC_MEM_ERROR;
+ }
+
+ ctx->priv = &priv->base;
+ ctx->priv->sz = sizeof(*ctx->priv);
+ ctx->priv->iface = ctx->iface;
+ ctx->priv->alg_priv = priv;
+ ctx->priv->init_flags = ctx->init_flags;
+ ctx->priv->enc.total_encoders = 1;
+
+ if (ctx->config.enc) {
+ /* Update the reference to the config structure to an
+ * internal copy.
+ */
+ ctx->priv->alg_priv->cfg = *ctx->config.enc;
+ ctx->config.enc = &ctx->priv->alg_priv->cfg;
+ }
+
+ cfg = &ctx->priv->alg_priv->cfg;
+
+ /* Select the extra vp9 configuration table based on the current
+ * usage value. If the current usage value isn't found, use the
+ * values for usage case 0.
+ */
+ for (i = 0;
+ extracfg_map[i].usage && extracfg_map[i].usage != cfg->g_usage;
+ i++);
+
+ priv->vp8_cfg = extracfg_map[i].cfg;
+ priv->vp8_cfg.pkt_list = &priv->pkt_list.head;
+ priv->vp8_cfg.experimental = experimental;
+
+ // TODO(agrange) Check the limits set on this buffer, or the check that is
+ // applied in vp9e_encode.
+ priv->cx_data_sz = priv->cfg.g_w * priv->cfg.g_h * 3 / 2 * 8;
+// priv->cx_data_sz = priv->cfg.g_w * priv->cfg.g_h * 3 / 2 * 2;
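+ // g_w * g_h * 3 / 2 is the size of one uncompressed 4:2:0 frame in bytes;
+ // the extra factor of 8 presumably leaves headroom for lagged and
+ // invisible frames.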
+
+ if (priv->cx_data_sz < 4096) priv->cx_data_sz = 4096;
+
+ priv->cx_data = malloc(priv->cx_data_sz);
+
+ if (!priv->cx_data) {
+ return VPX_CODEC_MEM_ERROR;
+ }
+
+ vp9_initialize_enc();
+
+ res = validate_config(priv, &priv->cfg, &priv->vp8_cfg);
+
+ if (!res) {
+ set_vp9e_config(&ctx->priv->alg_priv->oxcf,
+ ctx->priv->alg_priv->cfg,
+ ctx->priv->alg_priv->vp8_cfg);
+ optr = vp9_create_compressor(&ctx->priv->alg_priv->oxcf);
+
+ if (!optr)
+ res = VPX_CODEC_MEM_ERROR;
+ else
+ ctx->priv->alg_priv->cpi = optr;
+ }
+ }
+
+ return res;
+}
+
+
+static vpx_codec_err_t vp9e_init(vpx_codec_ctx_t *ctx,
+ vpx_codec_priv_enc_mr_cfg_t *data) {
+ return vp9e_common_init(ctx, 0);
+}
+
+
+#if CONFIG_EXPERIMENTAL
+static vpx_codec_err_t vp9e_exp_init(vpx_codec_ctx_t *ctx,
+ vpx_codec_priv_enc_mr_cfg_t *data) {
+ return vp9e_common_init(ctx, 1);
+}
+#endif
+
+
+static vpx_codec_err_t vp9e_destroy(vpx_codec_alg_priv_t *ctx) {
+ free(ctx->cx_data);
+ vp9_remove_compressor(&ctx->cpi);
+ free(ctx);
+ return VPX_CODEC_OK;
+}
+
+static void pick_quickcompress_mode(vpx_codec_alg_priv_t *ctx,
+ unsigned long duration,
+ unsigned long deadline) {
+ unsigned int new_qc;
+
+ /* Use best quality mode if no deadline is given. */
+ if (deadline)
+ new_qc = MODE_GOODQUALITY;
+ else
+ new_qc = MODE_BESTQUALITY;
+
+ if (ctx->cfg.g_pass == VPX_RC_FIRST_PASS)
+ new_qc = MODE_FIRSTPASS;
+ else if (ctx->cfg.g_pass == VPX_RC_LAST_PASS)
+ new_qc = (new_qc == MODE_BESTQUALITY)
+ ? MODE_SECONDPASS_BEST
+ : MODE_SECONDPASS;
+
+ if (ctx->oxcf.Mode != new_qc) {
+ ctx->oxcf.Mode = new_qc;
+ vp9_change_config(ctx->cpi, &ctx->oxcf);
+ }
+}
+
+
+static int write_superframe_index(vpx_codec_alg_priv_t *ctx) {
+ uint8_t marker = 0xc0;
+ int mag, mask, index_sz;
+
+ assert(ctx->pending_frame_count);
+ assert(ctx->pending_frame_count <= 8);
+
+ /* Add the number of frames to the marker byte */
+ marker |= ctx->pending_frame_count - 1;
+
+ /* Choose the magnitude */
+ for (mag = 0, mask = 0xff; mag < 4; mag++) {
+ if (ctx->pending_frame_magnitude < mask)
+ break;
+ mask <<= 8;
+ mask |= 0xff;
+ }
+ marker |= mag << 3;
+
+ /* Write the index */
+ index_sz = 2 + (mag + 1) * ctx->pending_frame_count;
+ if (ctx->pending_cx_data_sz + index_sz < ctx->cx_data_sz) {
+ uint8_t *x = ctx->pending_cx_data + ctx->pending_cx_data_sz;
+ int i, j;
+
+ *x++ = marker;
+ for (i = 0; i < ctx->pending_frame_count; i++) {
+ int this_sz = ctx->pending_frame_sizes[i];
+
+ for (j = 0; j <= mag; j++) {
+ *x++ = this_sz & 0xff;
+ this_sz >>= 8;
+ }
+ }
+ *x++ = marker;
+ ctx->pending_cx_data_sz += index_sz;
+ }
+ return index_sz;
+}
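+
+/* Worked example: two pending frames of 0x1234 and 0x56 bytes give
+ * pending_frame_magnitude = 0x1234 | 0x56 = 0x1276, which exceeds 0xff, so
+ * mag = 1 (two bytes per size). Then marker = 0xc0 | (1 << 3) | (2 - 1) =
+ * 0xc9 and index_sz = 2 + 2 * 2 = 6, and the bytes appended after the frame
+ * data are:
+ *   c9 34 12 56 00 c9
+ * (sizes little-endian, with the marker byte repeated at both ends). */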
+
+static vpx_codec_err_t vp9e_encode(vpx_codec_alg_priv_t *ctx,
+ const vpx_image_t *img,
+ vpx_codec_pts_t pts,
+ unsigned long duration,
+ vpx_enc_frame_flags_t flags,
+ unsigned long deadline) {
+ vpx_codec_err_t res = VPX_CODEC_OK;
+
+ if (img)
+ res = validate_img(ctx, img);
+
+ pick_quickcompress_mode(ctx, duration, deadline);
+ vpx_codec_pkt_list_init(&ctx->pkt_list);
+
+ /* Handle Flags */
+ if (((flags & VP8_EFLAG_NO_UPD_GF) && (flags & VP8_EFLAG_FORCE_GF))
+ || ((flags & VP8_EFLAG_NO_UPD_ARF) && (flags & VP8_EFLAG_FORCE_ARF))) {
+ ctx->base.err_detail = "Conflicting flags.";
+ return VPX_CODEC_INVALID_PARAM;
+ }
+
+ if (flags & (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF
+ | VP8_EFLAG_NO_REF_ARF)) {
+ int ref = 7;
+
+ if (flags & VP8_EFLAG_NO_REF_LAST)
+ ref ^= VP9_LAST_FLAG;
+
+ if (flags & VP8_EFLAG_NO_REF_GF)
+ ref ^= VP9_GOLD_FLAG;
+
+ if (flags & VP8_EFLAG_NO_REF_ARF)
+ ref ^= VP9_ALT_FLAG;
+
+ vp9_use_as_reference(ctx->cpi, ref);
+ }
+
+ if (flags & (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF
+ | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_FORCE_GF
+ | VP8_EFLAG_FORCE_ARF)) {
+ int upd = 7;
+
+ if (flags & VP8_EFLAG_NO_UPD_LAST)
+ upd ^= VP9_LAST_FLAG;
+
+ if (flags & VP8_EFLAG_NO_UPD_GF)
+ upd ^= VP9_GOLD_FLAG;
+
+ if (flags & VP8_EFLAG_NO_UPD_ARF)
+ upd ^= VP9_ALT_FLAG;
+
+ vp9_update_reference(ctx->cpi, upd);
+ }
+
+ if (flags & VP8_EFLAG_NO_UPD_ENTROPY) {
+ vp9_update_entropy(ctx->cpi, 0);
+ }
+
+ /* Handle fixed keyframe intervals */
+ if (ctx->cfg.kf_mode == VPX_KF_AUTO
+ && ctx->cfg.kf_min_dist == ctx->cfg.kf_max_dist) {
+ if (++ctx->fixed_kf_cntr > ctx->cfg.kf_min_dist) {
+ flags |= VPX_EFLAG_FORCE_KF;
+ ctx->fixed_kf_cntr = 1;
+ }
+ }
+
+ /* Initialize the encoder instance on the first frame*/
+ if (!res && ctx->cpi) {
+ unsigned int lib_flags;
+ YV12_BUFFER_CONFIG sd;
+ int64_t dst_time_stamp, dst_end_time_stamp;
+ unsigned long size, cx_data_sz;
+ unsigned char *cx_data;
+
+ /* Set up internal flags */
+ if (ctx->base.init_flags & VPX_CODEC_USE_PSNR)
+ ((VP9_COMP *)ctx->cpi)->b_calculate_psnr = 1;
+
+ // if (ctx->base.init_flags & VPX_CODEC_USE_OUTPUT_PARTITION)
+ // ((VP9_COMP *)ctx->cpi)->output_partition = 1;
+
+ /* Convert API flags to internal codec lib flags */
+ lib_flags = (flags & VPX_EFLAG_FORCE_KF) ? FRAMEFLAGS_KEY : 0;
+
+ /* vp9 uses 10,000,000 ticks/second as its time stamp unit */
+ dst_time_stamp = pts * 10000000 * ctx->cfg.g_timebase.num / ctx->cfg.g_timebase.den;
+ dst_end_time_stamp = (pts + duration) * 10000000 * ctx->cfg.g_timebase.num / ctx->cfg.g_timebase.den;
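+ // e.g. with g_timebase = {1, 30}, pts = 60 (two seconds) maps to
+ // 60 * 10000000 * 1 / 30 = 20000000 ticks.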
+
+ if (img != NULL) {
+ res = image2yuvconfig(img, &sd);
+
+ if (vp9_receive_raw_frame(ctx->cpi, lib_flags,
+ &sd, dst_time_stamp, dst_end_time_stamp)) {
+ VP9_COMP *cpi = (VP9_COMP *)ctx->cpi;
+ res = update_error_state(ctx, &cpi->common.error);
+ }
+ }
+
+ cx_data = ctx->cx_data;
+ cx_data_sz = ctx->cx_data_sz;
+ lib_flags = 0;
+
+ /* Any pending invisible frames? */
+ if (ctx->pending_cx_data) {
+ memmove(cx_data, ctx->pending_cx_data, ctx->pending_cx_data_sz);
+ ctx->pending_cx_data = cx_data;
+ cx_data += ctx->pending_cx_data_sz;
+ cx_data_sz -= ctx->pending_cx_data_sz;
+
+ /* TODO: this is a minimal check, the underlying codec doesn't respect
+ * the buffer size anyway.
+ */
+ if (cx_data_sz < ctx->cx_data_sz / 2) {
+ ctx->base.err_detail = "Compressed data buffer too small";
+ return VPX_CODEC_ERROR;
+ }
+ }
+
+ while (cx_data_sz >= ctx->cx_data_sz / 2 &&
+ -1 != vp9_get_compressed_data(ctx->cpi, &lib_flags, &size,
+ cx_data, &dst_time_stamp,
+ &dst_end_time_stamp, !img)) {
+ if (size) {
+ vpx_codec_pts_t round, delta;
+ vpx_codec_cx_pkt_t pkt;
+ VP9_COMP *cpi = (VP9_COMP *)ctx->cpi;
+
+ /* Pack invisible frames with the next visible frame */
+ if (!cpi->common.show_frame) {
+ if (!ctx->pending_cx_data)
+ ctx->pending_cx_data = cx_data;
+ ctx->pending_cx_data_sz += size;
+ ctx->pending_frame_sizes[ctx->pending_frame_count++] = size;
+ ctx->pending_frame_magnitude |= size;
+ cx_data += size;
+ cx_data_sz -= size;
+ continue;
+ }
+
+ /* Add the frame packet to the list of returned packets. */
+ round = 1000000 * ctx->cfg.g_timebase.num / 2 - 1;
+ delta = (dst_end_time_stamp - dst_time_stamp);
+ pkt.kind = VPX_CODEC_CX_FRAME_PKT;
+ pkt.data.frame.pts =
+ (dst_time_stamp * ctx->cfg.g_timebase.den + round)
+ / ctx->cfg.g_timebase.num / 10000000;
+ pkt.data.frame.duration = (unsigned long)
+ ((delta * ctx->cfg.g_timebase.den + round)
+ / ctx->cfg.g_timebase.num / 10000000);
+ pkt.data.frame.flags = lib_flags << 16;
+
+ if (lib_flags & FRAMEFLAGS_KEY)
+ pkt.data.frame.flags |= VPX_FRAME_IS_KEY;
+
+ if (!cpi->common.show_frame) {
+ pkt.data.frame.flags |= VPX_FRAME_IS_INVISIBLE;
+
+ // This timestamp should be as close as possible to the prior PTS so
+ // that, if a decoder uses the PTS to schedule decoding, it starts right
+ // after the last frame was decoded. Invisible frames have no duration.
+ pkt.data.frame.pts = ((cpi->last_time_stamp_seen
+ * ctx->cfg.g_timebase.den + round)
+ / ctx->cfg.g_timebase.num / 10000000) + 1;
+ pkt.data.frame.duration = 0;
+ }
+
+ if (cpi->droppable)
+ pkt.data.frame.flags |= VPX_FRAME_IS_DROPPABLE;
+
+ /*if (cpi->output_partition)
+ {
+ int i;
+ const int num_partitions = 1;
+
+ pkt.data.frame.flags |= VPX_FRAME_IS_FRAGMENT;
+
+ for (i = 0; i < num_partitions; ++i)
+ {
+ pkt.data.frame.buf = cx_data;
+ pkt.data.frame.sz = cpi->partition_sz[i];
+ pkt.data.frame.partition_id = i;
+ // don't set the fragment bit for the last partition
+ if (i == (num_partitions - 1))
+ pkt.data.frame.flags &= ~VPX_FRAME_IS_FRAGMENT;
+ vpx_codec_pkt_list_add(&ctx->pkt_list.head, &pkt);
+ cx_data += cpi->partition_sz[i];
+ cx_data_sz -= cpi->partition_sz[i];
+ }
+ }
+ else*/
+ {
+ if (ctx->pending_cx_data) {
+ ctx->pending_frame_sizes[ctx->pending_frame_count++] = size;
+ ctx->pending_frame_magnitude |= size;
+ ctx->pending_cx_data_sz += size;
+ size += write_superframe_index(ctx);
+ pkt.data.frame.buf = ctx->pending_cx_data;
+ pkt.data.frame.sz = ctx->pending_cx_data_sz;
+ ctx->pending_cx_data = NULL;
+ ctx->pending_cx_data_sz = 0;
+ ctx->pending_frame_count = 0;
+ ctx->pending_frame_magnitude = 0;
+ } else {
+ pkt.data.frame.buf = cx_data;
+ pkt.data.frame.sz = size;
+ }
+ pkt.data.frame.partition_id = -1;
+ vpx_codec_pkt_list_add(&ctx->pkt_list.head, &pkt);
+ cx_data += size;
+ cx_data_sz -= size;
+ }
+
+ // printf("timestamp: %lld, duration: %d\n", pkt->data.frame.pts, pkt->data.frame.duration);
+ }
+ }
+ }
+
+ return res;
+}
+
+
+static const vpx_codec_cx_pkt_t *vp9e_get_cxdata(vpx_codec_alg_priv_t *ctx,
+ vpx_codec_iter_t *iter) {
+ return vpx_codec_pkt_list_get(&ctx->pkt_list.head, iter);
+}
+
+static vpx_codec_err_t vp9e_set_reference(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
+
+ if (data) {
+ vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
+ YV12_BUFFER_CONFIG sd;
+
+ image2yuvconfig(&frame->img, &sd);
+ vp9_set_reference_enc(ctx->cpi, frame->frame_type, &sd);
+ return VPX_CODEC_OK;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+}
+
+static vpx_codec_err_t vp9e_copy_reference(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
+
+ if (data) {
+ vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
+ YV12_BUFFER_CONFIG sd;
+
+ image2yuvconfig(&frame->img, &sd);
+ vp9_copy_reference_enc(ctx->cpi, frame->frame_type, &sd);
+ return VPX_CODEC_OK;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+}
+
+static vpx_codec_err_t get_reference(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ vp9_ref_frame_t *data = va_arg(args, vp9_ref_frame_t *);
+
+ if (data) {
+ YV12_BUFFER_CONFIG* fb;
+
+ vp9_get_reference_enc(ctx->cpi, data->idx, &fb);
+ yuvconfig2image(&data->img, fb, NULL);
+ return VPX_CODEC_OK;
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+}
+
+static vpx_codec_err_t vp9e_set_previewpp(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+#if CONFIG_POSTPROC
+ vp8_postproc_cfg_t *data = va_arg(args, vp8_postproc_cfg_t *);
+ (void)ctr_id;
+
+ if (data) {
+ ctx->preview_ppcfg = *((vp8_postproc_cfg_t *)data);
+ return VPX_CODEC_OK;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+#else
+ (void)ctx;
+ (void)ctr_id;
+ (void)args;
+ return VPX_CODEC_INCAPABLE;
+#endif
+}
+
+
+static vpx_image_t *vp9e_get_preview(vpx_codec_alg_priv_t *ctx) {
+ YV12_BUFFER_CONFIG sd;
+ vp9_ppflags_t flags = {0};
+
+ if (ctx->preview_ppcfg.post_proc_flag) {
+ flags.post_proc_flag = ctx->preview_ppcfg.post_proc_flag;
+ flags.deblocking_level = ctx->preview_ppcfg.deblocking_level;
+ flags.noise_level = ctx->preview_ppcfg.noise_level;
+ }
+
+ if (0 == vp9_get_preview_raw_frame(ctx->cpi, &sd, &flags)) {
+ yuvconfig2image(&ctx->preview_img, &sd, NULL);
+ return &ctx->preview_img;
+ } else
+ return NULL;
+}
+
+static vpx_codec_err_t vp9e_update_entropy(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ int update = va_arg(args, int);
+ vp9_update_entropy(ctx->cpi, update);
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t vp9e_update_reference(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ int update = va_arg(args, int);
+ vp9_update_reference(ctx->cpi, update);
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t vp9e_use_reference(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ int reference_flag = va_arg(args, int);
+ vp9_use_as_reference(ctx->cpi, reference_flag);
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t vp9e_set_roi_map(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ vpx_roi_map_t *data = va_arg(args, vpx_roi_map_t *);
+
+ if (data) {
+ vpx_roi_map_t *roi = (vpx_roi_map_t *)data;
+
+ if (!vp9_set_roimap(ctx->cpi, roi->roi_map, roi->rows, roi->cols,
+ roi->delta_q, roi->delta_lf, roi->static_threshold))
+ return VPX_CODEC_OK;
+ else
+ return VPX_CODEC_INVALID_PARAM;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+}
+
+
+static vpx_codec_err_t vp9e_set_activemap(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ vpx_active_map_t *data = va_arg(args, vpx_active_map_t *);
+
+ if (data) {
+ vpx_active_map_t *map = (vpx_active_map_t *)data;
+
+ if (!vp9_set_active_map(ctx->cpi, map->active_map, map->rows, map->cols))
+ return VPX_CODEC_OK;
+ else
+ return VPX_CODEC_INVALID_PARAM;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+}
+
+static vpx_codec_err_t vp9e_set_scalemode(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ vpx_scaling_mode_t *data = va_arg(args, vpx_scaling_mode_t *);
+
+ if (data) {
+ int res;
+ vpx_scaling_mode_t scalemode = *(vpx_scaling_mode_t *)data;
+ res = vp9_set_internal_size(ctx->cpi, scalemode.h_scaling_mode,
+ scalemode.v_scaling_mode);
+
+ if (!res) {
+ return VPX_CODEC_OK;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+}
+
+
+static vpx_codec_ctrl_fn_map_t vp9e_ctf_maps[] = {
+ {VP8_SET_REFERENCE, vp9e_set_reference},
+ {VP8_COPY_REFERENCE, vp9e_copy_reference},
+ {VP8_SET_POSTPROC, vp9e_set_previewpp},
+ {VP8E_UPD_ENTROPY, vp9e_update_entropy},
+ {VP8E_UPD_REFERENCE, vp9e_update_reference},
+ {VP8E_USE_REFERENCE, vp9e_use_reference},
+ {VP8E_SET_ROI_MAP, vp9e_set_roi_map},
+ {VP8E_SET_ACTIVEMAP, vp9e_set_activemap},
+ {VP8E_SET_SCALEMODE, vp9e_set_scalemode},
+ {VP8E_SET_CPUUSED, set_param},
+ {VP8E_SET_NOISE_SENSITIVITY, set_param},
+ {VP8E_SET_ENABLEAUTOALTREF, set_param},
+ {VP8E_SET_SHARPNESS, set_param},
+ {VP8E_SET_STATIC_THRESHOLD, set_param},
+ {VP9E_SET_TILE_COLUMNS, set_param},
+ {VP9E_SET_TILE_ROWS, set_param},
+ {VP8E_GET_LAST_QUANTIZER, get_param},
+ {VP8E_GET_LAST_QUANTIZER_64, get_param},
+ {VP8E_SET_ARNR_MAXFRAMES, set_param},
+ {VP8E_SET_ARNR_STRENGTH, set_param},
+ {VP8E_SET_ARNR_TYPE, set_param},
+ {VP8E_SET_TUNING, set_param},
+ {VP8E_SET_CQ_LEVEL, set_param},
+ {VP8E_SET_MAX_INTRA_BITRATE_PCT, set_param},
+ {VP9E_SET_LOSSLESS, set_param},
+ {VP9_GET_REFERENCE, get_reference},
+ { -1, NULL},
+};
+
+static vpx_codec_enc_cfg_map_t vp9e_usage_cfg_map[] = {
+ {
+ 0,
+ {
+ 0, /* g_usage */
+ 0, /* g_threads */
+ 0, /* g_profile */
+
+ 320, /* g_width */
+ 240, /* g_height */
+ {1, 30}, /* g_timebase */
+
+ 0, /* g_error_resilient */
+
+ VPX_RC_ONE_PASS, /* g_pass */
+
+ 0, /* g_lag_in_frames */
+
+ 0, /* rc_dropframe_thresh */
+ 0, /* rc_resize_allowed */
+      60,                 /* rc_resize_down_thresh */
+      30,                 /* rc_resize_up_thresh */
+
+ VPX_VBR, /* rc_end_usage */
+#if VPX_ENCODER_ABI_VERSION > (1 + VPX_CODEC_ABI_VERSION)
+ {0}, /* rc_twopass_stats_in */
+#endif
+      256,                /* rc_target_bitrate */
+ 4, /* rc_min_quantizer */
+ 63, /* rc_max_quantizer */
+ 100, /* rc_undershoot_pct */
+ 100, /* rc_overshoot_pct */
+
+      6000,               /* rc_buffer_sz */
+      4000,               /* rc_buffer_initial_sz */
+      5000,               /* rc_buffer_optimal_sz */
+
+ 50, /* rc_two_pass_vbrbias */
+ 0, /* rc_two_pass_vbrmin_section */
+ 400, /* rc_two_pass_vbrmax_section */
+
+ /* keyframing settings (kf) */
+      VPX_KF_AUTO,        /* kf_mode */
+ 0, /* kf_min_dist */
+ 9999, /* kf_max_dist */
+
+#if VPX_ENCODER_ABI_VERSION == (1 + VPX_CODEC_ABI_VERSION)
+ 1, /* g_delete_first_pass_file */
+ "vp8.fpf" /* first pass filename */
+#endif
+ }
+ },
+ { -1, {NOT_IMPLEMENTED}}
+};
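
Note: this table is what vpx_codec_enc_config_default() copies out for usage 0, so applications start from the 320x240 / 256 kbit/s defaults above and override fields before init. A sketch with illustrative target values (default_cfg_720p is not part of this commit):

    #include "vpx/vpx_encoder.h"
    #include "vpx/vp8cx.h"

    /* Fetch the usage-0 defaults above and override a few fields. */
    static vpx_codec_err_t default_cfg_720p(vpx_codec_enc_cfg_t *cfg) {
      vpx_codec_err_t res =
          vpx_codec_enc_config_default(vpx_codec_vp9_cx(), cfg, 0);
      if (res != VPX_CODEC_OK)
        return res;

      cfg->g_w = 1280;                /* replaces the 320x240 default */
      cfg->g_h = 720;
      cfg->g_timebase.num = 1;
      cfg->g_timebase.den = 25;
      cfg->rc_target_bitrate = 1200;  /* kbit/s, replaces the 256 default */
      return VPX_CODEC_OK;
    }
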
+
+
+#ifndef VERSION_STRING
+#define VERSION_STRING
+#endif
+CODEC_INTERFACE(vpx_codec_vp9_cx) = {
+ "WebM Project VP9 Encoder" VERSION_STRING,
+ VPX_CODEC_INTERNAL_ABI_VERSION,
+ VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR |
+ VPX_CODEC_CAP_OUTPUT_PARTITION,
+ /* vpx_codec_caps_t caps; */
+ vp9e_init, /* vpx_codec_init_fn_t init; */
+ vp9e_destroy, /* vpx_codec_destroy_fn_t destroy; */
+ vp9e_ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */
+ NOT_IMPLEMENTED, /* vpx_codec_get_mmap_fn_t get_mmap; */
+ NOT_IMPLEMENTED, /* vpx_codec_set_mmap_fn_t set_mmap; */
+ {
+ NOT_IMPLEMENTED, /* vpx_codec_peek_si_fn_t peek_si; */
+ NOT_IMPLEMENTED, /* vpx_codec_get_si_fn_t get_si; */
+ NOT_IMPLEMENTED, /* vpx_codec_decode_fn_t decode; */
+ NOT_IMPLEMENTED, /* vpx_codec_frame_get_fn_t frame_get; */
+ },
+ {
+    vp9e_usage_cfg_map,  /* vpx_codec_enc_cfg_map_t    cfg_maps; */
+    vp9e_encode,         /* vpx_codec_encode_fn_t      encode; */
+    vp9e_get_cxdata,     /* vpx_codec_get_cx_data_fn_t get_cx_data; */
+ vp9e_set_config,
+ NOT_IMPLEMENTED,
+ vp9e_get_preview,
+ } /* encoder functions */
+};
+
+
+#if CONFIG_EXPERIMENTAL
+
+CODEC_INTERFACE(vpx_codec_vp9x_cx) = {
+  "VP9 Experimental Encoder" VERSION_STRING,
+ VPX_CODEC_INTERNAL_ABI_VERSION,
+ VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR,
+ /* vpx_codec_caps_t caps; */
+ vp9e_exp_init, /* vpx_codec_init_fn_t init; */
+ vp9e_destroy, /* vpx_codec_destroy_fn_t destroy; */
+ vp9e_ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */
+ NOT_IMPLEMENTED, /* vpx_codec_get_mmap_fn_t get_mmap; */
+ NOT_IMPLEMENTED, /* vpx_codec_set_mmap_fn_t set_mmap; */
+ {
+ NOT_IMPLEMENTED, /* vpx_codec_peek_si_fn_t peek_si; */
+ NOT_IMPLEMENTED, /* vpx_codec_get_si_fn_t get_si; */
+ NOT_IMPLEMENTED, /* vpx_codec_decode_fn_t decode; */
+ NOT_IMPLEMENTED, /* vpx_codec_frame_get_fn_t frame_get; */
+ },
+ {
+    vp9e_usage_cfg_map,  /* vpx_codec_enc_cfg_map_t    cfg_maps; */
+    vp9e_encode,         /* vpx_codec_encode_fn_t      encode; */
+    vp9e_get_cxdata,     /* vpx_codec_get_cx_data_fn_t get_cx_data; */
+ vp9e_set_config,
+ NOT_IMPLEMENTED,
+ vp9e_get_preview,
+ } /* encoder functions */
+};
+#endif
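
Note: taken together, the interface above is driven through the generic encoder API. A condensed lifecycle sketch, assuming a filled vpx_image_t (encode_one is illustrative; error handling is truncated):

    #include "vpx/vpx_encoder.h"
    #include "vpx/vp8cx.h"

    static int encode_one(vpx_image_t *frame) {
      vpx_codec_ctx_t encoder;
      vpx_codec_enc_cfg_t cfg;
      vpx_codec_iter_t iter = NULL;
      const vpx_codec_cx_pkt_t *pkt;

      if (vpx_codec_enc_config_default(vpx_codec_vp9_cx(), &cfg, 0))
        return -1;
      cfg.g_w = frame->d_w;
      cfg.g_h = frame->d_h;

      if (vpx_codec_enc_init(&encoder, vpx_codec_vp9_cx(), &cfg, 0))
        return -1;

      /* pts 0, duration 1, no flags, "good quality" deadline */
      vpx_codec_encode(&encoder, frame, 0, 1, 0, VPX_DL_GOOD_QUALITY);
      while ((pkt = vpx_codec_get_cx_data(&encoder, &iter)) != NULL) {
        if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
          /* pkt->data.frame.buf / pkt->data.frame.sz hold the bitstream */
        }
      }

      /* A NULL image flushes; packets would be drained the same way. */
      vpx_codec_encode(&encoder, NULL, 0, 1, 0, VPX_DL_GOOD_QUALITY);
      return vpx_codec_destroy(&encoder) ? -1 : 0;
    }
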
diff --git a/libvpx/vp9/vp9_dx_iface.c b/libvpx/vp9/vp9_dx_iface.c
new file mode 100644
index 0000000..ea6946b
--- /dev/null
+++ b/libvpx/vp9/vp9_dx_iface.c
@@ -0,0 +1,750 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include "vpx/vpx_decoder.h"
+#include "vpx/vp8dx.h"
+#include "vpx/internal/vpx_codec_internal.h"
+#include "vpx_version.h"
+#include "decoder/vp9_onyxd.h"
+#include "decoder/vp9_onyxd_int.h"
+#include "vp9/vp9_iface_common.h"
+
+#define VP8_CAP_POSTPROC (CONFIG_POSTPROC ? VPX_CODEC_CAP_POSTPROC : 0)
+typedef vpx_codec_stream_info_t vp8_stream_info_t;
+
+/* Structures for handling memory allocations */
+typedef enum {
+ VP8_SEG_ALG_PRIV = 256,
+ VP8_SEG_MAX
+} mem_seg_id_t;
+#define NELEMENTS(x) ((int)(sizeof(x)/sizeof(x[0])))
+
+static unsigned long vp8_priv_sz(const vpx_codec_dec_cfg_t *si, vpx_codec_flags_t);
+
+typedef struct {
+ unsigned int id;
+ unsigned long sz;
+ unsigned int align;
+ unsigned int flags;
+ unsigned long(*calc_sz)(const vpx_codec_dec_cfg_t *, vpx_codec_flags_t);
+} mem_req_t;
+
+static const mem_req_t vp8_mem_req_segs[] = {
+ {VP8_SEG_ALG_PRIV, 0, 8, VPX_CODEC_MEM_ZERO, vp8_priv_sz},
+ {VP8_SEG_MAX, 0, 0, 0, NULL}
+};
+
+struct vpx_codec_alg_priv {
+ vpx_codec_priv_t base;
+ vpx_codec_mmap_t mmaps[NELEMENTS(vp8_mem_req_segs) - 1];
+ vpx_codec_dec_cfg_t cfg;
+ vp8_stream_info_t si;
+ int defer_alloc;
+ int decoder_init;
+ VP9D_PTR pbi;
+ int postproc_cfg_set;
+ vp8_postproc_cfg_t postproc_cfg;
+#if CONFIG_POSTPROC_VISUALIZER
+ unsigned int dbg_postproc_flag;
+ int dbg_color_ref_frame_flag;
+ int dbg_color_mb_modes_flag;
+ int dbg_color_b_modes_flag;
+ int dbg_display_mv_flag;
+#endif
+ vpx_image_t img;
+ int img_setup;
+ int img_avail;
+ int invert_tile_order;
+};
+
+static unsigned long vp8_priv_sz(const vpx_codec_dec_cfg_t *si,
+ vpx_codec_flags_t flags) {
+ /* Although this declaration is constant, we can't use it in the requested
+ * segments list because we want to define the requested segments list
+ * before defining the private type (so that the number of memory maps is
+ * known)
+ */
+ (void)si;
+ return sizeof(vpx_codec_alg_priv_t);
+}
+
+
+static void vp8_mmap_dtor(vpx_codec_mmap_t *mmap) {
+ free(mmap->priv);
+}
+
+static vpx_codec_err_t vp8_mmap_alloc(vpx_codec_mmap_t *mmap) {
+ vpx_codec_err_t res;
+ unsigned int align;
+
+ align = mmap->align ? mmap->align - 1 : 0;
+
+ if (mmap->flags & VPX_CODEC_MEM_ZERO)
+ mmap->priv = calloc(1, mmap->sz + align);
+ else
+ mmap->priv = malloc(mmap->sz + align);
+
+ res = (mmap->priv) ? VPX_CODEC_OK : VPX_CODEC_MEM_ERROR;
+ mmap->base = (void *)((((uintptr_t)mmap->priv) + align) & ~(uintptr_t)align);
+ mmap->dtor = vp8_mmap_dtor;
+ return res;
+}
+
+static vpx_codec_err_t vp8_validate_mmaps(const vp8_stream_info_t *si,
+ const vpx_codec_mmap_t *mmaps,
+ vpx_codec_flags_t init_flags) {
+ int i;
+ vpx_codec_err_t res = VPX_CODEC_OK;
+
+ for (i = 0; i < NELEMENTS(vp8_mem_req_segs) - 1; i++) {
+ /* Ensure the segment has been allocated */
+ if (!mmaps[i].base) {
+ res = VPX_CODEC_MEM_ERROR;
+ break;
+ }
+
+ /* Verify variable size segment is big enough for the current si. */
+ if (vp8_mem_req_segs[i].calc_sz) {
+ vpx_codec_dec_cfg_t cfg;
+
+ cfg.w = si->w;
+ cfg.h = si->h;
+
+ if (mmaps[i].sz < vp8_mem_req_segs[i].calc_sz(&cfg, init_flags)) {
+ res = VPX_CODEC_MEM_ERROR;
+ break;
+ }
+ }
+ }
+
+ return res;
+}
+
+static void vp8_init_ctx(vpx_codec_ctx_t *ctx, const vpx_codec_mmap_t *mmap) {
+ int i;
+
+ ctx->priv = mmap->base;
+ ctx->priv->sz = sizeof(*ctx->priv);
+ ctx->priv->iface = ctx->iface;
+ ctx->priv->alg_priv = mmap->base;
+
+ for (i = 0; i < NELEMENTS(ctx->priv->alg_priv->mmaps); i++)
+ ctx->priv->alg_priv->mmaps[i].id = vp8_mem_req_segs[i].id;
+
+ ctx->priv->alg_priv->mmaps[0] = *mmap;
+ ctx->priv->alg_priv->si.sz = sizeof(ctx->priv->alg_priv->si);
+ ctx->priv->init_flags = ctx->init_flags;
+
+ if (ctx->config.dec) {
+ /* Update the reference to the config structure to an internal copy. */
+ ctx->priv->alg_priv->cfg = *ctx->config.dec;
+ ctx->config.dec = &ctx->priv->alg_priv->cfg;
+ }
+}
+
+static void *mmap_lkup(vpx_codec_alg_priv_t *ctx, unsigned int id) {
+ int i;
+
+ for (i = 0; i < NELEMENTS(ctx->mmaps); i++)
+ if (ctx->mmaps[i].id == id)
+ return ctx->mmaps[i].base;
+
+ return NULL;
+}
+
+static void vp8_finalize_mmaps(vpx_codec_alg_priv_t *ctx) {
+ /* nothing to clean up */
+}
+
+static vpx_codec_err_t vp8_init(vpx_codec_ctx_t *ctx,
+ vpx_codec_priv_enc_mr_cfg_t *data) {
+ vpx_codec_err_t res = VPX_CODEC_OK;
+
+ /* This function only allocates space for the vpx_codec_alg_priv_t
+ * structure. More memory may be required at the time the stream
+ * information becomes known.
+ */
+ if (!ctx->priv) {
+ vpx_codec_mmap_t mmap;
+
+ mmap.id = vp8_mem_req_segs[0].id;
+ mmap.sz = sizeof(vpx_codec_alg_priv_t);
+ mmap.align = vp8_mem_req_segs[0].align;
+ mmap.flags = vp8_mem_req_segs[0].flags;
+
+ res = vp8_mmap_alloc(&mmap);
+
+ if (!res) {
+ vp8_init_ctx(ctx, &mmap);
+
+ ctx->priv->alg_priv->defer_alloc = 1;
+ /*post processing level initialized to do nothing */
+ }
+ }
+
+ return res;
+}
+
+static vpx_codec_err_t vp8_destroy(vpx_codec_alg_priv_t *ctx) {
+ int i;
+
+ vp9_remove_decompressor(ctx->pbi);
+
+ for (i = NELEMENTS(ctx->mmaps) - 1; i >= 0; i--) {
+ if (ctx->mmaps[i].dtor)
+ ctx->mmaps[i].dtor(&ctx->mmaps[i]);
+ }
+
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_err_t vp8_peek_si(const uint8_t *data,
+ unsigned int data_sz,
+ vpx_codec_stream_info_t *si) {
+ vpx_codec_err_t res = VPX_CODEC_OK;
+
+ if (data + data_sz <= data)
+ res = VPX_CODEC_INVALID_PARAM;
+ else {
+ si->is_kf = 0;
+
+ if (data_sz >= 8 && (data[0] & 0xD8) == 0x80) { /* I-Frame */
+ const uint8_t *c = data + 1;
+ si->is_kf = 1;
+
+ if (c[0] != SYNC_CODE_0 || c[1] != SYNC_CODE_1 || c[2] != SYNC_CODE_2)
+ res = VPX_CODEC_UNSUP_BITSTREAM;
+
+ si->w = (c[3] << 8) | c[4];
+ si->h = (c[5] << 8) | c[6];
+
+ // printf("w=%d, h=%d\n", si->w, si->h);
+ if (!(si->h | si->w))
+ res = VPX_CODEC_UNSUP_BITSTREAM;
+ } else
+ res = VPX_CODEC_UNSUP_BITSTREAM;
+ }
+
+ return res;
+}
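
A worked example of the header test above, with illustrative byte values:

    /* For a buffer beginning
     *   0x82, SYNC_CODE_0, SYNC_CODE_1, SYNC_CODE_2, 0x01, 0x3F, 0x00, 0xEF
     * the keyframe test passes, since 0x82 & 0xD8 == 0x80, and with
     * c = data + 1 the dimensions decode as
     *   w = (c[3] << 8) | c[4] = (0x01 << 8) | 0x3F = 319
     *   h = (c[5] << 8) | c[6] = (0x00 << 8) | 0xEF = 239
     */
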
+
+static vpx_codec_err_t vp8_get_si(vpx_codec_alg_priv_t *ctx,
+ vpx_codec_stream_info_t *si) {
+  unsigned int sz;
+
+ if (si->sz >= sizeof(vp8_stream_info_t))
+ sz = sizeof(vp8_stream_info_t);
+ else
+ sz = sizeof(vpx_codec_stream_info_t);
+
+ memcpy(si, &ctx->si, sz);
+ si->sz = sz;
+
+ return VPX_CODEC_OK;
+}
+
+
+static vpx_codec_err_t
+update_error_state(vpx_codec_alg_priv_t *ctx,
+ const struct vpx_internal_error_info *error) {
+ vpx_codec_err_t res;
+
+ if ((res = error->error_code))
+ ctx->base.err_detail = error->has_detail
+ ? error->detail
+ : NULL;
+
+ return res;
+}
+
+static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
+ const uint8_t **data,
+ unsigned int data_sz,
+ void *user_priv,
+ long deadline) {
+ vpx_codec_err_t res = VPX_CODEC_OK;
+
+ ctx->img_avail = 0;
+
+ /* Determine the stream parameters. Note that we rely on peek_si to
+ * validate that we have a buffer that does not wrap around the top
+ * of the heap.
+ */
+ if (!ctx->si.h)
+ res = ctx->base.iface->dec.peek_si(*data, data_sz, &ctx->si);
+
+
+ /* Perform deferred allocations, if required */
+ if (!res && ctx->defer_alloc) {
+ int i;
+
+ for (i = 1; !res && i < NELEMENTS(ctx->mmaps); i++) {
+ vpx_codec_dec_cfg_t cfg;
+
+ cfg.w = ctx->si.w;
+ cfg.h = ctx->si.h;
+ ctx->mmaps[i].id = vp8_mem_req_segs[i].id;
+ ctx->mmaps[i].sz = vp8_mem_req_segs[i].sz;
+ ctx->mmaps[i].align = vp8_mem_req_segs[i].align;
+ ctx->mmaps[i].flags = vp8_mem_req_segs[i].flags;
+
+ if (!ctx->mmaps[i].sz)
+ ctx->mmaps[i].sz = vp8_mem_req_segs[i].calc_sz(&cfg,
+ ctx->base.init_flags);
+
+ res = vp8_mmap_alloc(&ctx->mmaps[i]);
+ }
+
+ if (!res)
+ vp8_finalize_mmaps(ctx);
+
+ ctx->defer_alloc = 0;
+ }
+
+ /* Initialize the decoder instance on the first frame*/
+ if (!res && !ctx->decoder_init) {
+ res = vp8_validate_mmaps(&ctx->si, ctx->mmaps, ctx->base.init_flags);
+
+ if (!res) {
+ VP9D_CONFIG oxcf;
+ VP9D_PTR optr;
+
+ vp9_initialize_dec();
+
+ oxcf.width = ctx->si.w;
+ oxcf.height = ctx->si.h;
+ oxcf.version = 9;
+ oxcf.postprocess = 0;
+ oxcf.max_threads = ctx->cfg.threads;
+ oxcf.inv_tile_order = ctx->invert_tile_order;
+ optr = vp9_create_decompressor(&oxcf);
+
+ /* If postprocessing was enabled by the application and a
+ * configuration has not been provided, default it.
+ */
+ if (!ctx->postproc_cfg_set
+ && (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)) {
+ ctx->postproc_cfg.post_proc_flag =
+ VP8_DEBLOCK | VP8_DEMACROBLOCK;
+ ctx->postproc_cfg.deblocking_level = 4;
+ ctx->postproc_cfg.noise_level = 0;
+ }
+
+ if (!optr)
+ res = VPX_CODEC_ERROR;
+ else
+ ctx->pbi = optr;
+ }
+
+ ctx->decoder_init = 1;
+ }
+
+ if (!res && ctx->pbi) {
+ YV12_BUFFER_CONFIG sd;
+ int64_t time_stamp = 0, time_end_stamp = 0;
+ vp9_ppflags_t flags = {0};
+
+ if (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC) {
+ flags.post_proc_flag = ctx->postproc_cfg.post_proc_flag
+#if CONFIG_POSTPROC_VISUALIZER
+
+ | ((ctx->dbg_color_ref_frame_flag != 0) ? VP9D_DEBUG_CLR_FRM_REF_BLKS : 0)
+ | ((ctx->dbg_color_mb_modes_flag != 0) ? VP9D_DEBUG_CLR_BLK_MODES : 0)
+ | ((ctx->dbg_color_b_modes_flag != 0) ? VP9D_DEBUG_CLR_BLK_MODES : 0)
+ | ((ctx->dbg_display_mv_flag != 0) ? VP9D_DEBUG_DRAW_MV : 0)
+#endif
+;
+ flags.deblocking_level = ctx->postproc_cfg.deblocking_level;
+ flags.noise_level = ctx->postproc_cfg.noise_level;
+#if CONFIG_POSTPROC_VISUALIZER
+ flags.display_ref_frame_flag = ctx->dbg_color_ref_frame_flag;
+ flags.display_mb_modes_flag = ctx->dbg_color_mb_modes_flag;
+ flags.display_b_modes_flag = ctx->dbg_color_b_modes_flag;
+ flags.display_mv_flag = ctx->dbg_display_mv_flag;
+#endif
+ }
+
+ if (vp9_receive_compressed_data(ctx->pbi, data_sz, data, deadline)) {
+ VP9D_COMP *pbi = (VP9D_COMP *)ctx->pbi;
+ res = update_error_state(ctx, &pbi->common.error);
+ }
+
+ if (!res && 0 == vp9_get_raw_frame(ctx->pbi, &sd, &time_stamp,
+ &time_end_stamp, &flags)) {
+ yuvconfig2image(&ctx->img, &sd, user_priv);
+ ctx->img_avail = 1;
+ }
+ }
+
+ return res;
+}
+
+static void parse_superframe_index(const uint8_t *data,
+ size_t data_sz,
+ uint32_t sizes[8],
+ int *count) {
+ uint8_t marker;
+
+ assert(data_sz);
+ marker = data[data_sz - 1];
+ *count = 0;
+
+ if ((marker & 0xe0) == 0xc0) {
+ const uint32_t frames = (marker & 0x7) + 1;
+ const uint32_t mag = ((marker >> 3) & 0x3) + 1;
+ const size_t index_sz = 2 + mag * frames;
+
+ if (data_sz >= index_sz && data[data_sz - index_sz] == marker) {
+ // found a valid superframe index
+ uint32_t i, j;
+ const uint8_t *x = data + data_sz - index_sz + 1;
+
+ for (i = 0; i < frames; i++) {
+ uint32_t this_sz = 0;
+
+ for (j = 0; j < mag; j++)
+ this_sz |= (*x++) << (j * 8);
+ sizes[i] = this_sz;
+ }
+
+ *count = frames;
+ }
+ }
+}
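
A worked example of the index layout this function expects. The stand-alone mirror below repeats the same bit arithmetic on illustrative bytes; it does not call the static function above:

    #include <assert.h>
    #include <stdint.h>

    /* marker 0xc9 => 2 frames, 2 size bytes each, index_sz = 2 + 2*2 = 6. */
    static void superframe_index_example(void) {
      /* 288 + 64 bytes of frame data would precede these six bytes. */
      const uint8_t tail[6] = { 0xc9, 0x20, 0x01, 0x40, 0x00, 0xc9 };
      const uint8_t marker = tail[5];
      const uint32_t frames = (marker & 0x7) + 1;      /* == 2 */
      const uint32_t mag = ((marker >> 3) & 0x3) + 1;  /* == 2 */
      const uint8_t *x = tail + 1;
      uint32_t sizes[8], i, j;

      assert((marker & 0xe0) == 0xc0);  /* superframe marker pattern */
      for (i = 0; i < frames; i++) {
        uint32_t this_sz = 0;
        for (j = 0; j < mag; j++)
          this_sz |= (uint32_t)(*x++) << (j * 8);      /* little-endian */
        sizes[i] = this_sz;
      }
      assert(sizes[0] == 0x120 && sizes[1] == 0x40);   /* 288 and 64 */
    }
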
+
+static vpx_codec_err_t vp9_decode(vpx_codec_alg_priv_t *ctx,
+ const uint8_t *data,
+ unsigned int data_sz,
+ void *user_priv,
+ long deadline) {
+ const uint8_t *data_start = data;
+ const uint8_t *data_end = data + data_sz;
+  vpx_codec_err_t res = VPX_CODEC_OK;
+ uint32_t sizes[8];
+ int frames_this_pts, frame_count = 0;
+
+ parse_superframe_index(data, data_sz, sizes, &frames_this_pts);
+
+ do {
+ // Skip over the superframe index, if present
+ if (data_sz && (*data_start & 0xe0) == 0xc0) {
+ const uint8_t marker = *data_start;
+ const uint32_t frames = (marker & 0x7) + 1;
+ const uint32_t mag = ((marker >> 3) & 0x3) + 1;
+ const uint32_t index_sz = 2 + mag * frames;
+
+ if (data_sz >= index_sz && data_start[index_sz - 1] == marker) {
+ data_start += index_sz;
+ data_sz -= index_sz;
+ if (data_start < data_end)
+ continue;
+ else
+ break;
+ }
+ }
+
+ // Use the correct size for this frame, if an index is present.
+ if (frames_this_pts) {
+ uint32_t this_sz = sizes[frame_count];
+
+ if (data_sz < this_sz) {
+ ctx->base.err_detail = "Invalid frame size in index";
+ return VPX_CODEC_CORRUPT_FRAME;
+ }
+
+ data_sz = this_sz;
+ frame_count++;
+ }
+
+ res = decode_one(ctx, &data_start, data_sz, user_priv, deadline);
+ assert(data_start >= data);
+ assert(data_start <= data_end);
+
+ /* Early exit if there was a decode error */
+ if (res)
+ break;
+
+ /* Account for suboptimal termination by the encoder. */
+ while (data_start < data_end && *data_start == 0)
+ data_start++;
+
+ data_sz = data_end - data_start;
+ } while (data_start < data_end);
+ return res;
+}
+
+static vpx_image_t *vp8_get_frame(vpx_codec_alg_priv_t *ctx,
+ vpx_codec_iter_t *iter) {
+ vpx_image_t *img = NULL;
+
+ if (ctx->img_avail) {
+ /* iter acts as a flip flop, so an image is only returned on the first
+ * call to get_frame.
+ */
+ if (!(*iter)) {
+ img = &ctx->img;
+ *iter = img;
+ }
+ }
+ ctx->img_avail = 0;
+
+ return img;
+}
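
Note: callers see this flip-flop through vpx_codec_get_frame(). A usage sketch (drain_frames is illustrative); because img_avail is cleared on the first call, this decoder yields at most one image per vpx_codec_decode():

    #include "vpx/vpx_decoder.h"
    #include "vpx/vp8dx.h"

    static void drain_frames(vpx_codec_ctx_t *decoder,
                             const uint8_t *buf, unsigned int buf_sz) {
      vpx_codec_iter_t iter = NULL;
      vpx_image_t *img;

      if (vpx_codec_decode(decoder, buf, buf_sz, NULL, 0))
        return;

      while ((img = vpx_codec_get_frame(decoder, &iter)) != NULL) {
        /* img->planes[VPX_PLANE_Y..V] with img->stride[] are valid here */
      }
    }
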
+
+
+static vpx_codec_err_t vp8_xma_get_mmap(const vpx_codec_ctx_t *ctx,
+ vpx_codec_mmap_t *mmap,
+ vpx_codec_iter_t *iter) {
+ vpx_codec_err_t res;
+ const mem_req_t *seg_iter = *iter;
+
+ /* Get address of next segment request */
+ do {
+ if (!seg_iter)
+ seg_iter = vp8_mem_req_segs;
+ else if (seg_iter->id != VP8_SEG_MAX)
+ seg_iter++;
+
+ *iter = (vpx_codec_iter_t)seg_iter;
+
+ if (seg_iter->id != VP8_SEG_MAX) {
+ mmap->id = seg_iter->id;
+ mmap->sz = seg_iter->sz;
+ mmap->align = seg_iter->align;
+ mmap->flags = seg_iter->flags;
+
+ if (!seg_iter->sz)
+ mmap->sz = seg_iter->calc_sz(ctx->config.dec, ctx->init_flags);
+
+ res = VPX_CODEC_OK;
+ } else
+ res = VPX_CODEC_LIST_END;
+ } while (!mmap->sz && res != VPX_CODEC_LIST_END);
+
+ return res;
+}
+
+static vpx_codec_err_t vp8_xma_set_mmap(vpx_codec_ctx_t *ctx,
+ const vpx_codec_mmap_t *mmap) {
+ vpx_codec_err_t res = VPX_CODEC_MEM_ERROR;
+ int i, done;
+
+ if (!ctx->priv) {
+ if (mmap->id == VP8_SEG_ALG_PRIV) {
+ if (!ctx->priv) {
+ vp8_init_ctx(ctx, mmap);
+ res = VPX_CODEC_OK;
+ }
+ }
+ }
+
+ done = 1;
+
+ if (!res && ctx->priv->alg_priv) {
+ for (i = 0; i < NELEMENTS(ctx->priv->alg_priv->mmaps); i++) {
+ if (ctx->priv->alg_priv->mmaps[i].id == mmap->id)
+ if (!ctx->priv->alg_priv->mmaps[i].base) {
+ ctx->priv->alg_priv->mmaps[i] = *mmap;
+ res = VPX_CODEC_OK;
+ }
+
+ done &= (ctx->priv->alg_priv->mmaps[i].base != NULL);
+ }
+ }
+
+ if (done && !res) {
+ vp8_finalize_mmaps(ctx->priv->alg_priv);
+ res = ctx->iface->init(ctx, NULL);
+ }
+
+ return res;
+}
+
+
+static vpx_codec_err_t vp9_set_reference(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+  vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
+
+ if (data) {
+ vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
+ YV12_BUFFER_CONFIG sd;
+
+ image2yuvconfig(&frame->img, &sd);
+
+ return vp9_set_reference_dec(ctx->pbi,
+ (VP9_REFFRAME)frame->frame_type, &sd);
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+}
+
+static vpx_codec_err_t vp9_copy_reference(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+  vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
+
+ if (data) {
+ vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
+ YV12_BUFFER_CONFIG sd;
+
+ image2yuvconfig(&frame->img, &sd);
+
+ return vp9_copy_reference_dec(ctx->pbi,
+ (VP9_REFFRAME)frame->frame_type, &sd);
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+}
+
+static vpx_codec_err_t get_reference(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ vp9_ref_frame_t *data = va_arg(args, vp9_ref_frame_t *);
+
+ if (data) {
+    YV12_BUFFER_CONFIG *fb;
+
+ vp9_get_reference_dec(ctx->pbi, data->idx, &fb);
+ yuvconfig2image(&data->img, fb, NULL);
+ return VPX_CODEC_OK;
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+}
+
+static vpx_codec_err_t vp8_set_postproc(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+#if CONFIG_POSTPROC
+ vp8_postproc_cfg_t *data = va_arg(args, vp8_postproc_cfg_t *);
+
+ if (data) {
+ ctx->postproc_cfg_set = 1;
+ ctx->postproc_cfg = *((vp8_postproc_cfg_t *)data);
+ return VPX_CODEC_OK;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+
+#else
+ return VPX_CODEC_INCAPABLE;
+#endif
+}
+
+static vpx_codec_err_t vp8_set_dbg_options(vpx_codec_alg_priv_t *ctx,
+ int ctrl_id,
+ va_list args) {
+#if CONFIG_POSTPROC_VISUALIZER && CONFIG_POSTPROC
+ int data = va_arg(args, int);
+
+#define MAP(id, var) case id: var = data; break;
+
+ switch (ctrl_id) {
+ MAP(VP8_SET_DBG_COLOR_REF_FRAME, ctx->dbg_color_ref_frame_flag);
+ MAP(VP8_SET_DBG_COLOR_MB_MODES, ctx->dbg_color_mb_modes_flag);
+ MAP(VP8_SET_DBG_COLOR_B_MODES, ctx->dbg_color_b_modes_flag);
+ MAP(VP8_SET_DBG_DISPLAY_MV, ctx->dbg_display_mv_flag);
+ }
+
+ return VPX_CODEC_OK;
+#else
+ return VPX_CODEC_INCAPABLE;
+#endif
+}
+
+static vpx_codec_err_t vp8_get_last_ref_updates(vpx_codec_alg_priv_t *ctx,
+ int ctrl_id,
+ va_list args) {
+ int *update_info = va_arg(args, int *);
+ VP9D_COMP *pbi = (VP9D_COMP *)ctx->pbi;
+
+ if (update_info) {
+ *update_info = pbi->refresh_frame_flags;
+
+ return VPX_CODEC_OK;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+}
+
+
+static vpx_codec_err_t vp8_get_frame_corrupted(vpx_codec_alg_priv_t *ctx,
+ int ctrl_id,
+ va_list args) {
+  int *corrupted = va_arg(args, int *);
+
+ if (corrupted) {
+ VP9D_COMP *pbi = (VP9D_COMP *)ctx->pbi;
+ *corrupted = pbi->common.frame_to_show->corrupted;
+
+ return VPX_CODEC_OK;
+ } else
+ return VPX_CODEC_INVALID_PARAM;
+}
+
+static vpx_codec_err_t set_invert_tile_order(vpx_codec_alg_priv_t *ctx,
+ int ctr_id,
+ va_list args) {
+ ctx->invert_tile_order = va_arg(args, int);
+ return VPX_CODEC_OK;
+}
+
+static vpx_codec_ctrl_fn_map_t ctf_maps[] = {
+ {VP8_SET_REFERENCE, vp9_set_reference},
+ {VP8_COPY_REFERENCE, vp9_copy_reference},
+ {VP8_SET_POSTPROC, vp8_set_postproc},
+ {VP8_SET_DBG_COLOR_REF_FRAME, vp8_set_dbg_options},
+ {VP8_SET_DBG_COLOR_MB_MODES, vp8_set_dbg_options},
+ {VP8_SET_DBG_COLOR_B_MODES, vp8_set_dbg_options},
+ {VP8_SET_DBG_DISPLAY_MV, vp8_set_dbg_options},
+ {VP8D_GET_LAST_REF_UPDATES, vp8_get_last_ref_updates},
+ {VP8D_GET_FRAME_CORRUPTED, vp8_get_frame_corrupted},
+ {VP9_GET_REFERENCE, get_reference},
+ {VP9_INVERT_TILE_DECODE_ORDER, set_invert_tile_order},
+ { -1, NULL},
+};
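
Note: two of these mappings exercised from the application side (query_decoder is an illustrative name, not part of this commit):

    #include "vpx/vpx_decoder.h"
    #include "vpx/vp8dx.h"

    static void query_decoder(vpx_codec_ctx_t *decoder) {
      int corrupted = 0;

      /* Decode tiles in inverted order (testing aid wired to the
       * invert_tile_order field above). */
      vpx_codec_control(decoder, VP9_INVERT_TILE_DECODE_ORDER, 1);

      /* Ask whether the last shown frame had corrupt residual data. */
      if (vpx_codec_control(decoder, VP8D_GET_FRAME_CORRUPTED, &corrupted)
              == VPX_CODEC_OK && corrupted) {
        /* conceal or request a keyframe */
      }
    }
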
+
+
+#ifndef VERSION_STRING
+#define VERSION_STRING
+#endif
+CODEC_INTERFACE(vpx_codec_vp9_dx) = {
+ "WebM Project VP9 Decoder" VERSION_STRING,
+ VPX_CODEC_INTERNAL_ABI_VERSION,
+ VPX_CODEC_CAP_DECODER | VP8_CAP_POSTPROC,
+ /* vpx_codec_caps_t caps; */
+ vp8_init, /* vpx_codec_init_fn_t init; */
+ vp8_destroy, /* vpx_codec_destroy_fn_t destroy; */
+ ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */
+ vp8_xma_get_mmap, /* vpx_codec_get_mmap_fn_t get_mmap; */
+ vp8_xma_set_mmap, /* vpx_codec_set_mmap_fn_t set_mmap; */
+ {
+ vp8_peek_si, /* vpx_codec_peek_si_fn_t peek_si; */
+ vp8_get_si, /* vpx_codec_get_si_fn_t get_si; */
+ vp9_decode, /* vpx_codec_decode_fn_t decode; */
+ vp8_get_frame, /* vpx_codec_frame_get_fn_t frame_get; */
+ },
+ {
+ /* encoder functions */
+ NOT_IMPLEMENTED,
+ NOT_IMPLEMENTED,
+ NOT_IMPLEMENTED,
+ NOT_IMPLEMENTED,
+ NOT_IMPLEMENTED,
+ NOT_IMPLEMENTED
+ }
+};
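
Note: instantiating this interface goes through vpx_codec_dec_init(). A sketch (open_vp9_decoder is illustrative); VPX_CODEC_USE_POSTPROC only succeeds when the library is built with CONFIG_POSTPROC, since VP8_CAP_POSTPROC is compiled out otherwise:

    #include "vpx/vpx_decoder.h"
    #include "vpx/vp8dx.h"

    static vpx_codec_err_t open_vp9_decoder(vpx_codec_ctx_t *decoder) {
      vpx_codec_dec_cfg_t cfg;
      cfg.threads = 4;  /* forwarded to oxcf.max_threads in decode_one() */
      cfg.w = 0;        /* unknown until the first frame is peeked */
      cfg.h = 0;
      return vpx_codec_dec_init(decoder, vpx_codec_vp9_dx(), &cfg,
                                VPX_CODEC_USE_POSTPROC);
    }
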
diff --git a/libvpx/vp9/vp9_iface_common.h b/libvpx/vp9/vp9_iface_common.h
new file mode 100644
index 0000000..dc41d77
--- /dev/null
+++ b/libvpx/vp9/vp9_iface_common.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VP9_VP9_IFACE_COMMON_H_
+#define VP9_VP9_IFACE_COMMON_H_
+
+static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12,
+ void *user_priv) {
+  /* vpx_img_wrap() doesn't allow specifying independent strides for
+   * the Y, U, and V planes, nor other alignment adjustments that
+   * might be representable by a YV12_BUFFER_CONFIG, so we just
+   * initialize all the fields. */
+ int bps = 12;
+ if (yv12->uv_height == yv12->y_height) {
+ if (yv12->uv_width == yv12->y_width) {
+ img->fmt = VPX_IMG_FMT_I444;
+ bps = 24;
+ } else {
+ img->fmt = VPX_IMG_FMT_I422;
+ bps = 16;
+ }
+ } else {
+ img->fmt = VPX_IMG_FMT_I420;
+ }
+ img->w = yv12->y_stride;
+ img->h = multiple8(yv12->y_height + 2 * VP9BORDERINPIXELS);
+ img->d_w = yv12->y_crop_width;
+ img->d_h = yv12->y_crop_height;
+ img->x_chroma_shift = yv12->uv_width < yv12->y_width;
+ img->y_chroma_shift = yv12->uv_height < yv12->y_height;
+ img->planes[VPX_PLANE_Y] = yv12->y_buffer;
+ img->planes[VPX_PLANE_U] = yv12->u_buffer;
+ img->planes[VPX_PLANE_V] = yv12->v_buffer;
+ img->planes[VPX_PLANE_ALPHA] = yv12->alpha_buffer;
+ img->stride[VPX_PLANE_Y] = yv12->y_stride;
+ img->stride[VPX_PLANE_U] = yv12->uv_stride;
+ img->stride[VPX_PLANE_V] = yv12->uv_stride;
+ img->stride[VPX_PLANE_ALPHA] = yv12->alpha_stride;
+ img->bps = bps;
+ img->user_priv = user_priv;
+ img->img_data = yv12->buffer_alloc;
+ img->img_data_owner = 0;
+ img->self_allocd = 0;
+}
+
+static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
+ YV12_BUFFER_CONFIG *yv12) {
+ yv12->y_buffer = img->planes[VPX_PLANE_Y];
+ yv12->u_buffer = img->planes[VPX_PLANE_U];
+ yv12->v_buffer = img->planes[VPX_PLANE_V];
+ yv12->alpha_buffer = img->planes[VPX_PLANE_ALPHA];
+
+ yv12->y_crop_width = img->d_w;
+ yv12->y_crop_height = img->d_h;
+ yv12->y_width = img->d_w;
+ yv12->y_height = img->d_h;
+
+ yv12->uv_width = img->x_chroma_shift == 1 ? (1 + yv12->y_width) / 2
+ : yv12->y_width;
+ yv12->uv_height = img->y_chroma_shift == 1 ? (1 + yv12->y_height) / 2
+ : yv12->y_height;
+
+ yv12->alpha_width = yv12->alpha_buffer ? img->d_w : 0;
+ yv12->alpha_height = yv12->alpha_buffer ? img->d_h : 0;
+
+ yv12->y_stride = img->stride[VPX_PLANE_Y];
+ yv12->uv_stride = img->stride[VPX_PLANE_U];
+ yv12->alpha_stride = yv12->alpha_buffer ? img->stride[VPX_PLANE_ALPHA] : 0;
+
+ yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2;
+ yv12->clrtype = REG_YUV;
+
+#if CONFIG_ALPHA
+  // For development purposes, force alpha to hold the same data as Y for now.
+ yv12->alpha_buffer = yv12->y_buffer;
+ yv12->alpha_width = yv12->y_width;
+ yv12->alpha_height = yv12->y_height;
+ yv12->alpha_stride = yv12->y_stride;
+#endif
+ return VPX_CODEC_OK;
+}
+
+#endif // VP9_VP9_IFACE_COMMON_H_
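
Worked numbers for the two conversions above, for an odd-sized 4:2:0 image:

    /* For a 4:2:0 image of 321x241:
     *   image2yuvconfig: x/y_chroma_shift == 1, so
     *     uv_width  = (1 + 321) / 2 = 161   (odd luma dims round up)
     *     uv_height = (1 + 241) / 2 = 121
     *   yuvconfig2image: uv_width < y_width and uv_height < y_height,
     *   so fmt is restored to VPX_IMG_FMT_I420 with both shifts == 1.
     */
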
diff --git a/libvpx/vp9/vp9cx.mk b/libvpx/vp9/vp9cx.mk
new file mode 100644
index 0000000..4bed6c0
--- /dev/null
+++ b/libvpx/vp9/vp9cx.mk
@@ -0,0 +1,101 @@
+##
+## Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+VP9_CX_EXPORTS += exports_enc
+
+VP9_CX_SRCS-yes += $(VP9_COMMON_SRCS-yes)
+VP9_CX_SRCS-no += $(VP9_COMMON_SRCS-no)
+VP9_CX_SRCS_REMOVE-yes += $(VP9_COMMON_SRCS_REMOVE-yes)
+VP9_CX_SRCS_REMOVE-no += $(VP9_COMMON_SRCS_REMOVE-no)
+
+VP9_CX_SRCS-yes += vp9_cx_iface.c
+
+VP9_CX_SRCS-yes += encoder/vp9_bitstream.c
+VP9_CX_SRCS-yes += encoder/vp9_boolhuff.c
+VP9_CX_SRCS-yes += encoder/vp9_dct.c
+VP9_CX_SRCS-yes += encoder/vp9_encodeframe.c
+VP9_CX_SRCS-yes += encoder/vp9_encodeframe.h
+VP9_CX_SRCS-yes += encoder/vp9_encodeintra.c
+VP9_CX_SRCS-yes += encoder/vp9_encodemb.c
+VP9_CX_SRCS-yes += encoder/vp9_encodemv.c
+VP9_CX_SRCS-yes += encoder/vp9_firstpass.c
+VP9_CX_SRCS-yes += encoder/vp9_block.h
+VP9_CX_SRCS-yes += encoder/vp9_boolhuff.h
+VP9_CX_SRCS-yes += encoder/vp9_write_bit_buffer.h
+VP9_CX_SRCS-yes += encoder/vp9_bitstream.h
+VP9_CX_SRCS-yes += encoder/vp9_encodeintra.h
+VP9_CX_SRCS-yes += encoder/vp9_encodemb.h
+VP9_CX_SRCS-yes += encoder/vp9_encodemv.h
+VP9_CX_SRCS-yes += encoder/vp9_firstpass.h
+VP9_CX_SRCS-yes += encoder/vp9_lookahead.c
+VP9_CX_SRCS-yes += encoder/vp9_lookahead.h
+VP9_CX_SRCS-yes += encoder/vp9_mcomp.h
+VP9_CX_SRCS-yes += encoder/vp9_modecosts.h
+VP9_CX_SRCS-yes += encoder/vp9_onyx_int.h
+VP9_CX_SRCS-yes += encoder/vp9_psnr.h
+VP9_CX_SRCS-yes += encoder/vp9_quantize.h
+VP9_CX_SRCS-yes += encoder/vp9_ratectrl.h
+VP9_CX_SRCS-yes += encoder/vp9_rdopt.h
+VP9_CX_SRCS-yes += encoder/vp9_tokenize.h
+VP9_CX_SRCS-yes += encoder/vp9_treewriter.h
+VP9_CX_SRCS-yes += encoder/vp9_variance.h
+VP9_CX_SRCS-yes += encoder/vp9_mcomp.c
+VP9_CX_SRCS-yes += encoder/vp9_modecosts.c
+VP9_CX_SRCS-yes += encoder/vp9_onyx_if.c
+VP9_CX_SRCS-yes += encoder/vp9_picklpf.c
+VP9_CX_SRCS-yes += encoder/vp9_picklpf.h
+VP9_CX_SRCS-yes += encoder/vp9_psnr.c
+VP9_CX_SRCS-yes += encoder/vp9_quantize.c
+VP9_CX_SRCS-yes += encoder/vp9_ratectrl.c
+VP9_CX_SRCS-yes += encoder/vp9_rdopt.c
+VP9_CX_SRCS-yes += encoder/vp9_sad_c.c
+VP9_CX_SRCS-yes += encoder/vp9_segmentation.c
+VP9_CX_SRCS-yes += encoder/vp9_segmentation.h
+VP9_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/vp9_ssim.c
+VP9_CX_SRCS-yes += encoder/vp9_tokenize.c
+VP9_CX_SRCS-yes += encoder/vp9_treewriter.c
+VP9_CX_SRCS-yes += encoder/vp9_variance_c.c
+ifeq ($(CONFIG_POSTPROC),yes)
+VP9_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/vp9_postproc.h
+VP9_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/vp9_postproc.c
+endif
+VP9_CX_SRCS-yes += encoder/vp9_temporal_filter.c
+VP9_CX_SRCS-yes += encoder/vp9_temporal_filter.h
+VP9_CX_SRCS-yes += encoder/vp9_mbgraph.c
+VP9_CX_SRCS-yes += encoder/vp9_mbgraph.h
+
+
+VP9_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/vp9_mcomp_x86.h
+VP9_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/vp9_x86_csystemdependent.c
+VP9_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp9_variance_mmx.c
+VP9_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp9_variance_impl_mmx.asm
+VP9_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp9_sad_mmx.asm
+VP9_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp9_dct_mmx.asm
+VP9_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp9_dct_mmx.h
+VP9_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp9_subtract_mmx.asm
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_variance_sse2.c
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_variance_impl_sse2.asm
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_sad_sse2.asm
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_sad4d_sse2.asm
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_fwalsh_sse2.asm
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_subtract_sse2.asm
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_subpel_variance_impl_sse2.asm
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_temporal_filter_apply_sse2.asm
+VP9_CX_SRCS-$(HAVE_SSE3) += encoder/x86/vp9_sad_sse3.asm
+VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_sad_ssse3.asm
+VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_variance_ssse3.c
+VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_variance_impl_ssse3.asm
+VP9_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/vp9_sad_sse4.asm
+VP9_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/vp9_encodeopt.asm
+VP9_CX_SRCS-$(ARCH_X86_64) += encoder/x86/vp9_ssim_opt.asm
+
+VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_dct_sse2.c
+
+VP9_CX_SRCS-yes := $(filter-out $(VP9_CX_SRCS_REMOVE-yes),$(VP9_CX_SRCS-yes))
diff --git a/libvpx/vp9/vp9dx.mk b/libvpx/vp9/vp9dx.mk
new file mode 100644
index 0000000..7ae3219
--- /dev/null
+++ b/libvpx/vp9/vp9dx.mk
@@ -0,0 +1,42 @@
+##
+## Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+
+VP9_DX_EXPORTS += exports_dec
+
+VP9_DX_SRCS-yes += $(VP9_COMMON_SRCS-yes)
+VP9_DX_SRCS-no += $(VP9_COMMON_SRCS-no)
+VP9_DX_SRCS_REMOVE-yes += $(VP9_COMMON_SRCS_REMOVE-yes)
+VP9_DX_SRCS_REMOVE-no += $(VP9_COMMON_SRCS_REMOVE-no)
+
+VP9_DX_SRCS-yes += vp9_dx_iface.c
+
+VP9_DX_SRCS-yes += decoder/vp9_asm_dec_offsets.c
+VP9_DX_SRCS-yes += decoder/vp9_dboolhuff.c
+VP9_DX_SRCS-yes += decoder/vp9_decodemv.c
+VP9_DX_SRCS-yes += decoder/vp9_decodframe.c
+VP9_DX_SRCS-yes += decoder/vp9_decodframe.h
+VP9_DX_SRCS-yes += decoder/vp9_detokenize.c
+VP9_DX_SRCS-yes += decoder/vp9_dboolhuff.h
+VP9_DX_SRCS-yes += decoder/vp9_read_bit_buffer.h
+VP9_DX_SRCS-yes += decoder/vp9_decodemv.h
+VP9_DX_SRCS-yes += decoder/vp9_detokenize.h
+VP9_DX_SRCS-yes += decoder/vp9_onyxd.h
+VP9_DX_SRCS-yes += decoder/vp9_onyxd_int.h
+VP9_DX_SRCS-yes += decoder/vp9_treereader.h
+VP9_DX_SRCS-yes += decoder/vp9_onyxd_if.c
+VP9_DX_SRCS-yes += decoder/vp9_idct_blk.c
+VP9_DX_SRCS-yes += decoder/vp9_idct_blk.h
+
+VP9_DX_SRCS-yes := $(filter-out $(VP9_DX_SRCS_REMOVE-yes),$(VP9_DX_SRCS-yes))
+
+VP9_DX_SRCS-$(HAVE_SSE2) += decoder/x86/vp9_dequantize_sse2.c
+
+$(eval $(call asm_offsets_template,\
+ vp9_asm_dec_offsets.asm, $(VP9_PREFIX)decoder/vp9_asm_dec_offsets.c))
diff --git a/libvpx/vpx/internal/vpx_codec_internal.h b/libvpx/vpx/internal/vpx_codec_internal.h
index 4474331..d7bcd46 100644
--- a/libvpx/vpx/internal/vpx_codec_internal.h
+++ b/libvpx/vpx/internal/vpx_codec_internal.h
@@ -75,7 +75,7 @@ typedef struct vpx_codec_priv_enc_mr_cfg vpx_codec_priv_enc_mr_cfg_t;
* Memory operation failed.
*/
typedef vpx_codec_err_t (*vpx_codec_init_fn_t)(vpx_codec_ctx_t *ctx,
- vpx_codec_priv_enc_mr_cfg_t *data);
+ vpx_codec_priv_enc_mr_cfg_t *data);
/*!\brief destroy function pointer prototype
*
@@ -109,8 +109,8 @@ typedef vpx_codec_err_t (*vpx_codec_destroy_fn_t)(vpx_codec_alg_priv_t *ctx);
* Bitstream is parsable and stream information updated
*/
typedef vpx_codec_err_t (*vpx_codec_peek_si_fn_t)(const uint8_t *data,
- unsigned int data_sz,
- vpx_codec_stream_info_t *si);
+ unsigned int data_sz,
+ vpx_codec_stream_info_t *si);
/*!\brief Return information about the current stream.
*
@@ -126,7 +126,7 @@ typedef vpx_codec_err_t (*vpx_codec_peek_si_fn_t)(const uint8_t *data,
* Bitstream is parsable and stream information updated
*/
typedef vpx_codec_err_t (*vpx_codec_get_si_fn_t)(vpx_codec_alg_priv_t *ctx,
- vpx_codec_stream_info_t *si);
+ vpx_codec_stream_info_t *si);
/*!\brief control function pointer prototype
*
@@ -151,8 +151,8 @@ typedef vpx_codec_err_t (*vpx_codec_get_si_fn_t)(vpx_codec_alg_priv_t *ctx,
* The internal state data was deserialized.
*/
typedef vpx_codec_err_t (*vpx_codec_control_fn_t)(vpx_codec_alg_priv_t *ctx,
- int ctrl_id,
- va_list ap);
+ int ctrl_id,
+ va_list ap);
/*!\brief control function pointer mapping
*
@@ -165,10 +165,9 @@ typedef vpx_codec_err_t (*vpx_codec_control_fn_t)(vpx_codec_alg_priv_t *ctx,
* mapping. This implies that ctrl_id values chosen by the algorithm
* \ref MUST be non-zero.
*/
-typedef const struct vpx_codec_ctrl_fn_map
-{
- int ctrl_id;
- vpx_codec_control_fn_t fn;
+typedef const struct vpx_codec_ctrl_fn_map {
+ int ctrl_id;
+ vpx_codec_control_fn_t fn;
} vpx_codec_ctrl_fn_map_t;
/*!\brief decode data function pointer prototype
@@ -192,10 +191,10 @@ typedef const struct vpx_codec_ctrl_fn_map
* for recoverability capabilities.
*/
typedef vpx_codec_err_t (*vpx_codec_decode_fn_t)(vpx_codec_alg_priv_t *ctx,
- const uint8_t *data,
- unsigned int data_sz,
- void *user_priv,
- long deadline);
+ const uint8_t *data,
+ unsigned int data_sz,
+ void *user_priv,
+ long deadline);
/*!\brief Decoded frames iterator
*
@@ -212,8 +211,8 @@ typedef vpx_codec_err_t (*vpx_codec_decode_fn_t)(vpx_codec_alg_priv_t *ctx,
* \return Returns a pointer to an image, if one is ready for display. Frames
* produced will always be in PTS (presentation time stamp) order.
*/
-typedef vpx_image_t*(*vpx_codec_get_frame_fn_t)(vpx_codec_alg_priv_t *ctx,
- vpx_codec_iter_t *iter);
+typedef vpx_image_t *(*vpx_codec_get_frame_fn_t)(vpx_codec_alg_priv_t *ctx,
+ vpx_codec_iter_t *iter);
/*\brief eXternal Memory Allocation memory map get iterator
@@ -228,8 +227,8 @@ typedef vpx_image_t*(*vpx_codec_get_frame_fn_t)(vpx_codec_alg_priv_t *ctx,
* indicate end-of-list.
*/
typedef vpx_codec_err_t (*vpx_codec_get_mmap_fn_t)(const vpx_codec_ctx_t *ctx,
- vpx_codec_mmap_t *mmap,
- vpx_codec_iter_t *iter);
+ vpx_codec_mmap_t *mmap,
+ vpx_codec_iter_t *iter);
/*\brief eXternal Memory Allocation memory map set iterator
@@ -245,17 +244,17 @@ typedef vpx_codec_err_t (*vpx_codec_get_mmap_fn_t)(const vpx_codec_ctx_t *c
* The memory map was rejected.
*/
typedef vpx_codec_err_t (*vpx_codec_set_mmap_fn_t)(vpx_codec_ctx_t *ctx,
- const vpx_codec_mmap_t *mmap);
+ const vpx_codec_mmap_t *mmap);
typedef vpx_codec_err_t (*vpx_codec_encode_fn_t)(vpx_codec_alg_priv_t *ctx,
- const vpx_image_t *img,
- vpx_codec_pts_t pts,
- unsigned long duration,
- vpx_enc_frame_flags_t flags,
- unsigned long deadline);
-typedef const vpx_codec_cx_pkt_t*(*vpx_codec_get_cx_data_fn_t)(vpx_codec_alg_priv_t *ctx,
- vpx_codec_iter_t *iter);
+ const vpx_image_t *img,
+ vpx_codec_pts_t pts,
+ unsigned long duration,
+ vpx_enc_frame_flags_t flags,
+ unsigned long deadline);
+typedef const vpx_codec_cx_pkt_t *(*vpx_codec_get_cx_data_fn_t)(vpx_codec_alg_priv_t *ctx,
+ vpx_codec_iter_t *iter);
typedef vpx_codec_err_t
(*vpx_codec_enc_config_set_fn_t)(vpx_codec_alg_priv_t *ctx,
@@ -268,7 +267,7 @@ typedef vpx_image_t *
typedef vpx_codec_err_t
(*vpx_codec_enc_mr_get_mem_loc_fn_t)(const vpx_codec_enc_cfg_t *cfg,
- void **mem_loc);
+ void **mem_loc);
/*!\brief usage configuration mapping
*
@@ -280,10 +279,9 @@ typedef vpx_codec_err_t
* one mapping must be present, in addition to the end-of-list.
*
*/
-typedef const struct vpx_codec_enc_cfg_map
-{
- int usage;
- vpx_codec_enc_cfg_t cfg;
+typedef const struct vpx_codec_enc_cfg_map {
+ int usage;
+ vpx_codec_enc_cfg_t cfg;
} vpx_codec_enc_cfg_map_t;
#define NOT_IMPLEMENTED 0
@@ -292,44 +290,39 @@ typedef const struct vpx_codec_enc_cfg_map
*
* All decoders \ref MUST expose a variable of this type.
*/
-struct vpx_codec_iface
-{
- const char *name; /**< Identification String */
- int abi_version; /**< Implemented ABI version */
- vpx_codec_caps_t caps; /**< Decoder capabilities */
- vpx_codec_init_fn_t init; /**< \copydoc ::vpx_codec_init_fn_t */
- vpx_codec_destroy_fn_t destroy; /**< \copydoc ::vpx_codec_destroy_fn_t */
- vpx_codec_ctrl_fn_map_t *ctrl_maps; /**< \copydoc ::vpx_codec_ctrl_fn_map_t */
- vpx_codec_get_mmap_fn_t get_mmap; /**< \copydoc ::vpx_codec_get_mmap_fn_t */
- vpx_codec_set_mmap_fn_t set_mmap; /**< \copydoc ::vpx_codec_set_mmap_fn_t */
- struct vpx_codec_dec_iface
- {
- vpx_codec_peek_si_fn_t peek_si; /**< \copydoc ::vpx_codec_peek_si_fn_t */
- vpx_codec_get_si_fn_t get_si; /**< \copydoc ::vpx_codec_peek_si_fn_t */
- vpx_codec_decode_fn_t decode; /**< \copydoc ::vpx_codec_decode_fn_t */
- vpx_codec_get_frame_fn_t get_frame; /**< \copydoc ::vpx_codec_get_frame_fn_t */
- } dec;
- struct vpx_codec_enc_iface
- {
- vpx_codec_enc_cfg_map_t *cfg_maps; /**< \copydoc ::vpx_codec_enc_cfg_map_t */
- vpx_codec_encode_fn_t encode; /**< \copydoc ::vpx_codec_encode_fn_t */
- vpx_codec_get_cx_data_fn_t get_cx_data; /**< \copydoc ::vpx_codec_get_cx_data_fn_t */
- vpx_codec_enc_config_set_fn_t cfg_set; /**< \copydoc ::vpx_codec_enc_config_set_fn_t */
- vpx_codec_get_global_headers_fn_t get_glob_hdrs; /**< \copydoc ::vpx_codec_get_global_headers_fn_t */
- vpx_codec_get_preview_frame_fn_t get_preview; /**< \copydoc ::vpx_codec_get_preview_frame_fn_t */
- vpx_codec_enc_mr_get_mem_loc_fn_t mr_get_mem_loc; /**< \copydoc ::vpx_codec_enc_mr_get_mem_loc_fn_t */
- } enc;
+struct vpx_codec_iface {
+ const char *name; /**< Identification String */
+ int abi_version; /**< Implemented ABI version */
+ vpx_codec_caps_t caps; /**< Decoder capabilities */
+ vpx_codec_init_fn_t init; /**< \copydoc ::vpx_codec_init_fn_t */
+ vpx_codec_destroy_fn_t destroy; /**< \copydoc ::vpx_codec_destroy_fn_t */
+ vpx_codec_ctrl_fn_map_t *ctrl_maps; /**< \copydoc ::vpx_codec_ctrl_fn_map_t */
+ vpx_codec_get_mmap_fn_t get_mmap; /**< \copydoc ::vpx_codec_get_mmap_fn_t */
+ vpx_codec_set_mmap_fn_t set_mmap; /**< \copydoc ::vpx_codec_set_mmap_fn_t */
+ struct vpx_codec_dec_iface {
+ vpx_codec_peek_si_fn_t peek_si; /**< \copydoc ::vpx_codec_peek_si_fn_t */
+    vpx_codec_get_si_fn_t     get_si;    /**< \copydoc ::vpx_codec_get_si_fn_t */
+ vpx_codec_decode_fn_t decode; /**< \copydoc ::vpx_codec_decode_fn_t */
+ vpx_codec_get_frame_fn_t get_frame; /**< \copydoc ::vpx_codec_get_frame_fn_t */
+ } dec;
+ struct vpx_codec_enc_iface {
+ vpx_codec_enc_cfg_map_t *cfg_maps; /**< \copydoc ::vpx_codec_enc_cfg_map_t */
+ vpx_codec_encode_fn_t encode; /**< \copydoc ::vpx_codec_encode_fn_t */
+ vpx_codec_get_cx_data_fn_t get_cx_data; /**< \copydoc ::vpx_codec_get_cx_data_fn_t */
+ vpx_codec_enc_config_set_fn_t cfg_set; /**< \copydoc ::vpx_codec_enc_config_set_fn_t */
+ vpx_codec_get_global_headers_fn_t get_glob_hdrs; /**< \copydoc ::vpx_codec_get_global_headers_fn_t */
+ vpx_codec_get_preview_frame_fn_t get_preview; /**< \copydoc ::vpx_codec_get_preview_frame_fn_t */
+ vpx_codec_enc_mr_get_mem_loc_fn_t mr_get_mem_loc; /**< \copydoc ::vpx_codec_enc_mr_get_mem_loc_fn_t */
+ } enc;
};
/*!\brief Callback function pointer / user data pair storage */
-typedef struct vpx_codec_priv_cb_pair
-{
- union
- {
- vpx_codec_put_frame_cb_fn_t put_frame;
- vpx_codec_put_slice_cb_fn_t put_slice;
- } u;
- void *user_priv;
+typedef struct vpx_codec_priv_cb_pair {
+ union {
+ vpx_codec_put_frame_cb_fn_t put_frame;
+ vpx_codec_put_slice_cb_fn_t put_slice;
+ } u;
+ void *user_priv;
} vpx_codec_priv_cb_pair_t;
@@ -341,27 +334,24 @@ typedef struct vpx_codec_priv_cb_pair
* structure can be made the first member of the algorithm specific structure,
* and the pointer cast to the proper type.
*/
-struct vpx_codec_priv
-{
- unsigned int sz;
- vpx_codec_iface_t *iface;
- struct vpx_codec_alg_priv *alg_priv;
- const char *err_detail;
- vpx_codec_flags_t init_flags;
- struct
- {
- vpx_codec_priv_cb_pair_t put_frame_cb;
- vpx_codec_priv_cb_pair_t put_slice_cb;
- } dec;
- struct
- {
- int tbd;
- struct vpx_fixed_buf cx_data_dst_buf;
- unsigned int cx_data_pad_before;
- unsigned int cx_data_pad_after;
- vpx_codec_cx_pkt_t cx_data_pkt;
- unsigned int total_encoders;
- } enc;
+struct vpx_codec_priv {
+ unsigned int sz;
+ vpx_codec_iface_t *iface;
+ struct vpx_codec_alg_priv *alg_priv;
+ const char *err_detail;
+ vpx_codec_flags_t init_flags;
+ struct {
+ vpx_codec_priv_cb_pair_t put_frame_cb;
+ vpx_codec_priv_cb_pair_t put_slice_cb;
+ } dec;
+ struct {
+ int tbd;
+ struct vpx_fixed_buf cx_data_dst_buf;
+ unsigned int cx_data_pad_before;
+ unsigned int cx_data_pad_after;
+ vpx_codec_cx_pkt_t cx_data_pkt;
+ unsigned int total_encoders;
+ } enc;
};
/*
@@ -377,32 +367,32 @@ struct vpx_codec_priv_enc_mr_cfg
#undef VPX_CTRL_USE_TYPE
#define VPX_CTRL_USE_TYPE(id, typ) \
- static typ id##__value(va_list args) {return va_arg(args, typ);} \
- static typ id##__convert(void *x)\
+ static typ id##__value(va_list args) {return va_arg(args, typ);} \
+ static typ id##__convert(void *x)\
+ {\
+ union\
{\
- union\
- {\
- void *x;\
- typ d;\
- } u;\
- u.x = x;\
- return u.d;\
- }
+ void *x;\
+ typ d;\
+ } u;\
+ u.x = x;\
+ return u.d;\
+ }
#undef VPX_CTRL_USE_TYPE_DEPRECATED
#define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ) \
- static typ id##__value(va_list args) {return va_arg(args, typ);} \
- static typ id##__convert(void *x)\
+ static typ id##__value(va_list args) {return va_arg(args, typ);} \
+ static typ id##__convert(void *x)\
+ {\
+ union\
{\
- union\
- {\
- void *x;\
- typ d;\
- } u;\
- u.x = x;\
- return u.d;\
- }
+ void *x;\
+ typ d;\
+ } u;\
+ u.x = x;\
+ return u.d;\
+ }
#define CAST(id, arg) id##__value(arg)
#define RECAST(id, x) id##__convert(x)
@@ -418,8 +408,8 @@ struct vpx_codec_priv_enc_mr_cfg
* macro is provided to define this getter function automatically.
*/
#define CODEC_INTERFACE(id)\
-vpx_codec_iface_t* id(void) { return &id##_algo; }\
-vpx_codec_iface_t id##_algo
+ vpx_codec_iface_t* id(void) { return &id##_algo; }\
+ vpx_codec_iface_t id##_algo
/* Internal Utility Functions
@@ -427,64 +417,60 @@ vpx_codec_iface_t id##_algo
* The following functions are intended to be used inside algorithms as
* utilities for manipulating vpx_codec_* data structures.
*/
-struct vpx_codec_pkt_list
-{
- unsigned int cnt;
- unsigned int max;
- struct vpx_codec_cx_pkt pkts[1];
+struct vpx_codec_pkt_list {
+ unsigned int cnt;
+ unsigned int max;
+ struct vpx_codec_cx_pkt pkts[1];
};
#define vpx_codec_pkt_list_decl(n)\
- union {struct vpx_codec_pkt_list head;\
- struct {struct vpx_codec_pkt_list head;\
- struct vpx_codec_cx_pkt pkts[n];} alloc;}
+ union {struct vpx_codec_pkt_list head;\
+ struct {struct vpx_codec_pkt_list head;\
+ struct vpx_codec_cx_pkt pkts[n];} alloc;}
#define vpx_codec_pkt_list_init(m)\
- (m)->alloc.head.cnt = 0,\
- (m)->alloc.head.max = sizeof((m)->alloc.pkts) / sizeof((m)->alloc.pkts[0])
+ (m)->alloc.head.cnt = 0,\
+ (m)->alloc.head.max = sizeof((m)->alloc.pkts) / sizeof((m)->alloc.pkts[0])
int
vpx_codec_pkt_list_add(struct vpx_codec_pkt_list *,
const struct vpx_codec_cx_pkt *);
-const vpx_codec_cx_pkt_t*
+const vpx_codec_cx_pkt_t *
vpx_codec_pkt_list_get(struct vpx_codec_pkt_list *list,
vpx_codec_iter_t *iter);
#include <stdio.h>
#include <setjmp.h>
-struct vpx_internal_error_info
-{
- vpx_codec_err_t error_code;
- int has_detail;
- char detail[80];
- int setjmp;
- jmp_buf jmp;
+struct vpx_internal_error_info {
+ vpx_codec_err_t error_code;
+ int has_detail;
+ char detail[80];
+ int setjmp;
+ jmp_buf jmp;
};
static void vpx_internal_error(struct vpx_internal_error_info *info,
vpx_codec_err_t error,
const char *fmt,
- ...)
-{
- va_list ap;
+ ...) {
+ va_list ap;
- info->error_code = error;
- info->has_detail = 0;
+ info->error_code = error;
+ info->has_detail = 0;
- if (fmt)
- {
- size_t sz = sizeof(info->detail);
+ if (fmt) {
+ size_t sz = sizeof(info->detail);
- info->has_detail = 1;
- va_start(ap, fmt);
- vsnprintf(info->detail, sz - 1, fmt, ap);
- va_end(ap);
- info->detail[sz-1] = '\0';
- }
+ info->has_detail = 1;
+ va_start(ap, fmt);
+ vsnprintf(info->detail, sz - 1, fmt, ap);
+ va_end(ap);
+ info->detail[sz - 1] = '\0';
+ }
- if (info->setjmp)
- longjmp(info->jmp, info->error_code);
+ if (info->setjmp)
+ longjmp(info->jmp, info->error_code);
}
#endif
diff --git a/libvpx/vpx/src/vpx_codec.c b/libvpx/vpx/src/vpx_codec.c
index f1a8b67..61d7f4c 100644
--- a/libvpx/vpx/src/vpx_codec.c
+++ b/libvpx/vpx/src/vpx_codec.c
@@ -20,131 +20,116 @@
#define SAVE_STATUS(ctx,var) (ctx?(ctx->err = var):var)
-int vpx_codec_version(void)
-{
- return VERSION_PACKED;
+int vpx_codec_version(void) {
+ return VERSION_PACKED;
}
-const char *vpx_codec_version_str(void)
-{
- return VERSION_STRING_NOSP;
+const char *vpx_codec_version_str(void) {
+ return VERSION_STRING_NOSP;
}
-const char *vpx_codec_version_extra_str(void)
-{
- return VERSION_EXTRA;
+const char *vpx_codec_version_extra_str(void) {
+ return VERSION_EXTRA;
}
-const char *vpx_codec_iface_name(vpx_codec_iface_t *iface)
-{
- return iface ? iface->name : "<invalid interface>";
+const char *vpx_codec_iface_name(vpx_codec_iface_t *iface) {
+ return iface ? iface->name : "<invalid interface>";
}
-const char *vpx_codec_err_to_string(vpx_codec_err_t err)
-{
- switch (err)
- {
+const char *vpx_codec_err_to_string(vpx_codec_err_t err) {
+ switch (err) {
case VPX_CODEC_OK:
- return "Success";
+ return "Success";
case VPX_CODEC_ERROR:
- return "Unspecified internal error";
+ return "Unspecified internal error";
case VPX_CODEC_MEM_ERROR:
- return "Memory allocation error";
+ return "Memory allocation error";
case VPX_CODEC_ABI_MISMATCH:
- return "ABI version mismatch";
+ return "ABI version mismatch";
case VPX_CODEC_INCAPABLE:
- return "Codec does not implement requested capability";
+ return "Codec does not implement requested capability";
case VPX_CODEC_UNSUP_BITSTREAM:
- return "Bitstream not supported by this decoder";
+ return "Bitstream not supported by this decoder";
case VPX_CODEC_UNSUP_FEATURE:
- return "Bitstream required feature not supported by this decoder";
+ return "Bitstream required feature not supported by this decoder";
case VPX_CODEC_CORRUPT_FRAME:
- return "Corrupt frame detected";
+ return "Corrupt frame detected";
case VPX_CODEC_INVALID_PARAM:
- return "Invalid parameter";
+ return "Invalid parameter";
case VPX_CODEC_LIST_END:
- return "End of iterated list";
- }
+ return "End of iterated list";
+ }
- return "Unrecognized error code";
+ return "Unrecognized error code";
}
-const char *vpx_codec_error(vpx_codec_ctx_t *ctx)
-{
- return (ctx) ? vpx_codec_err_to_string(ctx->err)
- : vpx_codec_err_to_string(VPX_CODEC_INVALID_PARAM);
+const char *vpx_codec_error(vpx_codec_ctx_t *ctx) {
+ return (ctx) ? vpx_codec_err_to_string(ctx->err)
+ : vpx_codec_err_to_string(VPX_CODEC_INVALID_PARAM);
}
-const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx)
-{
- if (ctx && ctx->err)
- return ctx->priv ? ctx->priv->err_detail : ctx->err_detail;
+const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx) {
+ if (ctx && ctx->err)
+ return ctx->priv ? ctx->priv->err_detail : ctx->err_detail;
- return NULL;
+ return NULL;
}
-vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx)
-{
- vpx_codec_err_t res;
+vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx) {
+ vpx_codec_err_t res;
- if (!ctx)
- res = VPX_CODEC_INVALID_PARAM;
- else if (!ctx->iface || !ctx->priv)
- res = VPX_CODEC_ERROR;
- else
- {
- if (ctx->priv->alg_priv)
- ctx->iface->destroy(ctx->priv->alg_priv);
+ if (!ctx)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!ctx->iface || !ctx->priv)
+ res = VPX_CODEC_ERROR;
+ else {
+ if (ctx->priv->alg_priv)
+ ctx->iface->destroy(ctx->priv->alg_priv);
- ctx->iface = NULL;
- ctx->name = NULL;
- ctx->priv = NULL;
- res = VPX_CODEC_OK;
- }
+ ctx->iface = NULL;
+ ctx->name = NULL;
+ ctx->priv = NULL;
+ res = VPX_CODEC_OK;
+ }
- return SAVE_STATUS(ctx, res);
+ return SAVE_STATUS(ctx, res);
}
-vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface)
-{
- return (iface) ? iface->caps : 0;
+vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface) {
+ return (iface) ? iface->caps : 0;
}
vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx,
int ctrl_id,
- ...)
-{
- vpx_codec_err_t res;
-
- if (!ctx || !ctrl_id)
- res = VPX_CODEC_INVALID_PARAM;
- else if (!ctx->iface || !ctx->priv || !ctx->iface->ctrl_maps)
- res = VPX_CODEC_ERROR;
- else
- {
- vpx_codec_ctrl_fn_map_t *entry;
-
- res = VPX_CODEC_ERROR;
-
- for (entry = ctx->iface->ctrl_maps; entry && entry->fn; entry++)
- {
- if (!entry->ctrl_id || entry->ctrl_id == ctrl_id)
- {
- va_list ap;
-
- va_start(ap, ctrl_id);
- res = entry->fn(ctx->priv->alg_priv, ctrl_id, ap);
- va_end(ap);
- break;
- }
- }
+ ...) {
+ vpx_codec_err_t res;
+
+ if (!ctx || !ctrl_id)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!ctx->iface || !ctx->priv || !ctx->iface->ctrl_maps)
+ res = VPX_CODEC_ERROR;
+ else {
+ vpx_codec_ctrl_fn_map_t *entry;
+
+ res = VPX_CODEC_ERROR;
+
+ for (entry = ctx->iface->ctrl_maps; entry && entry->fn; entry++) {
+ if (!entry->ctrl_id || entry->ctrl_id == ctrl_id) {
+ va_list ap;
+
+ va_start(ap, ctrl_id);
+ res = entry->fn(ctx->priv->alg_priv, ctrl_id, ap);
+ va_end(ap);
+ break;
+ }
}
+ }
- return SAVE_STATUS(ctx, res);
+ return SAVE_STATUS(ctx, res);
}
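
Note: the map walk above is a linear scan, and an entry with ctrl_id == 0 would act as a wildcard handler (none is registered in this commit). From the application side, the type-checked vpx_codec_control() macro expands to vpx_codec_control_(); a sketch with two ids that are mapped in vp9_cx_iface.c above (tune_encoder is illustrative):

    #include "vpx/vpx_encoder.h"
    #include "vpx/vp8cx.h"

    static void tune_encoder(vpx_codec_ctx_t *encoder) {
      vpx_codec_control(encoder, VP8E_SET_CPUUSED, 2);
      vpx_codec_control(encoder, VP9E_SET_LOSSLESS, 1);
    }
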
diff --git a/libvpx/vpx/src/vpx_decoder.c b/libvpx/vpx/src/vpx_decoder.c
index 59a783d..1f575e0 100644
--- a/libvpx/vpx/src/vpx_decoder.c
+++ b/libvpx/vpx/src/vpx_decoder.c
@@ -22,99 +22,91 @@ vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx,
vpx_codec_iface_t *iface,
vpx_codec_dec_cfg_t *cfg,
vpx_codec_flags_t flags,
- int ver)
-{
- vpx_codec_err_t res;
-
- if (ver != VPX_DECODER_ABI_VERSION)
- res = VPX_CODEC_ABI_MISMATCH;
- else if (!ctx || !iface)
- res = VPX_CODEC_INVALID_PARAM;
- else if (iface->abi_version != VPX_CODEC_INTERNAL_ABI_VERSION)
- res = VPX_CODEC_ABI_MISMATCH;
- else if ((flags & VPX_CODEC_USE_XMA) && !(iface->caps & VPX_CODEC_CAP_XMA))
- res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_POSTPROC) && !(iface->caps & VPX_CODEC_CAP_POSTPROC))
- res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_ERROR_CONCEALMENT) &&
- !(iface->caps & VPX_CODEC_CAP_ERROR_CONCEALMENT))
- res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_INPUT_FRAGMENTS) &&
- !(iface->caps & VPX_CODEC_CAP_INPUT_FRAGMENTS))
- res = VPX_CODEC_INCAPABLE;
- else if (!(iface->caps & VPX_CODEC_CAP_DECODER))
- res = VPX_CODEC_INCAPABLE;
- else
- {
- memset(ctx, 0, sizeof(*ctx));
- ctx->iface = iface;
- ctx->name = iface->name;
- ctx->priv = NULL;
- ctx->init_flags = flags;
- ctx->config.dec = cfg;
- res = VPX_CODEC_OK;
-
- if (!(flags & VPX_CODEC_USE_XMA))
- {
- res = ctx->iface->init(ctx, NULL);
-
- if (res)
- {
- ctx->err_detail = ctx->priv ? ctx->priv->err_detail : NULL;
- vpx_codec_destroy(ctx);
- }
-
- if (ctx->priv)
- ctx->priv->iface = ctx->iface;
- }
+ int ver) {
+ vpx_codec_err_t res;
+
+ if (ver != VPX_DECODER_ABI_VERSION)
+ res = VPX_CODEC_ABI_MISMATCH;
+ else if (!ctx || !iface)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (iface->abi_version != VPX_CODEC_INTERNAL_ABI_VERSION)
+ res = VPX_CODEC_ABI_MISMATCH;
+ else if ((flags & VPX_CODEC_USE_XMA) && !(iface->caps & VPX_CODEC_CAP_XMA))
+ res = VPX_CODEC_INCAPABLE;
+ else if ((flags & VPX_CODEC_USE_POSTPROC) && !(iface->caps & VPX_CODEC_CAP_POSTPROC))
+ res = VPX_CODEC_INCAPABLE;
+ else if ((flags & VPX_CODEC_USE_ERROR_CONCEALMENT) &&
+ !(iface->caps & VPX_CODEC_CAP_ERROR_CONCEALMENT))
+ res = VPX_CODEC_INCAPABLE;
+ else if ((flags & VPX_CODEC_USE_INPUT_FRAGMENTS) &&
+ !(iface->caps & VPX_CODEC_CAP_INPUT_FRAGMENTS))
+ res = VPX_CODEC_INCAPABLE;
+ else if (!(iface->caps & VPX_CODEC_CAP_DECODER))
+ res = VPX_CODEC_INCAPABLE;
+ else {
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->iface = iface;
+ ctx->name = iface->name;
+ ctx->priv = NULL;
+ ctx->init_flags = flags;
+ ctx->config.dec = cfg;
+ res = VPX_CODEC_OK;
+
+ if (!(flags & VPX_CODEC_USE_XMA)) {
+ res = ctx->iface->init(ctx, NULL);
+
+ if (res) {
+ ctx->err_detail = ctx->priv ? ctx->priv->err_detail : NULL;
+ vpx_codec_destroy(ctx);
+ }
+
+ if (ctx->priv)
+ ctx->priv->iface = ctx->iface;
}
+ }
- return SAVE_STATUS(ctx, res);
+ return SAVE_STATUS(ctx, res);
}
vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface,
- const uint8_t *data,
- unsigned int data_sz,
- vpx_codec_stream_info_t *si)
-{
- vpx_codec_err_t res;
-
- if (!iface || !data || !data_sz || !si
- || si->sz < sizeof(vpx_codec_stream_info_t))
- res = VPX_CODEC_INVALID_PARAM;
- else
- {
- /* Set default/unknown values */
- si->w = 0;
- si->h = 0;
-
- res = iface->dec.peek_si(data, data_sz, si);
- }
-
- return res;
+ const uint8_t *data,
+ unsigned int data_sz,
+ vpx_codec_stream_info_t *si) {
+ vpx_codec_err_t res;
+
+ if (!iface || !data || !data_sz || !si
+ || si->sz < sizeof(vpx_codec_stream_info_t))
+ res = VPX_CODEC_INVALID_PARAM;
+ else {
+ /* Set default/unknown values */
+ si->w = 0;
+ si->h = 0;
+
+ res = iface->dec.peek_si(data, data_sz, si);
+ }
+
+ return res;
}
vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx,
- vpx_codec_stream_info_t *si)
-{
- vpx_codec_err_t res;
-
- if (!ctx || !si || si->sz < sizeof(vpx_codec_stream_info_t))
- res = VPX_CODEC_INVALID_PARAM;
- else if (!ctx->iface || !ctx->priv)
- res = VPX_CODEC_ERROR;
- else
- {
- /* Set default/unknown values */
- si->w = 0;
- si->h = 0;
-
- res = ctx->iface->dec.get_si(ctx->priv->alg_priv, si);
- }
-
- return SAVE_STATUS(ctx, res);
+ vpx_codec_stream_info_t *si) {
+ vpx_codec_err_t res;
+
+ if (!ctx || !si || si->sz < sizeof(vpx_codec_stream_info_t))
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!ctx->iface || !ctx->priv)
+ res = VPX_CODEC_ERROR;
+ else {
+ /* Set default/unknown values */
+ si->w = 0;
+ si->h = 0;
+
+ res = ctx->iface->dec.get_si(ctx->priv->alg_priv, si);
+ }
+
+ return SAVE_STATUS(ctx, res);
}
@@ -122,126 +114,115 @@ vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx,
const uint8_t *data,
unsigned int data_sz,
void *user_priv,
- long deadline)
-{
- vpx_codec_err_t res;
-
- /* Sanity checks */
- /* NULL data ptr allowed if data_sz is 0 too */
- if (!ctx || (!data && data_sz))
- res = VPX_CODEC_INVALID_PARAM;
- else if (!ctx->iface || !ctx->priv)
- res = VPX_CODEC_ERROR;
- else
- {
- res = ctx->iface->dec.decode(ctx->priv->alg_priv, data, data_sz,
- user_priv, deadline);
- }
-
- return SAVE_STATUS(ctx, res);
+ long deadline) {
+ vpx_codec_err_t res;
+
+ /* Sanity checks */
+ /* NULL data ptr allowed if data_sz is 0 too */
+ if (!ctx || (!data && data_sz))
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!ctx->iface || !ctx->priv)
+ res = VPX_CODEC_ERROR;
+ else {
+ res = ctx->iface->dec.decode(ctx->priv->alg_priv, data, data_sz,
+ user_priv, deadline);
+ }
+
+ return SAVE_STATUS(ctx, res);
}
vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx,
- vpx_codec_iter_t *iter)
-{
- vpx_image_t *img;
+ vpx_codec_iter_t *iter) {
+ vpx_image_t *img;
- if (!ctx || !iter || !ctx->iface || !ctx->priv)
- img = NULL;
- else
- img = ctx->iface->dec.get_frame(ctx->priv->alg_priv, iter);
+ if (!ctx || !iter || !ctx->iface || !ctx->priv)
+ img = NULL;
+ else
+ img = ctx->iface->dec.get_frame(ctx->priv->alg_priv, iter);
- return img;
+ return img;
}
vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx,
- vpx_codec_put_frame_cb_fn_t cb,
- void *user_priv)
-{
- vpx_codec_err_t res;
-
- if (!ctx || !cb)
- res = VPX_CODEC_INVALID_PARAM;
- else if (!ctx->iface || !ctx->priv
- || !(ctx->iface->caps & VPX_CODEC_CAP_PUT_FRAME))
- res = VPX_CODEC_ERROR;
- else
- {
- ctx->priv->dec.put_frame_cb.u.put_frame = cb;
- ctx->priv->dec.put_frame_cb.user_priv = user_priv;
- res = VPX_CODEC_OK;
- }
-
- return SAVE_STATUS(ctx, res);
+ vpx_codec_put_frame_cb_fn_t cb,
+ void *user_priv) {
+ vpx_codec_err_t res;
+
+ if (!ctx || !cb)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!ctx->iface || !ctx->priv
+ || !(ctx->iface->caps & VPX_CODEC_CAP_PUT_FRAME))
+ res = VPX_CODEC_ERROR;
+ else {
+ ctx->priv->dec.put_frame_cb.u.put_frame = cb;
+ ctx->priv->dec.put_frame_cb.user_priv = user_priv;
+ res = VPX_CODEC_OK;
+ }
+
+ return SAVE_STATUS(ctx, res);
}
vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx,
- vpx_codec_put_slice_cb_fn_t cb,
- void *user_priv)
-{
- vpx_codec_err_t res;
-
- if (!ctx || !cb)
- res = VPX_CODEC_INVALID_PARAM;
- else if (!ctx->iface || !ctx->priv
- || !(ctx->iface->caps & VPX_CODEC_CAP_PUT_FRAME))
- res = VPX_CODEC_ERROR;
- else
- {
- ctx->priv->dec.put_slice_cb.u.put_slice = cb;
- ctx->priv->dec.put_slice_cb.user_priv = user_priv;
- res = VPX_CODEC_OK;
- }
-
- return SAVE_STATUS(ctx, res);
+ vpx_codec_put_slice_cb_fn_t cb,
+ void *user_priv) {
+ vpx_codec_err_t res;
+
+ if (!ctx || !cb)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!ctx->iface || !ctx->priv
+           || !(ctx->iface->caps & VPX_CODEC_CAP_PUT_SLICE))
+ res = VPX_CODEC_ERROR;
+ else {
+ ctx->priv->dec.put_slice_cb.u.put_slice = cb;
+ ctx->priv->dec.put_slice_cb.user_priv = user_priv;
+ res = VPX_CODEC_OK;
+ }
+
+ return SAVE_STATUS(ctx, res);
}
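A sketch of registering the frame callback, assuming the decoder advertises VPX_CODEC_CAP_PUT_FRAME; my_state is a hypothetical caller pointer:

    static void on_frame(void *user_priv, const vpx_image_t *img) {
        /* consume or copy img here; user_priv == my_state from below */
    }

    vpx_codec_register_put_frame_cb(&ctx, on_frame, my_state);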
vpx_codec_err_t vpx_codec_get_mem_map(vpx_codec_ctx_t *ctx,
vpx_codec_mmap_t *mmap,
- vpx_codec_iter_t *iter)
-{
- vpx_codec_err_t res = VPX_CODEC_OK;
-
- if (!ctx || !mmap || !iter || !ctx->iface)
- res = VPX_CODEC_INVALID_PARAM;
- else if (!(ctx->iface->caps & VPX_CODEC_CAP_XMA))
- res = VPX_CODEC_ERROR;
- else
- res = ctx->iface->get_mmap(ctx, mmap, iter);
-
- return SAVE_STATUS(ctx, res);
+ vpx_codec_iter_t *iter) {
+ vpx_codec_err_t res = VPX_CODEC_OK;
+
+ if (!ctx || !mmap || !iter || !ctx->iface)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!(ctx->iface->caps & VPX_CODEC_CAP_XMA))
+ res = VPX_CODEC_ERROR;
+ else
+ res = ctx->iface->get_mmap(ctx, mmap, iter);
+
+ return SAVE_STATUS(ctx, res);
}
vpx_codec_err_t vpx_codec_set_mem_map(vpx_codec_ctx_t *ctx,
vpx_codec_mmap_t *mmap,
- unsigned int num_maps)
-{
- vpx_codec_err_t res = VPX_CODEC_MEM_ERROR;
-
- if (!ctx || !mmap || !ctx->iface)
- res = VPX_CODEC_INVALID_PARAM;
- else if (!(ctx->iface->caps & VPX_CODEC_CAP_XMA))
- res = VPX_CODEC_ERROR;
- else
- {
- unsigned int i;
-
- for (i = 0; i < num_maps; i++, mmap++)
- {
- if (!mmap->base)
- break;
-
- /* Everything look ok, set the mmap in the decoder */
- res = ctx->iface->set_mmap(ctx, mmap);
-
- if (res)
- break;
- }
+ unsigned int num_maps) {
+ vpx_codec_err_t res = VPX_CODEC_MEM_ERROR;
+
+ if (!ctx || !mmap || !ctx->iface)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!(ctx->iface->caps & VPX_CODEC_CAP_XMA))
+ res = VPX_CODEC_ERROR;
+ else {
+ unsigned int i;
+
+ for (i = 0; i < num_maps; i++, mmap++) {
+ if (!mmap->base)
+ break;
+
+      /* Everything looks ok; set the mmap in the decoder */
+ res = ctx->iface->set_mmap(ctx, mmap);
+
+ if (res)
+ break;
}
+ }
- return SAVE_STATUS(ctx, res);
+ return SAVE_STATUS(ctx, res);
}
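A hedged sketch of the XMA handshake these two functions implement: iterate the codec's memory requests, satisfy each one, then hand the filled-in maps back (the 16-entry cap is arbitrary, and the align field is ignored here for brevity):

    vpx_codec_mmap_t maps[16];
    vpx_codec_iter_t it = NULL;
    unsigned int n = 0;

    while (n < 16 &&
           vpx_codec_get_mem_map(&ctx, &maps[n], &it) == VPX_CODEC_OK) {
        maps[n].base = malloc(maps[n].sz);   /* real code must honor align */
        n++;
    }
    vpx_codec_set_mem_map(&ctx, maps, n);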
diff --git a/libvpx/vpx/src/vpx_encoder.c b/libvpx/vpx/src/vpx_encoder.c
index 73c1c66..3cec895 100644
--- a/libvpx/vpx/src/vpx_encoder.c
+++ b/libvpx/vpx/src/vpx_encoder.c
@@ -24,46 +24,43 @@ vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx,
vpx_codec_iface_t *iface,
vpx_codec_enc_cfg_t *cfg,
vpx_codec_flags_t flags,
- int ver)
-{
- vpx_codec_err_t res;
-
- if (ver != VPX_ENCODER_ABI_VERSION)
- res = VPX_CODEC_ABI_MISMATCH;
- else if (!ctx || !iface || !cfg)
- res = VPX_CODEC_INVALID_PARAM;
- else if (iface->abi_version != VPX_CODEC_INTERNAL_ABI_VERSION)
- res = VPX_CODEC_ABI_MISMATCH;
- else if (!(iface->caps & VPX_CODEC_CAP_ENCODER))
- res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_XMA) && !(iface->caps & VPX_CODEC_CAP_XMA))
- res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_PSNR)
- && !(iface->caps & VPX_CODEC_CAP_PSNR))
- res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_OUTPUT_PARTITION)
- && !(iface->caps & VPX_CODEC_CAP_OUTPUT_PARTITION))
- res = VPX_CODEC_INCAPABLE;
- else
- {
- ctx->iface = iface;
- ctx->name = iface->name;
- ctx->priv = NULL;
- ctx->init_flags = flags;
- ctx->config.enc = cfg;
- res = ctx->iface->init(ctx, NULL);
-
- if (res)
- {
- ctx->err_detail = ctx->priv ? ctx->priv->err_detail : NULL;
- vpx_codec_destroy(ctx);
- }
-
- if (ctx->priv)
- ctx->priv->iface = ctx->iface;
+ int ver) {
+ vpx_codec_err_t res;
+
+ if (ver != VPX_ENCODER_ABI_VERSION)
+ res = VPX_CODEC_ABI_MISMATCH;
+ else if (!ctx || !iface || !cfg)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (iface->abi_version != VPX_CODEC_INTERNAL_ABI_VERSION)
+ res = VPX_CODEC_ABI_MISMATCH;
+ else if (!(iface->caps & VPX_CODEC_CAP_ENCODER))
+ res = VPX_CODEC_INCAPABLE;
+ else if ((flags & VPX_CODEC_USE_XMA) && !(iface->caps & VPX_CODEC_CAP_XMA))
+ res = VPX_CODEC_INCAPABLE;
+ else if ((flags & VPX_CODEC_USE_PSNR)
+ && !(iface->caps & VPX_CODEC_CAP_PSNR))
+ res = VPX_CODEC_INCAPABLE;
+ else if ((flags & VPX_CODEC_USE_OUTPUT_PARTITION)
+ && !(iface->caps & VPX_CODEC_CAP_OUTPUT_PARTITION))
+ res = VPX_CODEC_INCAPABLE;
+ else {
+ ctx->iface = iface;
+ ctx->name = iface->name;
+ ctx->priv = NULL;
+ ctx->init_flags = flags;
+ ctx->config.enc = cfg;
+ res = ctx->iface->init(ctx, NULL);
+
+ if (res) {
+ ctx->err_detail = ctx->priv ? ctx->priv->err_detail : NULL;
+ vpx_codec_destroy(ctx);
}
- return SAVE_STATUS(ctx, res);
+ if (ctx->priv)
+ ctx->priv->iface = ctx->iface;
+ }
+
+ return SAVE_STATUS(ctx, res);
}
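Callers normally reach this through the vpx_codec_enc_init() convenience macro, which supplies VPX_ENCODER_ABI_VERSION for ver. A minimal sketch (die() is a hypothetical helper):

    vpx_codec_ctx_t ctx;
    vpx_codec_enc_cfg_t cfg;

    vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &cfg, 0 /* usage */);
    cfg.g_w = 640;
    cfg.g_h = 480;
    if (vpx_codec_enc_init(&ctx, vpx_codec_vp8_cx(), &cfg, 0 /* flags */))
        die("init failed: %s", vpx_codec_error(&ctx));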
vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t *ctx,
@@ -72,128 +69,117 @@ vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t *ctx,
int num_enc,
vpx_codec_flags_t flags,
vpx_rational_t *dsf,
- int ver)
-{
- vpx_codec_err_t res = 0;
-
- if (ver != VPX_ENCODER_ABI_VERSION)
- res = VPX_CODEC_ABI_MISMATCH;
- else if (!ctx || !iface || !cfg || (num_enc > 16 || num_enc < 1))
- res = VPX_CODEC_INVALID_PARAM;
- else if (iface->abi_version != VPX_CODEC_INTERNAL_ABI_VERSION)
- res = VPX_CODEC_ABI_MISMATCH;
- else if (!(iface->caps & VPX_CODEC_CAP_ENCODER))
- res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_XMA) && !(iface->caps & VPX_CODEC_CAP_XMA))
- res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_PSNR)
- && !(iface->caps & VPX_CODEC_CAP_PSNR))
- res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_OUTPUT_PARTITION)
- && !(iface->caps & VPX_CODEC_CAP_OUTPUT_PARTITION))
- res = VPX_CODEC_INCAPABLE;
- else
- {
- int i;
- void *mem_loc = NULL;
-
- if(!(res = iface->enc.mr_get_mem_loc(cfg, &mem_loc)))
- {
- for (i = 0; i < num_enc; i++)
- {
- vpx_codec_priv_enc_mr_cfg_t mr_cfg;
-
- /* Validate down-sampling factor. */
- if(dsf->num < 1 || dsf->num > 4096 || dsf->den < 1 ||
- dsf->den > dsf->num)
- {
- res = VPX_CODEC_INVALID_PARAM;
- break;
- }
-
- mr_cfg.mr_low_res_mode_info = mem_loc;
- mr_cfg.mr_total_resolutions = num_enc;
- mr_cfg.mr_encoder_id = num_enc-1-i;
- mr_cfg.mr_down_sampling_factor.num = dsf->num;
- mr_cfg.mr_down_sampling_factor.den = dsf->den;
-
- /* Force Key-frame synchronization. Namely, encoder at higher
- * resolution always use the same frame_type chosen by the
- * lowest-resolution encoder.
- */
- if(mr_cfg.mr_encoder_id)
- cfg->kf_mode = VPX_KF_DISABLED;
-
- ctx->iface = iface;
- ctx->name = iface->name;
- ctx->priv = NULL;
- ctx->init_flags = flags;
- ctx->config.enc = cfg;
- res = ctx->iface->init(ctx, &mr_cfg);
-
- if (res)
- {
- const char *error_detail =
- ctx->priv ? ctx->priv->err_detail : NULL;
- /* Destroy current ctx */
- ctx->err_detail = error_detail;
- vpx_codec_destroy(ctx);
-
- /* Destroy already allocated high-level ctx */
- while (i)
- {
- ctx--;
- ctx->err_detail = error_detail;
- vpx_codec_destroy(ctx);
- i--;
- }
- }
-
- if (ctx->priv)
- ctx->priv->iface = ctx->iface;
-
- if (res)
- break;
-
- ctx++;
- cfg++;
- dsf++;
- }
+ int ver) {
+ vpx_codec_err_t res = 0;
+
+ if (ver != VPX_ENCODER_ABI_VERSION)
+ res = VPX_CODEC_ABI_MISMATCH;
+ else if (!ctx || !iface || !cfg || (num_enc > 16 || num_enc < 1))
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (iface->abi_version != VPX_CODEC_INTERNAL_ABI_VERSION)
+ res = VPX_CODEC_ABI_MISMATCH;
+ else if (!(iface->caps & VPX_CODEC_CAP_ENCODER))
+ res = VPX_CODEC_INCAPABLE;
+ else if ((flags & VPX_CODEC_USE_XMA) && !(iface->caps & VPX_CODEC_CAP_XMA))
+ res = VPX_CODEC_INCAPABLE;
+ else if ((flags & VPX_CODEC_USE_PSNR)
+ && !(iface->caps & VPX_CODEC_CAP_PSNR))
+ res = VPX_CODEC_INCAPABLE;
+ else if ((flags & VPX_CODEC_USE_OUTPUT_PARTITION)
+ && !(iface->caps & VPX_CODEC_CAP_OUTPUT_PARTITION))
+ res = VPX_CODEC_INCAPABLE;
+ else {
+ int i;
+ void *mem_loc = NULL;
+
+ if (!(res = iface->enc.mr_get_mem_loc(cfg, &mem_loc))) {
+ for (i = 0; i < num_enc; i++) {
+ vpx_codec_priv_enc_mr_cfg_t mr_cfg;
+
+ /* Validate down-sampling factor. */
+ if (dsf->num < 1 || dsf->num > 4096 || dsf->den < 1 ||
+ dsf->den > dsf->num) {
+ res = VPX_CODEC_INVALID_PARAM;
+ break;
}
+
+ mr_cfg.mr_low_res_mode_info = mem_loc;
+ mr_cfg.mr_total_resolutions = num_enc;
+ mr_cfg.mr_encoder_id = num_enc - 1 - i;
+ mr_cfg.mr_down_sampling_factor.num = dsf->num;
+ mr_cfg.mr_down_sampling_factor.den = dsf->den;
+
+      /* Force key-frame synchronization: encoders at higher
+       * resolutions always use the same frame_type chosen by the
+       * lowest-resolution encoder.
+ */
+ if (mr_cfg.mr_encoder_id)
+ cfg->kf_mode = VPX_KF_DISABLED;
+
+ ctx->iface = iface;
+ ctx->name = iface->name;
+ ctx->priv = NULL;
+ ctx->init_flags = flags;
+ ctx->config.enc = cfg;
+ res = ctx->iface->init(ctx, &mr_cfg);
+
+ if (res) {
+ const char *error_detail =
+ ctx->priv ? ctx->priv->err_detail : NULL;
+ /* Destroy current ctx */
+ ctx->err_detail = error_detail;
+ vpx_codec_destroy(ctx);
+
+ /* Destroy already allocated high-level ctx */
+ while (i) {
+ ctx--;
+ ctx->err_detail = error_detail;
+ vpx_codec_destroy(ctx);
+ i--;
+ }
+ }
+
+ if (ctx->priv)
+ ctx->priv->iface = ctx->iface;
+
+ if (res)
+ break;
+
+ ctx++;
+ cfg++;
+ dsf++;
+ }
}
+ }
- return SAVE_STATUS(ctx, res);
+ return SAVE_STATUS(ctx, res);
}
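A hedged sketch of a three-level multi-resolution setup via the vpx_codec_enc_init_multi() macro. The down-sampling factors here are an assumption (taken as the ratio between adjacent levels); each entry must pass the 1 <= den <= num <= 4096 check above:

    vpx_codec_ctx_t ctx[3];
    vpx_codec_enc_cfg_t cfg[3];          /* one pre-filled config per level */
    vpx_rational_t dsf[3] = { {2, 1}, {2, 1}, {1, 1} };

    vpx_codec_enc_init_multi(&ctx[0], vpx_codec_vp8_cx(), &cfg[0],
                             3 /* num_enc */, 0 /* flags */, &dsf[0]);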
vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface,
- vpx_codec_enc_cfg_t *cfg,
- unsigned int usage)
-{
- vpx_codec_err_t res;
- vpx_codec_enc_cfg_map_t *map;
-
- if (!iface || !cfg || usage > INT_MAX)
- res = VPX_CODEC_INVALID_PARAM;
- else if (!(iface->caps & VPX_CODEC_CAP_ENCODER))
- res = VPX_CODEC_INCAPABLE;
- else
- {
- res = VPX_CODEC_INVALID_PARAM;
-
- for (map = iface->enc.cfg_maps; map->usage >= 0; map++)
- {
- if (map->usage == (int)usage)
- {
- *cfg = map->cfg;
- cfg->g_usage = usage;
- res = VPX_CODEC_OK;
- break;
- }
- }
+ vpx_codec_enc_cfg_t *cfg,
+ unsigned int usage) {
+ vpx_codec_err_t res;
+ vpx_codec_enc_cfg_map_t *map;
+
+ if (!iface || !cfg || usage > INT_MAX)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!(iface->caps & VPX_CODEC_CAP_ENCODER))
+ res = VPX_CODEC_INCAPABLE;
+ else {
+ res = VPX_CODEC_INVALID_PARAM;
+
+ for (map = iface->enc.cfg_maps; map->usage >= 0; map++) {
+ if (map->usage == (int)usage) {
+ *cfg = map->cfg;
+ cfg->g_usage = usage;
+ res = VPX_CODEC_OK;
+ break;
+ }
}
+ }
- return res;
+ return res;
}
@@ -203,9 +189,9 @@ vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface,
*/
#include "vpx_ports/x86.h"
#define FLOATING_POINT_INIT() do {\
- unsigned short x87_orig_mode = x87_set_double_precision();
+ unsigned short x87_orig_mode = x87_set_double_precision();
#define FLOATING_POINT_RESTORE() \
- x87_set_control_word(x87_orig_mode); }while(0)
+ x87_set_control_word(x87_orig_mode); }while(0)
#else
@@ -219,224 +205,202 @@ vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx,
vpx_codec_pts_t pts,
unsigned long duration,
vpx_enc_frame_flags_t flags,
- unsigned long deadline)
-{
- vpx_codec_err_t res = 0;
-
- if (!ctx || (img && !duration))
- res = VPX_CODEC_INVALID_PARAM;
- else if (!ctx->iface || !ctx->priv)
- res = VPX_CODEC_ERROR;
- else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
- res = VPX_CODEC_INCAPABLE;
- else
- {
- /* Execute in a normalized floating point environment, if the platform
- * requires it.
- */
- unsigned int num_enc =ctx->priv->enc.total_encoders;
-
- FLOATING_POINT_INIT();
-
- if (num_enc == 1)
- res = ctx->iface->enc.encode(ctx->priv->alg_priv, img, pts,
- duration, flags, deadline);
- else
- {
- /* Multi-resolution encoding:
- * Encode multi-levels in reverse order. For example,
- * if mr_total_resolutions = 3, first encode level 2,
- * then encode level 1, and finally encode level 0.
- */
- int i;
-
- ctx += num_enc - 1;
- if (img) img += num_enc - 1;
-
- for (i = num_enc-1; i >= 0; i--)
- {
- if ((res = ctx->iface->enc.encode(ctx->priv->alg_priv, img, pts,
- duration, flags, deadline)))
- break;
-
- ctx--;
- if (img) img--;
- }
- ctx++;
- }
-
- FLOATING_POINT_RESTORE();
+ unsigned long deadline) {
+ vpx_codec_err_t res = 0;
+
+ if (!ctx || (img && !duration))
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!ctx->iface || !ctx->priv)
+ res = VPX_CODEC_ERROR;
+ else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
+ res = VPX_CODEC_INCAPABLE;
+ else {
+ /* Execute in a normalized floating point environment, if the platform
+ * requires it.
+ */
+ unsigned int num_enc = ctx->priv->enc.total_encoders;
+
+ FLOATING_POINT_INIT();
+
+ if (num_enc == 1)
+ res = ctx->iface->enc.encode(ctx->priv->alg_priv, img, pts,
+ duration, flags, deadline);
+ else {
+ /* Multi-resolution encoding:
+       * Encode the resolution levels in reverse order. For example,
+ * if mr_total_resolutions = 3, first encode level 2,
+ * then encode level 1, and finally encode level 0.
+ */
+ int i;
+
+ ctx += num_enc - 1;
+ if (img) img += num_enc - 1;
+
+ for (i = num_enc - 1; i >= 0; i--) {
+ if ((res = ctx->iface->enc.encode(ctx->priv->alg_priv, img, pts,
+ duration, flags, deadline)))
+ break;
+
+ ctx--;
+ if (img) img--;
+ }
+ ctx++;
}
- return SAVE_STATUS(ctx, res);
+ FLOATING_POINT_RESTORE();
+ }
+
+ return SAVE_STATUS(ctx, res);
}
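A per-frame sketch; VPX_DL_REALTIME is one of the standard deadline constants from vpx_encoder.h, and pts counts in timebase units. Passing img = NULL at end of stream flushes any delayed packets:

    if (vpx_codec_encode(&ctx, img, pts, 1 /* duration */, 0 /* flags */,
                         VPX_DL_REALTIME))
        die("encode failed: %s", vpx_codec_error(&ctx));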
const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx,
- vpx_codec_iter_t *iter)
-{
- const vpx_codec_cx_pkt_t *pkt = NULL;
-
- if (ctx)
- {
- if (!iter)
- ctx->err = VPX_CODEC_INVALID_PARAM;
- else if (!ctx->iface || !ctx->priv)
- ctx->err = VPX_CODEC_ERROR;
- else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
- ctx->err = VPX_CODEC_INCAPABLE;
- else
- pkt = ctx->iface->enc.get_cx_data(ctx->priv->alg_priv, iter);
- }
+ vpx_codec_iter_t *iter) {
+ const vpx_codec_cx_pkt_t *pkt = NULL;
- if (pkt && pkt->kind == VPX_CODEC_CX_FRAME_PKT)
- {
- /* If the application has specified a destination area for the
- * compressed data, and the codec has not placed the data there,
- * and it fits, copy it.
- */
- char *dst_buf = ctx->priv->enc.cx_data_dst_buf.buf;
-
- if (dst_buf
- && pkt->data.raw.buf != dst_buf
- && pkt->data.raw.sz
- + ctx->priv->enc.cx_data_pad_before
- + ctx->priv->enc.cx_data_pad_after
- <= ctx->priv->enc.cx_data_dst_buf.sz)
- {
- vpx_codec_cx_pkt_t *modified_pkt = &ctx->priv->enc.cx_data_pkt;
-
- memcpy(dst_buf + ctx->priv->enc.cx_data_pad_before,
- pkt->data.raw.buf, pkt->data.raw.sz);
- *modified_pkt = *pkt;
- modified_pkt->data.raw.buf = dst_buf;
- modified_pkt->data.raw.sz += ctx->priv->enc.cx_data_pad_before
- + ctx->priv->enc.cx_data_pad_after;
- pkt = modified_pkt;
- }
+ if (ctx) {
+ if (!iter)
+ ctx->err = VPX_CODEC_INVALID_PARAM;
+ else if (!ctx->iface || !ctx->priv)
+ ctx->err = VPX_CODEC_ERROR;
+ else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
+ ctx->err = VPX_CODEC_INCAPABLE;
+ else
+ pkt = ctx->iface->enc.get_cx_data(ctx->priv->alg_priv, iter);
+ }
+
+ if (pkt && pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
+ /* If the application has specified a destination area for the
+ * compressed data, and the codec has not placed the data there,
+ * and it fits, copy it.
+ */
+ char *dst_buf = ctx->priv->enc.cx_data_dst_buf.buf;
+
+ if (dst_buf
+ && pkt->data.raw.buf != dst_buf
+ && pkt->data.raw.sz
+ + ctx->priv->enc.cx_data_pad_before
+ + ctx->priv->enc.cx_data_pad_after
+ <= ctx->priv->enc.cx_data_dst_buf.sz) {
+ vpx_codec_cx_pkt_t *modified_pkt = &ctx->priv->enc.cx_data_pkt;
+
+ memcpy(dst_buf + ctx->priv->enc.cx_data_pad_before,
+ pkt->data.raw.buf, pkt->data.raw.sz);
+ *modified_pkt = *pkt;
+ modified_pkt->data.raw.buf = dst_buf;
+ modified_pkt->data.raw.sz += ctx->priv->enc.cx_data_pad_before
+ + ctx->priv->enc.cx_data_pad_after;
+ pkt = modified_pkt;
+ }
- if (dst_buf == pkt->data.raw.buf)
- {
- ctx->priv->enc.cx_data_dst_buf.buf = dst_buf + pkt->data.raw.sz;
- ctx->priv->enc.cx_data_dst_buf.sz -= pkt->data.raw.sz;
- }
+ if (dst_buf == pkt->data.raw.buf) {
+ ctx->priv->enc.cx_data_dst_buf.buf = dst_buf + pkt->data.raw.sz;
+ ctx->priv->enc.cx_data_dst_buf.sz -= pkt->data.raw.sz;
}
+ }
- return pkt;
+ return pkt;
}
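The usual consumer loop over the packet list, assuming an open outfile:

    vpx_codec_iter_t iter = NULL;
    const vpx_codec_cx_pkt_t *pkt;

    while ((pkt = vpx_codec_get_cx_data(&ctx, &iter)) != NULL) {
        if (pkt->kind == VPX_CODEC_CX_FRAME_PKT)
            fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz, outfile);
    }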
vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx,
- const vpx_fixed_buf_t *buf,
- unsigned int pad_before,
- unsigned int pad_after)
-{
- if (!ctx || !ctx->priv)
- return VPX_CODEC_INVALID_PARAM;
-
- if (buf)
- {
- ctx->priv->enc.cx_data_dst_buf = *buf;
- ctx->priv->enc.cx_data_pad_before = pad_before;
- ctx->priv->enc.cx_data_pad_after = pad_after;
- }
- else
- {
- ctx->priv->enc.cx_data_dst_buf.buf = NULL;
- ctx->priv->enc.cx_data_dst_buf.sz = 0;
- ctx->priv->enc.cx_data_pad_before = 0;
- ctx->priv->enc.cx_data_pad_after = 0;
- }
-
- return VPX_CODEC_OK;
+ const vpx_fixed_buf_t *buf,
+ unsigned int pad_before,
+ unsigned int pad_after) {
+ if (!ctx || !ctx->priv)
+ return VPX_CODEC_INVALID_PARAM;
+
+ if (buf) {
+ ctx->priv->enc.cx_data_dst_buf = *buf;
+ ctx->priv->enc.cx_data_pad_before = pad_before;
+ ctx->priv->enc.cx_data_pad_after = pad_after;
+ } else {
+ ctx->priv->enc.cx_data_dst_buf.buf = NULL;
+ ctx->priv->enc.cx_data_dst_buf.sz = 0;
+ ctx->priv->enc.cx_data_pad_before = 0;
+ ctx->priv->enc.cx_data_pad_after = 0;
+ }
+
+ return VPX_CODEC_OK;
}
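A sketch of redirecting compressed output into caller-owned storage, reserving 4 bytes of leading pad per frame; out_buf/out_buf_sz are assumed caller storage:

    vpx_fixed_buf_t dst = { out_buf, out_buf_sz };
    vpx_codec_set_cx_data_buf(&ctx, &dst, 4 /* pad_before */, 0 /* pad_after */);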
-const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx)
-{
- vpx_image_t *img = NULL;
-
- if (ctx)
- {
- if (!ctx->iface || !ctx->priv)
- ctx->err = VPX_CODEC_ERROR;
- else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
- ctx->err = VPX_CODEC_INCAPABLE;
- else if (!ctx->iface->enc.get_preview)
- ctx->err = VPX_CODEC_INCAPABLE;
- else
- img = ctx->iface->enc.get_preview(ctx->priv->alg_priv);
- }
+const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx) {
+ vpx_image_t *img = NULL;
- return img;
+ if (ctx) {
+ if (!ctx->iface || !ctx->priv)
+ ctx->err = VPX_CODEC_ERROR;
+ else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
+ ctx->err = VPX_CODEC_INCAPABLE;
+ else if (!ctx->iface->enc.get_preview)
+ ctx->err = VPX_CODEC_INCAPABLE;
+ else
+ img = ctx->iface->enc.get_preview(ctx->priv->alg_priv);
+ }
+
+ return img;
}
-vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx)
-{
- vpx_fixed_buf_t *buf = NULL;
-
- if (ctx)
- {
- if (!ctx->iface || !ctx->priv)
- ctx->err = VPX_CODEC_ERROR;
- else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
- ctx->err = VPX_CODEC_INCAPABLE;
- else if (!ctx->iface->enc.get_glob_hdrs)
- ctx->err = VPX_CODEC_INCAPABLE;
- else
- buf = ctx->iface->enc.get_glob_hdrs(ctx->priv->alg_priv);
- }
+vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx) {
+ vpx_fixed_buf_t *buf = NULL;
- return buf;
+ if (ctx) {
+ if (!ctx->iface || !ctx->priv)
+ ctx->err = VPX_CODEC_ERROR;
+ else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
+ ctx->err = VPX_CODEC_INCAPABLE;
+ else if (!ctx->iface->enc.get_glob_hdrs)
+ ctx->err = VPX_CODEC_INCAPABLE;
+ else
+ buf = ctx->iface->enc.get_glob_hdrs(ctx->priv->alg_priv);
+ }
+
+ return buf;
}
vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx,
- const vpx_codec_enc_cfg_t *cfg)
-{
- vpx_codec_err_t res;
+ const vpx_codec_enc_cfg_t *cfg) {
+ vpx_codec_err_t res;
- if (!ctx || !ctx->iface || !ctx->priv || !cfg)
- res = VPX_CODEC_INVALID_PARAM;
- else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
- res = VPX_CODEC_INCAPABLE;
- else
- res = ctx->iface->enc.cfg_set(ctx->priv->alg_priv, cfg);
+ if (!ctx || !ctx->iface || !ctx->priv || !cfg)
+ res = VPX_CODEC_INVALID_PARAM;
+ else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
+ res = VPX_CODEC_INCAPABLE;
+ else
+ res = ctx->iface->enc.cfg_set(ctx->priv->alg_priv, cfg);
- return SAVE_STATUS(ctx, res);
+ return SAVE_STATUS(ctx, res);
}
int vpx_codec_pkt_list_add(struct vpx_codec_pkt_list *list,
- const struct vpx_codec_cx_pkt *pkt)
-{
- if (list->cnt < list->max)
- {
- list->pkts[list->cnt++] = *pkt;
- return 0;
- }
+ const struct vpx_codec_cx_pkt *pkt) {
+ if (list->cnt < list->max) {
+ list->pkts[list->cnt++] = *pkt;
+ return 0;
+ }
- return 1;
+ return 1;
}
const vpx_codec_cx_pkt_t *vpx_codec_pkt_list_get(struct vpx_codec_pkt_list *list,
- vpx_codec_iter_t *iter)
-{
- const vpx_codec_cx_pkt_t *pkt;
+ vpx_codec_iter_t *iter) {
+ const vpx_codec_cx_pkt_t *pkt;
- if (!(*iter))
- {
- *iter = list->pkts;
- }
+ if (!(*iter)) {
+ *iter = list->pkts;
+ }
- pkt = (const void *) * iter;
+  pkt = (const void *)*iter;
- if ((size_t)(pkt - list->pkts) < list->cnt)
- *iter = pkt + 1;
- else
- pkt = NULL;
+ if ((size_t)(pkt - list->pkts) < list->cnt)
+ *iter = pkt + 1;
+ else
+ pkt = NULL;
- return pkt;
+ return pkt;
}
diff --git a/libvpx/vpx/src/vpx_image.c b/libvpx/vpx/src/vpx_image.c
index 336b6e2..36eda95 100644
--- a/libvpx/vpx/src/vpx_image.c
+++ b/libvpx/vpx/src/vpx_image.c
@@ -18,30 +18,26 @@
#define align_addr(addr,align) (void*)(((size_t)(addr) + ((align) - 1)) & (size_t)-(align))
/* Memalign code is copied from vpx_mem.c */
-static void *img_buf_memalign(size_t align, size_t size)
-{
- void *addr,
- * x = NULL;
-
- addr = malloc(size + align - 1 + ADDRESS_STORAGE_SIZE);
-
- if (addr)
- {
- x = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, (int)align);
- /* save the actual malloc address */
- ((size_t *)x)[-1] = (size_t)addr;
- }
+static void *img_buf_memalign(size_t align, size_t size) {
+  void *addr,
+       *x = NULL;
+
+ addr = malloc(size + align - 1 + ADDRESS_STORAGE_SIZE);
- return x;
+ if (addr) {
+ x = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, (int)align);
+ /* save the actual malloc address */
+ ((size_t *)x)[-1] = (size_t)addr;
+ }
+
+ return x;
}
-static void img_buf_free(void *memblk)
-{
- if (memblk)
- {
- void *addr = (void *)(((size_t *)memblk)[-1]);
- free(addr);
- }
+static void img_buf_free(void *memblk) {
+ if (memblk) {
+ void *addr = (void *)(((size_t *)memblk)[-1]);
+ free(addr);
+ }
}
static vpx_image_t *img_alloc_helper(vpx_image_t *img,
@@ -50,41 +46,39 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img,
unsigned int d_h,
unsigned int buf_align,
unsigned int stride_align,
- unsigned char *img_data)
-{
+ unsigned char *img_data) {
- unsigned int h, w, s, xcs, ycs, bps;
- int align;
+ unsigned int h, w, s, xcs, ycs, bps;
+ int align;
- /* Treat align==0 like align==1 */
- if (!buf_align)
- buf_align = 1;
+ /* Treat align==0 like align==1 */
+ if (!buf_align)
+ buf_align = 1;
- /* Validate alignment (must be power of 2) */
- if (buf_align & (buf_align - 1))
- goto fail;
+ /* Validate alignment (must be power of 2) */
+ if (buf_align & (buf_align - 1))
+ goto fail;
- /* Treat align==0 like align==1 */
- if (!stride_align)
- stride_align = 1;
+ /* Treat align==0 like align==1 */
+ if (!stride_align)
+ stride_align = 1;
- /* Validate alignment (must be power of 2) */
- if (stride_align & (stride_align - 1))
- goto fail;
+ /* Validate alignment (must be power of 2) */
+ if (stride_align & (stride_align - 1))
+ goto fail;
- /* Get sample size for this format */
- switch (fmt)
- {
+ /* Get sample size for this format */
+ switch (fmt) {
case VPX_IMG_FMT_RGB32:
case VPX_IMG_FMT_RGB32_LE:
case VPX_IMG_FMT_ARGB:
case VPX_IMG_FMT_ARGB_LE:
- bps = 32;
- break;
+ bps = 32;
+ break;
case VPX_IMG_FMT_RGB24:
case VPX_IMG_FMT_BGR24:
- bps = 24;
- break;
+ bps = 24;
+ break;
case VPX_IMG_FMT_RGB565:
case VPX_IMG_FMT_RGB565_LE:
case VPX_IMG_FMT_RGB555:
@@ -92,108 +86,101 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img,
case VPX_IMG_FMT_UYVY:
case VPX_IMG_FMT_YUY2:
case VPX_IMG_FMT_YVYU:
- bps = 16;
- break;
+ bps = 16;
+ break;
case VPX_IMG_FMT_I420:
case VPX_IMG_FMT_YV12:
case VPX_IMG_FMT_VPXI420:
case VPX_IMG_FMT_VPXYV12:
- bps = 12;
- break;
+ bps = 12;
+ break;
default:
- bps = 16;
- break;
- }
+ bps = 16;
+ break;
+ }
- /* Get chroma shift values for this format */
- switch (fmt)
- {
+ /* Get chroma shift values for this format */
+ switch (fmt) {
case VPX_IMG_FMT_I420:
case VPX_IMG_FMT_YV12:
case VPX_IMG_FMT_VPXI420:
case VPX_IMG_FMT_VPXYV12:
- xcs = 1;
- break;
+ xcs = 1;
+ break;
default:
- xcs = 0;
- break;
- }
+ xcs = 0;
+ break;
+ }
- switch (fmt)
- {
+ switch (fmt) {
case VPX_IMG_FMT_I420:
case VPX_IMG_FMT_YV12:
case VPX_IMG_FMT_VPXI420:
case VPX_IMG_FMT_VPXYV12:
- ycs = 1;
- break;
+ ycs = 1;
+ break;
default:
- ycs = 0;
- break;
- }
+ ycs = 0;
+ break;
+ }
+
+ /* Calculate storage sizes given the chroma subsampling */
+ align = (1 << xcs) - 1;
+ w = (d_w + align) & ~align;
+ align = (1 << ycs) - 1;
+ h = (d_h + align) & ~align;
+ s = (fmt & VPX_IMG_FMT_PLANAR) ? w : bps * w / 8;
+ s = (s + stride_align - 1) & ~(stride_align - 1);
+
+ /* Allocate the new image */
+ if (!img) {
+ img = (vpx_image_t *)calloc(1, sizeof(vpx_image_t));
- /* Calculate storage sizes given the chroma subsampling */
- align = (1 << xcs) - 1;
- w = (d_w + align) & ~align;
- align = (1 << ycs) - 1;
- h = (d_h + align) & ~align;
- s = (fmt & VPX_IMG_FMT_PLANAR) ? w : bps * w / 8;
- s = (s + stride_align - 1) & ~(stride_align - 1);
-
- /* Allocate the new image */
if (!img)
- {
- img = (vpx_image_t *)calloc(1, sizeof(vpx_image_t));
+ goto fail;
- if (!img)
- goto fail;
+ img->self_allocd = 1;
+ } else {
+ memset(img, 0, sizeof(vpx_image_t));
+ }
- img->self_allocd = 1;
- }
- else
- {
- memset(img, 0, sizeof(vpx_image_t));
- }
+ img->img_data = img_data;
- img->img_data = img_data;
+ if (!img_data) {
+ img->img_data = img_buf_memalign(buf_align, ((fmt & VPX_IMG_FMT_PLANAR) ?
+ h * s * bps / 8 : h * s));
+ img->img_data_owner = 1;
+ }
- if (!img_data)
- {
- img->img_data = img_buf_memalign(buf_align, ((fmt & VPX_IMG_FMT_PLANAR)?
- h * s * bps / 8 : h * s));
- img->img_data_owner = 1;
- }
+ if (!img->img_data)
+ goto fail;
- if (!img->img_data)
- goto fail;
+ img->fmt = fmt;
+ img->w = w;
+ img->h = h;
+ img->x_chroma_shift = xcs;
+ img->y_chroma_shift = ycs;
+ img->bps = bps;
- img->fmt = fmt;
- img->w = w;
- img->h = h;
- img->x_chroma_shift = xcs;
- img->y_chroma_shift = ycs;
- img->bps = bps;
+ /* Calculate strides */
+ img->stride[VPX_PLANE_Y] = img->stride[VPX_PLANE_ALPHA] = s;
+ img->stride[VPX_PLANE_U] = img->stride[VPX_PLANE_V] = s >> xcs;
- /* Calculate strides */
- img->stride[VPX_PLANE_Y] = img->stride[VPX_PLANE_ALPHA] = s;
- img->stride[VPX_PLANE_U] = img->stride[VPX_PLANE_V] = s >> xcs;
-
- /* Default viewport to entire image */
- if (!vpx_img_set_rect(img, 0, 0, d_w, d_h))
- return img;
+ /* Default viewport to entire image */
+ if (!vpx_img_set_rect(img, 0, 0, d_w, d_h))
+ return img;
fail:
- vpx_img_free(img);
- return NULL;
+ vpx_img_free(img);
+ return NULL;
}
vpx_image_t *vpx_img_alloc(vpx_image_t *img,
vpx_img_fmt_t fmt,
unsigned int d_w,
unsigned int d_h,
- unsigned int align)
-{
- return img_alloc_helper(img, fmt, d_w, d_h, align, align, NULL);
+ unsigned int align) {
+ return img_alloc_helper(img, fmt, d_w, d_h, align, align, NULL);
}
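A minimal allocation sketch using the helper above:

    vpx_image_t *img = vpx_img_alloc(NULL, VPX_IMG_FMT_I420, 640, 480,
                                     16 /* alignment */);
    if (img) {
        /* write pixels through img->planes[VPX_PLANE_Y] etc., stepping rows
         * by img->stride[] */
        vpx_img_free(img);
    }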
vpx_image_t *vpx_img_wrap(vpx_image_t *img,
@@ -201,105 +188,92 @@ vpx_image_t *vpx_img_wrap(vpx_image_t *img,
unsigned int d_w,
unsigned int d_h,
unsigned int stride_align,
- unsigned char *img_data)
-{
- /* By setting buf_align = 1, we don't change buffer alignment in this
- * function. */
- return img_alloc_helper(img, fmt, d_w, d_h, 1, stride_align, img_data);
+ unsigned char *img_data) {
+ /* By setting buf_align = 1, we don't change buffer alignment in this
+ * function. */
+ return img_alloc_helper(img, fmt, d_w, d_h, 1, stride_align, img_data);
}
int vpx_img_set_rect(vpx_image_t *img,
unsigned int x,
unsigned int y,
unsigned int w,
- unsigned int h)
-{
- unsigned char *data;
-
- if (x + w <= img->w && y + h <= img->h)
- {
- img->d_w = w;
- img->d_h = h;
-
- /* Calculate plane pointers */
- if (!(img->fmt & VPX_IMG_FMT_PLANAR))
- {
- img->planes[VPX_PLANE_PACKED] =
- img->img_data + x * img->bps / 8 + y * img->stride[VPX_PLANE_PACKED];
- }
- else
- {
- data = img->img_data;
-
- if (img->fmt & VPX_IMG_FMT_HAS_ALPHA)
- {
- img->planes[VPX_PLANE_ALPHA] =
- data + x + y * img->stride[VPX_PLANE_ALPHA];
- data += img->h * img->stride[VPX_PLANE_ALPHA];
- }
-
- img->planes[VPX_PLANE_Y] = data + x + y * img->stride[VPX_PLANE_Y];
- data += img->h * img->stride[VPX_PLANE_Y];
-
- if (!(img->fmt & VPX_IMG_FMT_UV_FLIP))
- {
- img->planes[VPX_PLANE_U] = data
- + (x >> img->x_chroma_shift)
- + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
- data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
- img->planes[VPX_PLANE_V] = data
- + (x >> img->x_chroma_shift)
- + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
- }
- else
- {
- img->planes[VPX_PLANE_V] = data
- + (x >> img->x_chroma_shift)
- + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
- data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
- img->planes[VPX_PLANE_U] = data
- + (x >> img->x_chroma_shift)
- + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
- }
- }
-
- return 0;
+ unsigned int h) {
+ unsigned char *data;
+
+ if (x + w <= img->w && y + h <= img->h) {
+ img->d_w = w;
+ img->d_h = h;
+
+ /* Calculate plane pointers */
+ if (!(img->fmt & VPX_IMG_FMT_PLANAR)) {
+ img->planes[VPX_PLANE_PACKED] =
+ img->img_data + x * img->bps / 8 + y * img->stride[VPX_PLANE_PACKED];
+ } else {
+ data = img->img_data;
+
+ if (img->fmt & VPX_IMG_FMT_HAS_ALPHA) {
+ img->planes[VPX_PLANE_ALPHA] =
+ data + x + y * img->stride[VPX_PLANE_ALPHA];
+ data += img->h * img->stride[VPX_PLANE_ALPHA];
+ }
+
+ img->planes[VPX_PLANE_Y] = data + x + y * img->stride[VPX_PLANE_Y];
+ data += img->h * img->stride[VPX_PLANE_Y];
+
+ if (!(img->fmt & VPX_IMG_FMT_UV_FLIP)) {
+ img->planes[VPX_PLANE_U] = data
+ + (x >> img->x_chroma_shift)
+ + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
+ data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
+ img->planes[VPX_PLANE_V] = data
+ + (x >> img->x_chroma_shift)
+ + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
+ } else {
+ img->planes[VPX_PLANE_V] = data
+ + (x >> img->x_chroma_shift)
+ + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
+ data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
+ img->planes[VPX_PLANE_U] = data
+ + (x >> img->x_chroma_shift)
+ + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
+ }
}
- return -1;
+ return 0;
+ }
+
+ return -1;
}
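For example, restricting the visible area to a centered 320x240 window; the function returns non-zero when the rectangle does not fit:

    if (vpx_img_set_rect(img, (img->w - 320) / 2, (img->h - 240) / 2,
                         320, 240))
        die("rectangle out of bounds");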
-void vpx_img_flip(vpx_image_t *img)
-{
- /* Note: In the calculation pointer adjustment calculation, we want the
- * rhs to be promoted to a signed type. Section 6.3.1.8 of the ISO C99
- * standard indicates that if the adjustment parameter is unsigned, the
- * stride parameter will be promoted to unsigned, causing errors when
- * the lhs is a larger type than the rhs.
- */
- img->planes[VPX_PLANE_Y] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_Y];
- img->stride[VPX_PLANE_Y] = -img->stride[VPX_PLANE_Y];
-
- img->planes[VPX_PLANE_U] += (signed)((img->d_h >> img->y_chroma_shift) - 1)
- * img->stride[VPX_PLANE_U];
- img->stride[VPX_PLANE_U] = -img->stride[VPX_PLANE_U];
-
- img->planes[VPX_PLANE_V] += (signed)((img->d_h >> img->y_chroma_shift) - 1)
- * img->stride[VPX_PLANE_V];
- img->stride[VPX_PLANE_V] = -img->stride[VPX_PLANE_V];
-
- img->planes[VPX_PLANE_ALPHA] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_ALPHA];
- img->stride[VPX_PLANE_ALPHA] = -img->stride[VPX_PLANE_ALPHA];
+void vpx_img_flip(vpx_image_t *img) {
+  /* Note: in the pointer adjustment calculation, we want the
+ * rhs to be promoted to a signed type. Section 6.3.1.8 of the ISO C99
+ * standard indicates that if the adjustment parameter is unsigned, the
+ * stride parameter will be promoted to unsigned, causing errors when
+ * the lhs is a larger type than the rhs.
+ */
+ img->planes[VPX_PLANE_Y] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_Y];
+ img->stride[VPX_PLANE_Y] = -img->stride[VPX_PLANE_Y];
+
+ img->planes[VPX_PLANE_U] += (signed)((img->d_h >> img->y_chroma_shift) - 1)
+ * img->stride[VPX_PLANE_U];
+ img->stride[VPX_PLANE_U] = -img->stride[VPX_PLANE_U];
+
+ img->planes[VPX_PLANE_V] += (signed)((img->d_h >> img->y_chroma_shift) - 1)
+ * img->stride[VPX_PLANE_V];
+ img->stride[VPX_PLANE_V] = -img->stride[VPX_PLANE_V];
+
+ img->planes[VPX_PLANE_ALPHA] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_ALPHA];
+ img->stride[VPX_PLANE_ALPHA] = -img->stride[VPX_PLANE_ALPHA];
}
-void vpx_img_free(vpx_image_t *img)
-{
- if (img)
- {
- if (img->img_data && img->img_data_owner)
- img_buf_free(img->img_data);
+void vpx_img_free(vpx_image_t *img) {
+ if (img) {
+ if (img->img_data && img->img_data_owner)
+ img_buf_free(img->img_data);
- if (img->self_allocd)
- free(img);
- }
+ if (img->self_allocd)
+ free(img);
+ }
}
diff --git a/libvpx/vpx/vp8.h b/libvpx/vpx/vp8.h
index 2952203..0b4cb1b 100644
--- a/libvpx/vpx/vp8.h
+++ b/libvpx/vpx/vp8.h
@@ -36,34 +36,38 @@
*
* The set of macros define the control functions of VP8 interface
*/
-enum vp8_com_control_id
-{
- VP8_SET_REFERENCE = 1, /**< pass in an external frame into decoder to be used as reference frame */
- VP8_COPY_REFERENCE = 2, /**< get a copy of reference frame from the decoder */
- VP8_SET_POSTPROC = 3, /**< set the decoder's post processing settings */
- VP8_SET_DBG_COLOR_REF_FRAME = 4, /**< set the reference frames to color for each macroblock */
- VP8_SET_DBG_COLOR_MB_MODES = 5, /**< set which macro block modes to color */
- VP8_SET_DBG_COLOR_B_MODES = 6, /**< set which blocks modes to color */
- VP8_SET_DBG_DISPLAY_MV = 7, /**< set which motion vector modes to draw */
- VP8_COMMON_CTRL_ID_MAX,
- VP8_DECODER_CTRL_ID_START = 256
+enum vp8_com_control_id {
+  VP8_SET_REFERENCE = 1, /**< pass an external frame to the decoder for use as a reference frame */
+ VP8_COPY_REFERENCE = 2, /**< get a copy of reference frame from the decoder */
+ VP8_SET_POSTPROC = 3, /**< set the decoder's post processing settings */
+ VP8_SET_DBG_COLOR_REF_FRAME = 4, /**< set the reference frames to color for each macroblock */
+ VP8_SET_DBG_COLOR_MB_MODES = 5, /**< set which macro block modes to color */
+ VP8_SET_DBG_COLOR_B_MODES = 6, /**< set which blocks modes to color */
+ VP8_SET_DBG_DISPLAY_MV = 7, /**< set which motion vector modes to draw */
+
+ /* TODO(jkoleszar): The encoder incorrectly reuses some of these values (5+)
+ * for its control ids. These should be migrated to something like the
+ * VP8_DECODER_CTRL_ID_START range next time we're ready to break the ABI.
+ */
+ VP9_GET_REFERENCE = 128, /**< get a pointer to a reference frame */
+ VP8_COMMON_CTRL_ID_MAX,
+ VP8_DECODER_CTRL_ID_START = 256
};
/*!\brief post process flags
*
* The set of macros define VP8 decoder post processing flags
*/
-enum vp8_postproc_level
-{
- VP8_NOFILTERING = 0,
- VP8_DEBLOCK = 1<<0,
- VP8_DEMACROBLOCK = 1<<1,
- VP8_ADDNOISE = 1<<2,
- VP8_DEBUG_TXT_FRAME_INFO = 1<<3, /**< print frame information */
- VP8_DEBUG_TXT_MBLK_MODES = 1<<4, /**< print macro block modes over each macro block */
- VP8_DEBUG_TXT_DC_DIFF = 1<<5, /**< print dc diff for each macro block */
- VP8_DEBUG_TXT_RATE_INFO = 1<<6, /**< print video rate info (encoder only) */
- VP8_MFQE = 1<<10
+enum vp8_postproc_level {
+ VP8_NOFILTERING = 0,
+ VP8_DEBLOCK = 1 << 0,
+ VP8_DEMACROBLOCK = 1 << 1,
+ VP8_ADDNOISE = 1 << 2,
+ VP8_DEBUG_TXT_FRAME_INFO = 1 << 3, /**< print frame information */
+ VP8_DEBUG_TXT_MBLK_MODES = 1 << 4, /**< print macro block modes over each macro block */
+ VP8_DEBUG_TXT_DC_DIFF = 1 << 5, /**< print dc diff for each macro block */
+ VP8_DEBUG_TXT_RATE_INFO = 1 << 6, /**< print video rate info (encoder only) */
+ VP8_MFQE = 1 << 10
};
/*!\brief post process flags
@@ -73,22 +77,20 @@ enum vp8_postproc_level
* to VP8_DEBLOCK and deblocking_level to 1.
*/
-typedef struct vp8_postproc_cfg
-{
- int post_proc_flag; /**< the types of post processing to be done, should be combination of "vp8_postproc_level" */
- int deblocking_level; /**< the strength of deblocking, valid range [0, 16] */
- int noise_level; /**< the strength of additive noise, valid range [0, 16] */
+typedef struct vp8_postproc_cfg {
+  int post_proc_flag; /**< the types of post processing to be done, should be a combination of "vp8_postproc_level" flags */
+ int deblocking_level; /**< the strength of deblocking, valid range [0, 16] */
+ int noise_level; /**< the strength of additive noise, valid range [0, 16] */
} vp8_postproc_cfg_t;
/*!\brief reference frame type
*
* The set of macros define the type of VP8 reference frames
*/
-typedef enum vpx_ref_frame_type
-{
- VP8_LAST_FRAME = 1,
- VP8_GOLD_FRAME = 2,
- VP8_ALTR_FRAME = 4
+typedef enum vpx_ref_frame_type {
+ VP8_LAST_FRAME = 1,
+ VP8_GOLD_FRAME = 2,
+ VP8_ALTR_FRAME = 4
} vpx_ref_frame_type_t;
/*!\brief reference frame data struct
@@ -96,12 +98,15 @@ typedef enum vpx_ref_frame_type
* define the data struct to access vp8 reference frames
*/
-typedef struct vpx_ref_frame
-{
- vpx_ref_frame_type_t frame_type; /**< which reference frame */
- vpx_image_t img; /**< reference frame data in image format */
+typedef struct vpx_ref_frame {
+ vpx_ref_frame_type_t frame_type; /**< which reference frame */
+ vpx_image_t img; /**< reference frame data in image format */
} vpx_ref_frame_t;
+typedef struct vp9_ref_frame {
+ int idx; /**< frame index to get (input) */
+ vpx_image_t img; /**< img structure to populate (output) */
+} vp9_ref_frame_t;
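A hedged usage sketch for the new control, assuming a VP9 decoder context; the returned image is taken to alias decoder-owned memory, so the caller must not free it:

    vp9_ref_frame_t ref;
    ref.idx = 0;                         /* reference slot to fetch */
    if (!vpx_codec_control(&ctx, VP9_GET_REFERENCE, &ref)) {
        /* ref.img now describes that reference frame */
    }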
/*!\brief vp8 decoder control function parameter type
*
@@ -115,6 +120,7 @@ VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_REF_FRAME, int)
VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_MB_MODES, int)
VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_B_MODES, int)
VPX_CTRL_USE_TYPE(VP8_SET_DBG_DISPLAY_MV, int)
+VPX_CTRL_USE_TYPE(VP9_GET_REFERENCE, vp9_ref_frame_t *)
/*! @} - end defgroup vp8 */
diff --git a/libvpx/vpx/vp8cx.h b/libvpx/vpx/vp8cx.h
index a3c95d2..f8e2ef9 100644
--- a/libvpx/vpx/vp8cx.h
+++ b/libvpx/vpx/vp8cx.h
@@ -31,7 +31,14 @@
* @{
*/
extern vpx_codec_iface_t vpx_codec_vp8_cx_algo;
-extern vpx_codec_iface_t* vpx_codec_vp8_cx(void);
+extern vpx_codec_iface_t *vpx_codec_vp8_cx(void);
+
+/* TODO(jkoleszar): These move to VP9 in a later patch set. */
+extern vpx_codec_iface_t vpx_codec_vp9_cx_algo;
+extern vpx_codec_iface_t *vpx_codec_vp9_cx(void);
+extern vpx_codec_iface_t vpx_codec_vp9x_cx_algo;
+extern vpx_codec_iface_t *vpx_codec_vp9x_cx(void);
+
/*!@} - end algorithm interface member group*/
@@ -121,75 +128,80 @@ extern vpx_codec_iface_t* vpx_codec_vp8_cx(void);
*
* \sa #vpx_codec_control
*/
-enum vp8e_enc_control_id
-{
- VP8E_UPD_ENTROPY = 5, /**< control function to set mode of entropy update in encoder */
- VP8E_UPD_REFERENCE, /**< control function to set reference update mode in encoder */
- VP8E_USE_REFERENCE, /**< control function to set which reference frame encoder can use */
- VP8E_SET_ROI_MAP, /**< control function to pass an ROI map to encoder */
- VP8E_SET_ACTIVEMAP, /**< control function to pass an Active map to encoder */
- VP8E_SET_SCALEMODE = 11, /**< control function to set encoder scaling mode */
- /*!\brief control function to set vp8 encoder cpuused
- *
- * Changes in this value influences, among others, the encoder's selection
- * of motion estimation methods. Values greater than 0 will increase encoder
- * speed at the expense of quality.
- * The full set of adjustments can be found in
- * onyx_if.c:vp8_set_speed_features().
- * \todo List highlights of the changes at various levels.
- *
- * \note Valid range: -16..16
- */
- VP8E_SET_CPUUSED = 13,
- VP8E_SET_ENABLEAUTOALTREF, /**< control function to enable vp8 to automatic set and use altref frame */
- VP8E_SET_NOISE_SENSITIVITY, /**< control function to set noise sensitivity */
- VP8E_SET_SHARPNESS, /**< control function to set sharpness */
- VP8E_SET_STATIC_THRESHOLD, /**< control function to set the threshold for macroblocks treated static */
- VP8E_SET_TOKEN_PARTITIONS, /**< control function to set the number of token partitions */
- VP8E_GET_LAST_QUANTIZER, /**< return the quantizer chosen by the
+enum vp8e_enc_control_id {
+ VP8E_UPD_ENTROPY = 5, /**< control function to set mode of entropy update in encoder */
+ VP8E_UPD_REFERENCE, /**< control function to set reference update mode in encoder */
+ VP8E_USE_REFERENCE, /**< control function to set which reference frame encoder can use */
+ VP8E_SET_ROI_MAP, /**< control function to pass an ROI map to encoder */
+ VP8E_SET_ACTIVEMAP, /**< control function to pass an Active map to encoder */
+ VP8E_SET_SCALEMODE = 11, /**< control function to set encoder scaling mode */
+ /*!\brief control function to set vp8 encoder cpuused
+ *
+   * Changes in this value influence, among other things, the encoder's selection
+ * of motion estimation methods. Values greater than 0 will increase encoder
+ * speed at the expense of quality.
+ * The full set of adjustments can be found in
+ * onyx_if.c:vp8_set_speed_features().
+ * \todo List highlights of the changes at various levels.
+ *
+ * \note Valid range: -16..16
+ */
+ VP8E_SET_CPUUSED = 13,
+  VP8E_SET_ENABLEAUTOALTREF, /**< control function to enable the encoder to automatically set and use an altref frame */
+ VP8E_SET_NOISE_SENSITIVITY, /**< control function to set noise sensitivity */
+ VP8E_SET_SHARPNESS, /**< control function to set sharpness */
+ VP8E_SET_STATIC_THRESHOLD, /**< control function to set the threshold for macroblocks treated static */
+ VP8E_SET_TOKEN_PARTITIONS, /**< control function to set the number of token partitions */
+ VP8E_GET_LAST_QUANTIZER, /**< return the quantizer chosen by the
encoder for the last frame using the internal
scale */
- VP8E_GET_LAST_QUANTIZER_64, /**< return the quantizer chosen by the
+ VP8E_GET_LAST_QUANTIZER_64, /**< return the quantizer chosen by the
encoder for the last frame, using the 0..63
scale as used by the rc_*_quantizer config
parameters */
- VP8E_SET_ARNR_MAXFRAMES, /**< control function to set the max number of frames blurred creating arf*/
- VP8E_SET_ARNR_STRENGTH , /**< control function to set the filter strength for the arf */
- VP8E_SET_ARNR_TYPE , /**< control function to set the type of filter to use for the arf*/
- VP8E_SET_TUNING, /**< control function to set visual tuning */
- /*!\brief control function to set constrained quality level
- *
- * \attention For this value to be used vpx_codec_enc_cfg_t::g_usage must be
- * set to #VPX_CQ.
- * \note Valid range: 0..63
- */
- VP8E_SET_CQ_LEVEL,
-
- /*!\brief Max data rate for Intra frames
- *
- * This value controls additional clamping on the maximum size of a
- * keyframe. It is expressed as a percentage of the average
- * per-frame bitrate, with the special (and default) value 0 meaning
- * unlimited, or no additional clamping beyond the codec's built-in
- * algorithm.
- *
- * For example, to allocate no more than 4.5 frames worth of bitrate
- * to a keyframe, set this to 450.
- *
- */
- VP8E_SET_MAX_INTRA_BITRATE_PCT
+ VP8E_SET_ARNR_MAXFRAMES, /**< control function to set the max number of frames blurred creating arf*/
+ VP8E_SET_ARNR_STRENGTH, /**< control function to set the filter strength for the arf */
+ VP8E_SET_ARNR_TYPE, /**< control function to set the type of filter to use for the arf*/
+ VP8E_SET_TUNING, /**< control function to set visual tuning */
+ /*!\brief control function to set constrained quality level
+ *
+ * \attention For this value to be used vpx_codec_enc_cfg_t::g_usage must be
+ * set to #VPX_CQ.
+ * \note Valid range: 0..63
+ */
+ VP8E_SET_CQ_LEVEL,
+
+ /*!\brief Max data rate for Intra frames
+ *
+ * This value controls additional clamping on the maximum size of a
+ * keyframe. It is expressed as a percentage of the average
+ * per-frame bitrate, with the special (and default) value 0 meaning
+ * unlimited, or no additional clamping beyond the codec's built-in
+ * algorithm.
+ *
+ * For example, to allocate no more than 4.5 frames worth of bitrate
+ * to a keyframe, set this to 450.
+ *
+ */
+ VP8E_SET_MAX_INTRA_BITRATE_PCT,
+
+
+ /* TODO(jkoleszar): Move to vp9cx.h */
+ VP9E_SET_LOSSLESS,
+ VP9E_SET_TILE_COLUMNS,
+ VP9E_SET_TILE_ROWS,
+ VP9E_SET_FRAME_PARALLEL_DECODING
};
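The new VP9 entries follow the usual vpx_codec_control() pattern; treating the tile-columns argument as a log2 value is an assumption here:

    vpx_codec_control(&ctx, VP9E_SET_TILE_COLUMNS, 2);   /* assumed 2^2 = 4 columns */
    vpx_codec_control(&ctx, VP9E_SET_LOSSLESS, 1);
    vpx_codec_control(&ctx, VP9E_SET_FRAME_PARALLEL_DECODING, 1);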
/*!\brief vpx 1-D scaling mode
*
* This set of constants define 1-D vpx scaling modes
*/
-typedef enum vpx_scaling_mode_1d
-{
- VP8E_NORMAL = 0,
- VP8E_FOURFIVE = 1,
- VP8E_THREEFIVE = 2,
- VP8E_ONETWO = 3
+typedef enum vpx_scaling_mode_1d {
+ VP8E_NORMAL = 0,
+ VP8E_FOURFIVE = 1,
+ VP8E_THREEFIVE = 2,
+ VP8E_ONETWO = 3
} VPX_SCALING_MODE;
@@ -199,14 +211,17 @@ typedef enum vpx_scaling_mode_1d
*
*/
-typedef struct vpx_roi_map
-{
- unsigned char *roi_map; /**< specify an id between 0 and 3 for each 16x16 region within a frame */
- unsigned int rows; /**< number of rows */
- unsigned int cols; /**< number of cols */
- int delta_q[4]; /**< quantizer delta [-63, 63] off baseline for regions with id between 0 and 3*/
- int delta_lf[4]; /**< loop filter strength delta [-63, 63] for regions with id between 0 and 3 */
- unsigned int static_threshold[4];/**< threshold for region to be treated as static */
+typedef struct vpx_roi_map {
+ unsigned char *roi_map; /**< specify an id between 0 and 3 for each 16x16 region within a frame */
+ unsigned int rows; /**< number of rows */
+ unsigned int cols; /**< number of cols */
+ // TODO(paulwilkins): broken for VP9 which has 8 segments
+ // q and loop filter deltas for each segment
+ // (see MAX_MB_SEGMENTS)
+ int delta_q[4];
+ int delta_lf[4];
+ // Static breakout threshold for each segment
+ unsigned int static_threshold[4];
} vpx_roi_map_t;
/*!\brief vpx active region map
@@ -216,11 +231,10 @@ typedef struct vpx_roi_map
*/
-typedef struct vpx_active_map
-{
- unsigned char *active_map; /**< specify an on (1) or off (0) each 16x16 region within a frame */
- unsigned int rows; /**< number of rows */
- unsigned int cols; /**< number of cols */
+typedef struct vpx_active_map {
+  unsigned char *active_map; /**< specify on (1) or off (0) for each 16x16 region within a frame */
+ unsigned int rows; /**< number of rows */
+ unsigned int cols; /**< number of cols */
} vpx_active_map_t;
/*!\brief vpx image scaling mode
@@ -228,10 +242,9 @@ typedef struct vpx_active_map
* This defines the data structure for image scaling mode
*
*/
-typedef struct vpx_scaling_mode
-{
- VPX_SCALING_MODE h_scaling_mode; /**< horizontal scaling mode */
- VPX_SCALING_MODE v_scaling_mode; /**< vertical scaling mode */
+typedef struct vpx_scaling_mode {
+ VPX_SCALING_MODE h_scaling_mode; /**< horizontal scaling mode */
+ VPX_SCALING_MODE v_scaling_mode; /**< vertical scaling mode */
} vpx_scaling_mode_t;
/*!\brief VP8 token partition mode
@@ -241,12 +254,11 @@ typedef struct vpx_scaling_mode
*
*/
-typedef enum
-{
- VP8_ONE_TOKENPARTITION = 0,
- VP8_TWO_TOKENPARTITION = 1,
- VP8_FOUR_TOKENPARTITION = 2,
- VP8_EIGHT_TOKENPARTITION = 3
+typedef enum {
+ VP8_ONE_TOKENPARTITION = 0,
+ VP8_TWO_TOKENPARTITION = 1,
+ VP8_FOUR_TOKENPARTITION = 2,
+ VP8_EIGHT_TOKENPARTITION = 3
} vp8e_token_partitions;
@@ -255,10 +267,9 @@ typedef enum
* Changes the encoder to tune for certain types of input material.
*
*/
-typedef enum
-{
- VP8_TUNE_PSNR,
- VP8_TUNE_SSIM
+typedef enum {
+ VP8_TUNE_PSNR,
+ VP8_TUNE_SSIM
} vp8e_tuning;
@@ -289,17 +300,22 @@ VPX_CTRL_USE_TYPE(VP8E_SET_STATIC_THRESHOLD, unsigned int)
VPX_CTRL_USE_TYPE(VP8E_SET_TOKEN_PARTITIONS, int) /* vp8e_token_partitions */
VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_MAXFRAMES, unsigned int)
-VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_STRENGTH , unsigned int)
-VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_TYPE , unsigned int)
+VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_STRENGTH, unsigned int)
+VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_TYPE, unsigned int)
VPX_CTRL_USE_TYPE(VP8E_SET_TUNING, int) /* vp8e_tuning */
-VPX_CTRL_USE_TYPE(VP8E_SET_CQ_LEVEL , unsigned int)
+VPX_CTRL_USE_TYPE(VP8E_SET_CQ_LEVEL, unsigned int)
+
+VPX_CTRL_USE_TYPE(VP9E_SET_TILE_COLUMNS, int)
+VPX_CTRL_USE_TYPE(VP9E_SET_TILE_ROWS, int)
VPX_CTRL_USE_TYPE(VP8E_GET_LAST_QUANTIZER, int *)
VPX_CTRL_USE_TYPE(VP8E_GET_LAST_QUANTIZER_64, int *)
VPX_CTRL_USE_TYPE(VP8E_SET_MAX_INTRA_BITRATE_PCT, unsigned int)
+VPX_CTRL_USE_TYPE(VP9E_SET_LOSSLESS, unsigned int)
+VPX_CTRL_USE_TYPE(VP9E_SET_FRAME_PARALLEL_DECODING, unsigned int)
/*! @} - end defgroup vp8_encoder */
#include "vpx_codec_impl_bottom.h"
#endif
diff --git a/libvpx/vpx/vp8dx.h b/libvpx/vpx/vp8dx.h
index 8661035..7d250cc 100644
--- a/libvpx/vpx/vp8dx.h
+++ b/libvpx/vpx/vp8dx.h
@@ -31,7 +31,11 @@
* @{
*/
extern vpx_codec_iface_t vpx_codec_vp8_dx_algo;
-extern vpx_codec_iface_t* vpx_codec_vp8_dx(void);
+extern vpx_codec_iface_t *vpx_codec_vp8_dx(void);
+
+/* TODO(jkoleszar): These move to VP9 in a later patch set. */
+extern vpx_codec_iface_t vpx_codec_vp9_dx_algo;
+extern vpx_codec_iface_t *vpx_codec_vp9_dx(void);
/*!@} - end algorithm interface member group*/
/* Include controls common to both the encoder and decoder */
@@ -45,24 +49,40 @@ extern vpx_codec_iface_t* vpx_codec_vp8_dx(void);
*
* \sa #vpx_codec_control
*/
-enum vp8_dec_control_id
-{
- /** control function to get info on which reference frames were updated
- * by the last decode
- */
- VP8D_GET_LAST_REF_UPDATES = VP8_DECODER_CTRL_ID_START,
-
- /** check if the indicated frame is corrupted */
- VP8D_GET_FRAME_CORRUPTED,
-
- /** control function to get info on which reference frames were used
- * by the last decode
+enum vp8_dec_control_id {
+ /** control function to get info on which reference frames were updated
+ * by the last decode
+ */
+ VP8D_GET_LAST_REF_UPDATES = VP8_DECODER_CTRL_ID_START,
+
+ /** check if the indicated frame is corrupted */
+ VP8D_GET_FRAME_CORRUPTED,
+
+ /** control function to get info on which reference frames were used
+ * by the last decode
+ */
+ VP8D_GET_LAST_REF_USED,
+
+ /** decryption function to decrypt encoded buffer data immediately
+ * before decoding. Takes a vp8_decrypt_init, which contains
+ * a callback function and opaque context pointer.
+ */
+ VP8D_SET_DECRYPTOR,
+
+ /** For testing. */
+ VP9_INVERT_TILE_DECODE_ORDER,
+
+ VP8_DECODER_CTRL_ID_MAX
+};
+
+typedef struct vp8_decrypt_init {
+ /** Decrypt n bytes of data from input -> output, using the decrypt_state
+ * passed in VP8D_SET_DECRYPTOR.
*/
- VP8D_GET_LAST_REF_USED,
-
- VP8_DECODER_CTRL_ID_MAX
-} ;
-
+ void (*decrypt_cb)(void *decrypt_state, const unsigned char *input,
+ unsigned char *output, int count);
+ void *decrypt_state;
+} vp8_decrypt_init;
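A hedged sketch matching the callback signature above; key16 is a hypothetical 16-byte key, and the XOR scheme is purely illustrative:

    static void xor_decrypt(void *state, const unsigned char *input,
                            unsigned char *output, int count) {
        int i;
        for (i = 0; i < count; i++)
            output[i] = input[i] ^ ((const unsigned char *)state)[i % 16];
    }

    vp8_decrypt_init di = { xor_decrypt, key16 };
    vpx_codec_control(&ctx, VP8D_SET_DECRYPTOR, &di);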
/*!\brief VP8 decoder control function parameter type
*
@@ -75,6 +95,8 @@ enum vp8_dec_control_id
VPX_CTRL_USE_TYPE(VP8D_GET_LAST_REF_UPDATES, int *)
VPX_CTRL_USE_TYPE(VP8D_GET_FRAME_CORRUPTED, int *)
VPX_CTRL_USE_TYPE(VP8D_GET_LAST_REF_USED, int *)
+VPX_CTRL_USE_TYPE(VP8D_SET_DECRYPTOR, vp8_decrypt_init *)
+VPX_CTRL_USE_TYPE(VP9_INVERT_TILE_DECODE_ORDER, int)
/*! @} - end defgroup vp8_decoder */
diff --git a/libvpx/vpx/vpx_codec.h b/libvpx/vpx/vpx_codec.h
index d92e165..2e6f1e7 100644
--- a/libvpx/vpx/vpx_codec.h
+++ b/libvpx/vpx/vpx_codec.h
@@ -45,21 +45,28 @@ extern "C" {
#include "vpx_integer.h"
#include "vpx_image.h"
- /*!\brief Decorator indicating a function is deprecated */
+ /*!\brief Decorator indicating a function is deprecated */
#ifndef DEPRECATED
#if defined(__GNUC__) && __GNUC__
#define DEPRECATED __attribute__ ((deprecated))
-#define DECLSPEC_DEPRECATED /**< \copydoc #DEPRECATED */
#elif defined(_MSC_VER)
#define DEPRECATED
-#define DECLSPEC_DEPRECATED __declspec(deprecated) /**< \copydoc #DEPRECATED */
#else
#define DEPRECATED
-#define DECLSPEC_DEPRECATED /**< \copydoc #DEPRECATED */
#endif
+#endif /* DEPRECATED */
+
+#ifndef DECLSPEC_DEPRECATED
+#if defined(__GNUC__) && __GNUC__
+#define DECLSPEC_DEPRECATED /**< \copydoc #DEPRECATED */
+#elif defined(_MSC_VER)
+#define DECLSPEC_DEPRECATED __declspec(deprecated) /**< \copydoc #DEPRECATED */
+#else
+#define DECLSPEC_DEPRECATED /**< \copydoc #DEPRECATED */
#endif
+#endif /* DECLSPEC_DEPRECATED */
- /*!\brief Decorator indicating a function is potentially unused */
+ /*!\brief Decorator indicating a function is potentially unused */
#ifdef UNUSED
#elif __GNUC__
#define UNUSED __attribute__ ((unused))
@@ -67,312 +74,310 @@ extern "C" {
#define UNUSED
#endif
- /*!\brief Current ABI version number
- *
- * \internal
- * If this file is altered in any way that changes the ABI, this value
- * must be bumped. Examples include, but are not limited to, changing
- * types, removing or reassigning enums, adding/removing/rearranging
- * fields to structures
- */
+ /*!\brief Current ABI version number
+ *
+ * \internal
+ * If this file is altered in any way that changes the ABI, this value
+ * must be bumped. Examples include, but are not limited to, changing
+ * types, removing or reassigning enums, adding/removing/rearranging
+ * fields to structures
+ */
#define VPX_CODEC_ABI_VERSION (2 + VPX_IMAGE_ABI_VERSION) /**<\hideinitializer*/
- /*!\brief Algorithm return codes */
- typedef enum {
- /*!\brief Operation completed without error */
- VPX_CODEC_OK,
-
- /*!\brief Unspecified error */
- VPX_CODEC_ERROR,
-
- /*!\brief Memory operation failed */
- VPX_CODEC_MEM_ERROR,
-
- /*!\brief ABI version mismatch */
- VPX_CODEC_ABI_MISMATCH,
-
- /*!\brief Algorithm does not have required capability */
- VPX_CODEC_INCAPABLE,
-
- /*!\brief The given bitstream is not supported.
- *
- * The bitstream was unable to be parsed at the highest level. The decoder
- * is unable to proceed. This error \ref SHOULD be treated as fatal to the
- * stream. */
- VPX_CODEC_UNSUP_BITSTREAM,
-
- /*!\brief Encoded bitstream uses an unsupported feature
- *
- * The decoder does not implement a feature required by the encoder. This
- * return code should only be used for features that prevent future
- * pictures from being properly decoded. This error \ref MAY be treated as
- * fatal to the stream or \ref MAY be treated as fatal to the current GOP.
- */
- VPX_CODEC_UNSUP_FEATURE,
-
- /*!\brief The coded data for this stream is corrupt or incomplete
- *
- * There was a problem decoding the current frame. This return code
- * should only be used for failures that prevent future pictures from
- * being properly decoded. This error \ref MAY be treated as fatal to the
- * stream or \ref MAY be treated as fatal to the current GOP. If decoding
- * is continued for the current GOP, artifacts may be present.
- */
- VPX_CODEC_CORRUPT_FRAME,
-
- /*!\brief An application-supplied parameter is not valid.
- *
- */
- VPX_CODEC_INVALID_PARAM,
-
- /*!\brief An iterator reached the end of list.
- *
- */
- VPX_CODEC_LIST_END
-
- }
- vpx_codec_err_t;
-
-
- /*! \brief Codec capabilities bitfield
- *
- * Each codec advertises the capabilities it supports as part of its
- * ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces
- * or functionality, and are not required to be supported.
- *
- * The available flags are specified by VPX_CODEC_CAP_* defines.
- */
- typedef long vpx_codec_caps_t;
-#define VPX_CODEC_CAP_DECODER 0x1 /**< Is a decoder */
-#define VPX_CODEC_CAP_ENCODER 0x2 /**< Is an encoder */
-#define VPX_CODEC_CAP_XMA 0x4 /**< Supports eXternal Memory Allocation */
+ /*!\brief Algorithm return codes */
+ typedef enum {
+ /*!\brief Operation completed without error */
+ VPX_CODEC_OK,
+ /*!\brief Unspecified error */
+ VPX_CODEC_ERROR,
- /*! \brief Initialization-time Feature Enabling
- *
- * Certain codec features must be known at initialization time, to allow for
- * proper memory allocation.
- *
- * The available flags are specified by VPX_CODEC_USE_* defines.
- */
- typedef long vpx_codec_flags_t;
-#define VPX_CODEC_USE_XMA 0x00000001 /**< Use eXternal Memory Allocation mode */
+ /*!\brief Memory operation failed */
+ VPX_CODEC_MEM_ERROR,
+ /*!\brief ABI version mismatch */
+ VPX_CODEC_ABI_MISMATCH,
- /*!\brief Codec interface structure.
- *
- * Contains function pointers and other data private to the codec
- * implementation. This structure is opaque to the application.
- */
- typedef const struct vpx_codec_iface vpx_codec_iface_t;
-
+ /*!\brief Algorithm does not have required capability */
+ VPX_CODEC_INCAPABLE,
- /*!\brief Codec private data structure.
+ /*!\brief The given bitstream is not supported.
*
- * Contains data private to the codec implementation. This structure is opaque
- * to the application.
- */
- typedef struct vpx_codec_priv vpx_codec_priv_t;
-
+ * The bitstream was unable to be parsed at the highest level. The decoder
+ * is unable to proceed. This error \ref SHOULD be treated as fatal to the
+ * stream. */
+ VPX_CODEC_UNSUP_BITSTREAM,
- /*!\brief Iterator
+ /*!\brief Encoded bitstream uses an unsupported feature
*
- * Opaque storage used for iterating over lists.
+ * The decoder does not implement a feature required by the encoder. This
+ * return code should only be used for features that prevent future
+ * pictures from being properly decoded. This error \ref MAY be treated as
+ * fatal to the stream or \ref MAY be treated as fatal to the current GOP.
*/
- typedef const void *vpx_codec_iter_t;
+ VPX_CODEC_UNSUP_FEATURE,
-
- /*!\brief Codec context structure
+ /*!\brief The coded data for this stream is corrupt or incomplete
*
- * All codecs \ref MUST support this context structure fully. In general,
- * this data should be considered private to the codec algorithm, and
- * not be manipulated or examined by the calling application. Applications
- * may reference the 'name' member to get a printable description of the
- * algorithm.
+ * There was a problem decoding the current frame. This return code
+ * should only be used for failures that prevent future pictures from
+ * being properly decoded. This error \ref MAY be treated as fatal to the
+ * stream or \ref MAY be treated as fatal to the current GOP. If decoding
+ * is continued for the current GOP, artifacts may be present.
*/
- typedef struct vpx_codec_ctx
- {
- const char *name; /**< Printable interface name */
- vpx_codec_iface_t *iface; /**< Interface pointers */
- vpx_codec_err_t err; /**< Last returned error */
- const char *err_detail; /**< Detailed info, if available */
- vpx_codec_flags_t init_flags; /**< Flags passed at init time */
- union
- {
- struct vpx_codec_dec_cfg *dec; /**< Decoder Configuration Pointer */
- struct vpx_codec_enc_cfg *enc; /**< Encoder Configuration Pointer */
- void *raw;
- } config; /**< Configuration pointer aliasing union */
- vpx_codec_priv_t *priv; /**< Algorithm private storage */
- } vpx_codec_ctx_t;
-
+ VPX_CODEC_CORRUPT_FRAME,
- /*
- * Library Version Number Interface
+ /*!\brief An application-supplied parameter is not valid.
*
- * For example, see the following sample return values:
- * vpx_codec_version() (1<<16 | 2<<8 | 3)
- * vpx_codec_version_str() "v1.2.3-rc1-16-gec6a1ba"
- * vpx_codec_version_extra_str() "rc1-16-gec6a1ba"
*/
+ VPX_CODEC_INVALID_PARAM,
- /*!\brief Return the version information (as an integer)
- *
- * Returns a packed encoding of the library version number. This will only include
- * the major.minor.patch component of the version number. Note that this encoded
- * value should be accessed through the macros provided, as the encoding may change
- * in the future.
+ /*!\brief An iterator reached the end of list.
*
*/
- int vpx_codec_version(void);
-#define VPX_VERSION_MAJOR(v) ((v>>16)&0xff) /**< extract major from packed version */
-#define VPX_VERSION_MINOR(v) ((v>>8)&0xff) /**< extract minor from packed version */
-#define VPX_VERSION_PATCH(v) ((v>>0)&0xff) /**< extract patch from packed version */
+ VPX_CODEC_LIST_END
- /*!\brief Return the version major number */
-#define vpx_codec_version_major() ((vpx_codec_version()>>16)&0xff)
-
- /*!\brief Return the version minor number */
-#define vpx_codec_version_minor() ((vpx_codec_version()>>8)&0xff)
-
- /*!\brief Return the version patch number */
-#define vpx_codec_version_patch() ((vpx_codec_version()>>0)&0xff)
-
-
- /*!\brief Return the version information (as a string)
- *
- * Returns a printable string containing the full library version number. This may
-   * contain additional text following the three-digit version number, e.g. to indicate
- * release candidates, prerelease versions, etc.
- *
- */
- const char *vpx_codec_version_str(void);
-
-
- /*!\brief Return the version information (as a string)
- *
- * Returns a printable "extra string". This is the component of the string returned
-   * by vpx_codec_version_str() following the three-digit version number.
- *
- */
- const char *vpx_codec_version_extra_str(void);
-
-
- /*!\brief Return the build configuration
- *
- * Returns a printable string containing an encoded version of the build
- * configuration. This may be useful to vpx support.
- *
- */
- const char *vpx_codec_build_config(void);
+ }
+ vpx_codec_err_t;
- /*!\brief Return the name for a given interface
- *
-   * Returns a human readable string for the name of the given codec interface.
- *
- * \param[in] iface Interface pointer
- *
- */
- const char *vpx_codec_iface_name(vpx_codec_iface_t *iface);
-
-
- /*!\brief Convert error number to printable string
- *
- * Returns a human readable string for the last error returned by the
- * algorithm. The returned error will be one line and will not contain
- * any newline characters.
- *
- *
- * \param[in] err Error number.
- *
- */
- const char *vpx_codec_err_to_string(vpx_codec_err_t err);
-
-
- /*!\brief Retrieve error synopsis for codec context
- *
- * Returns a human readable string for the last error returned by the
- * algorithm. The returned error will be one line and will not contain
- * any newline characters.
- *
- *
- * \param[in] ctx Pointer to this instance's context.
- *
- */
- const char *vpx_codec_error(vpx_codec_ctx_t *ctx);
+ /*! \brief Codec capabilities bitfield
+ *
+ * Each codec advertises the capabilities it supports as part of its
+ * ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces
+ * or functionality, and are not required to be supported.
+ *
+ * The available flags are specified by VPX_CODEC_CAP_* defines.
+ */
+ typedef long vpx_codec_caps_t;
+#define VPX_CODEC_CAP_DECODER 0x1 /**< Is a decoder */
+#define VPX_CODEC_CAP_ENCODER 0x2 /**< Is an encoder */
+#define VPX_CODEC_CAP_XMA 0x4 /**< Supports eXternal Memory Allocation */
- /*!\brief Retrieve detailed error information for codec context
- *
- * Returns a human readable string providing detailed information about
- * the last error.
- *
- * \param[in] ctx Pointer to this instance's context.
- *
- * \retval NULL
- * No detailed information is available.
- */
- const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx);
+ /*! \brief Initialization-time Feature Enabling
+ *
+ * Certain codec features must be known at initialization time, to allow for
+ * proper memory allocation.
+ *
+ * The available flags are specified by VPX_CODEC_USE_* defines.
+ */
+ typedef long vpx_codec_flags_t;
+#define VPX_CODEC_USE_XMA 0x00000001 /**< Use eXternal Memory Allocation mode */
- /* REQUIRED FUNCTIONS
- *
- * The following functions are required to be implemented for all codecs.
- * They represent the base case functionality expected of all codecs.
- */
+ /*!\brief Codec interface structure.
+ *
+ * Contains function pointers and other data private to the codec
+ * implementation. This structure is opaque to the application.
+ */
+ typedef const struct vpx_codec_iface vpx_codec_iface_t;
+
+
+ /*!\brief Codec private data structure.
+ *
+ * Contains data private to the codec implementation. This structure is opaque
+ * to the application.
+ */
+ typedef struct vpx_codec_priv vpx_codec_priv_t;
+
+
+ /*!\brief Iterator
+ *
+ * Opaque storage used for iterating over lists.
+ */
+ typedef const void *vpx_codec_iter_t;
+
+
+ /*!\brief Codec context structure
+ *
+ * All codecs \ref MUST support this context structure fully. In general,
+ * this data should be considered private to the codec algorithm, and
+ * not be manipulated or examined by the calling application. Applications
+ * may reference the 'name' member to get a printable description of the
+ * algorithm.
+ */
+ typedef struct vpx_codec_ctx {
+ const char *name; /**< Printable interface name */
+ vpx_codec_iface_t *iface; /**< Interface pointers */
+ vpx_codec_err_t err; /**< Last returned error */
+ const char *err_detail; /**< Detailed info, if available */
+ vpx_codec_flags_t init_flags; /**< Flags passed at init time */
+ union {
+ struct vpx_codec_dec_cfg *dec; /**< Decoder Configuration Pointer */
+ struct vpx_codec_enc_cfg *enc; /**< Encoder Configuration Pointer */
+ void *raw;
+ } config; /**< Configuration pointer aliasing union */
+ vpx_codec_priv_t *priv; /**< Algorithm private storage */
+ } vpx_codec_ctx_t;
+
+
+ /*
+ * Library Version Number Interface
+ *
+ * For example, see the following sample return values:
+ * vpx_codec_version() (1<<16 | 2<<8 | 3)
+ * vpx_codec_version_str() "v1.2.3-rc1-16-gec6a1ba"
+ * vpx_codec_version_extra_str() "rc1-16-gec6a1ba"
+ */
+
+ /*!\brief Return the version information (as an integer)
+ *
+ * Returns a packed encoding of the library version number. This will only include
+ * the major.minor.patch component of the version number. Note that this encoded
+ * value should be accessed through the macros provided, as the encoding may change
+ * in the future.
+ *
+ */
+ int vpx_codec_version(void);
+#define VPX_VERSION_MAJOR(v) ((v>>16)&0xff) /**< extract major from packed version */
+#define VPX_VERSION_MINOR(v) ((v>>8)&0xff) /**< extract minor from packed version */
+#define VPX_VERSION_PATCH(v) ((v>>0)&0xff) /**< extract patch from packed version */
- /*!\brief Destroy a codec instance
- *
- * Destroys a codec context, freeing any associated memory buffers.
- *
- * \param[in] ctx Pointer to this instance's context
- *
- * \retval #VPX_CODEC_OK
-   *     The codec instance was destroyed.
- * \retval #VPX_CODEC_MEM_ERROR
- * Memory allocation failed.
- */
- vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx);
+ /*!\brief Return the version major number */
+#define vpx_codec_version_major() ((vpx_codec_version()>>16)&0xff)
+ /*!\brief Return the version minor number */
+#define vpx_codec_version_minor() ((vpx_codec_version()>>8)&0xff)
- /*!\brief Get the capabilities of an algorithm.
- *
- * Retrieves the capabilities bitfield from the algorithm's interface.
- *
- * \param[in] iface Pointer to the algorithm interface
- *
- */
- vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface);
+ /*!\brief Return the version patch number */
+#define vpx_codec_version_patch() ((vpx_codec_version()>>0)&0xff)
- /*!\brief Control algorithm
- *
- * This function is used to exchange algorithm specific data with the codec
- * instance. This can be used to implement features specific to a particular
- * algorithm.
- *
- * This wrapper function dispatches the request to the helper function
- * associated with the given ctrl_id. It tries to call this function
- * transparently, but will return #VPX_CODEC_ERROR if the request could not
- * be dispatched.
- *
- * Note that this function should not be used directly. Call the
- * #vpx_codec_control wrapper macro instead.
- *
- * \param[in] ctx Pointer to this instance's context
- * \param[in] ctrl_id Algorithm specific control identifier
- *
- * \retval #VPX_CODEC_OK
- * The control request was processed.
- * \retval #VPX_CODEC_ERROR
- * The control request was not processed.
- * \retval #VPX_CODEC_INVALID_PARAM
- * The data was not valid.
- */
- vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx,
- int ctrl_id,
- ...);
+ /*!\brief Return the version information (as a string)
+ *
+ * Returns a printable string containing the full library version number. This may
+   * contain additional text following the three-digit version number, e.g. to indicate
+ * release candidates, prerelease versions, etc.
+ *
+ */
+ const char *vpx_codec_version_str(void);
+
+
+ /*!\brief Return the version information (as a string)
+ *
+ * Returns a printable "extra string". This is the component of the string returned
+   * by vpx_codec_version_str() following the three-digit version number.
+ *
+ */
+ const char *vpx_codec_version_extra_str(void);
+
+
+ /*!\brief Return the build configuration
+ *
+ * Returns a printable string containing an encoded version of the build
+ * configuration. This may be useful to vpx support.
+ *
+ */
+ const char *vpx_codec_build_config(void);
+
+
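The packed version encoding is easiest to see in use. A minimal sketch using
only the declarations above (the small program itself is illustrative, not
part of the library):

    #include <stdio.h>
    #include "vpx/vpx_codec.h"

    int main(void) {
      const int v = vpx_codec_version();

      /* Unpack major.minor.patch with the provided accessor macros. */
      printf("libvpx %d.%d.%d (%s)\n",
             VPX_VERSION_MAJOR(v), VPX_VERSION_MINOR(v), VPX_VERSION_PATCH(v),
             vpx_codec_version_str());
      printf("built with: %s\n", vpx_codec_build_config());
      return 0;
    }

Comparing the unpacked fields against the values the application was compiled
with is one common way to detect a header/library mismatch at startup.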
+ /*!\brief Return the name for a given interface
+ *
+   * Returns a human readable string for the name of the given codec interface.
+ *
+ * \param[in] iface Interface pointer
+ *
+ */
+ const char *vpx_codec_iface_name(vpx_codec_iface_t *iface);
+
+
+ /*!\brief Convert error number to printable string
+ *
+ * Returns a human readable string for the last error returned by the
+ * algorithm. The returned error will be one line and will not contain
+ * any newline characters.
+ *
+ *
+ * \param[in] err Error number.
+ *
+ */
+ const char *vpx_codec_err_to_string(vpx_codec_err_t err);
+
+
+ /*!\brief Retrieve error synopsis for codec context
+ *
+ * Returns a human readable string for the last error returned by the
+ * algorithm. The returned error will be one line and will not contain
+ * any newline characters.
+ *
+ *
+ * \param[in] ctx Pointer to this instance's context.
+ *
+ */
+ const char *vpx_codec_error(vpx_codec_ctx_t *ctx);
+
+
+ /*!\brief Retrieve detailed error information for codec context
+ *
+ * Returns a human readable string providing detailed information about
+ * the last error.
+ *
+ * \param[in] ctx Pointer to this instance's context.
+ *
+ * \retval NULL
+ * No detailed information is available.
+ */
+ const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx);
+
+
+ /* REQUIRED FUNCTIONS
+ *
+ * The following functions are required to be implemented for all codecs.
+ * They represent the base case functionality expected of all codecs.
+ */
+
+ /*!\brief Destroy a codec instance
+ *
+ * Destroys a codec context, freeing any associated memory buffers.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ *
+ * \retval #VPX_CODEC_OK
+   *     The codec instance was destroyed.
+ * \retval #VPX_CODEC_MEM_ERROR
+ * Memory allocation failed.
+ */
+ vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx);
+
+
+ /*!\brief Get the capabilities of an algorithm.
+ *
+ * Retrieves the capabilities bitfield from the algorithm's interface.
+ *
+ * \param[in] iface Pointer to the algorithm interface
+ *
+ */
+ vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface);
+
+
+ /*!\brief Control algorithm
+ *
+ * This function is used to exchange algorithm specific data with the codec
+ * instance. This can be used to implement features specific to a particular
+ * algorithm.
+ *
+ * This wrapper function dispatches the request to the helper function
+ * associated with the given ctrl_id. It tries to call this function
+ * transparently, but will return #VPX_CODEC_ERROR if the request could not
+ * be dispatched.
+ *
+ * Note that this function should not be used directly. Call the
+ * #vpx_codec_control wrapper macro instead.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in] ctrl_id Algorithm specific control identifier
+ *
+ * \retval #VPX_CODEC_OK
+ * The control request was processed.
+ * \retval #VPX_CODEC_ERROR
+ * The control request was not processed.
+ * \retval #VPX_CODEC_INVALID_PARAM
+ * The data was not valid.
+ */
+ vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx,
+ int ctrl_id,
+ ...);
#if defined(VPX_DISABLE_CTRL_TYPECHECKS) && VPX_DISABLE_CTRL_TYPECHECKS
# define vpx_codec_control(ctx,id,data) vpx_codec_control_(ctx,id,data)
# define VPX_CTRL_USE_TYPE(id, typ)
@@ -380,172 +385,171 @@ extern "C" {
# define VPX_CTRL_VOID(id, typ)
#else
- /*!\brief vpx_codec_control wrapper macro
- *
- * This macro allows for type safe conversions across the variadic parameter
- * to vpx_codec_control_().
- *
- * \internal
- * It works by dispatching the call to the control function through a wrapper
- * function named with the id parameter.
- */
+ /*!\brief vpx_codec_control wrapper macro
+ *
+ * This macro allows for type safe conversions across the variadic parameter
+ * to vpx_codec_control_().
+ *
+ * \internal
+ * It works by dispatching the call to the control function through a wrapper
+ * function named with the id parameter.
+ */
# define vpx_codec_control(ctx,id,data) vpx_codec_control_##id(ctx,id,data)\
- /**<\hideinitializer*/
-
-
- /*!\brief vpx_codec_control type definition macro
- *
- * This macro allows for type safe conversions across the variadic parameter
- * to vpx_codec_control_(). It defines the type of the argument for a given
- * control identifier.
- *
- * \internal
- * It defines a static function with
- * the correctly typed arguments as a wrapper to the type-unsafe internal
- * function.
- */
+ /**<\hideinitializer*/
+
+
+ /*!\brief vpx_codec_control type definition macro
+ *
+ * This macro allows for type safe conversions across the variadic parameter
+ * to vpx_codec_control_(). It defines the type of the argument for a given
+ * control identifier.
+ *
+ * \internal
+ * It defines a static function with
+ * the correctly typed arguments as a wrapper to the type-unsafe internal
+ * function.
+ */
# define VPX_CTRL_USE_TYPE(id, typ) \
- static vpx_codec_err_t \
- vpx_codec_control_##id(vpx_codec_ctx_t*, int, typ) UNUSED;\
- \
- static vpx_codec_err_t \
- vpx_codec_control_##id(vpx_codec_ctx_t *ctx, int ctrl_id, typ data) {\
- return vpx_codec_control_(ctx, ctrl_id, data);\
- } /**<\hideinitializer*/
-
-
- /*!\brief vpx_codec_control deprecated type definition macro
- *
- * Like #VPX_CTRL_USE_TYPE, but indicates that the specified control is
- * deprecated and should not be used. Consult the documentation for your
- * codec for more information.
- *
- * \internal
- * It defines a static function with the correctly typed arguments as a
- * wrapper to the type-unsafe internal function.
- */
+ static vpx_codec_err_t \
+ vpx_codec_control_##id(vpx_codec_ctx_t*, int, typ) UNUSED;\
+ \
+ static vpx_codec_err_t \
+ vpx_codec_control_##id(vpx_codec_ctx_t *ctx, int ctrl_id, typ data) {\
+ return vpx_codec_control_(ctx, ctrl_id, data);\
+ } /**<\hideinitializer*/
+
+
+ /*!\brief vpx_codec_control deprecated type definition macro
+ *
+ * Like #VPX_CTRL_USE_TYPE, but indicates that the specified control is
+ * deprecated and should not be used. Consult the documentation for your
+ * codec for more information.
+ *
+ * \internal
+ * It defines a static function with the correctly typed arguments as a
+ * wrapper to the type-unsafe internal function.
+ */
# define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ) \
- DECLSPEC_DEPRECATED static vpx_codec_err_t \
- vpx_codec_control_##id(vpx_codec_ctx_t*, int, typ) DEPRECATED UNUSED;\
- \
- DECLSPEC_DEPRECATED static vpx_codec_err_t \
- vpx_codec_control_##id(vpx_codec_ctx_t *ctx, int ctrl_id, typ data) {\
- return vpx_codec_control_(ctx, ctrl_id, data);\
- } /**<\hideinitializer*/
-
-
- /*!\brief vpx_codec_control void type definition macro
- *
- * This macro allows for type safe conversions across the variadic parameter
- * to vpx_codec_control_(). It indicates that a given control identifier takes
- * no argument.
- *
- * \internal
- * It defines a static function without a data argument as a wrapper to the
- * type-unsafe internal function.
- */
+ DECLSPEC_DEPRECATED static vpx_codec_err_t \
+ vpx_codec_control_##id(vpx_codec_ctx_t*, int, typ) DEPRECATED UNUSED;\
+ \
+ DECLSPEC_DEPRECATED static vpx_codec_err_t \
+ vpx_codec_control_##id(vpx_codec_ctx_t *ctx, int ctrl_id, typ data) {\
+ return vpx_codec_control_(ctx, ctrl_id, data);\
+ } /**<\hideinitializer*/
+
+
+ /*!\brief vpx_codec_control void type definition macro
+ *
+ * This macro allows for type safe conversions across the variadic parameter
+ * to vpx_codec_control_(). It indicates that a given control identifier takes
+ * no argument.
+ *
+ * \internal
+ * It defines a static function without a data argument as a wrapper to the
+ * type-unsafe internal function.
+ */
# define VPX_CTRL_VOID(id) \
- static vpx_codec_err_t \
- vpx_codec_control_##id(vpx_codec_ctx_t*, int) UNUSED;\
- \
- static vpx_codec_err_t \
- vpx_codec_control_##id(vpx_codec_ctx_t *ctx, int ctrl_id) {\
- return vpx_codec_control_(ctx, ctrl_id);\
- } /**<\hideinitializer*/
+ static vpx_codec_err_t \
+ vpx_codec_control_##id(vpx_codec_ctx_t*, int) UNUSED;\
+ \
+ static vpx_codec_err_t \
+ vpx_codec_control_##id(vpx_codec_ctx_t *ctx, int ctrl_id) {\
+ return vpx_codec_control_(ctx, ctrl_id);\
+ } /**<\hideinitializer*/
#endif
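For context, the VPX_CTRL_USE_TYPE machinery above is what makes
vpx_codec_control() type safe: each control id declared in a codec header
expands to a typed vpx_codec_control_##id() shim, so a mistyped argument is a
compile-time error rather than a runtime failure. A minimal call-side sketch,
assuming a decoder initialized with VPX_CODEC_USE_POSTPROC and the
VP8_SET_POSTPROC control from vpx/vp8.h; the error handling is illustrative:

    #include <stdio.h>
    #include "vpx/vpx_codec.h"
    #include "vpx/vp8.h"    /* VP8_SET_POSTPROC, vp8_postproc_cfg_t */

    static void enable_deblocking(vpx_codec_ctx_t *codec) {
      /* post_proc_flag, deblocking_level (0..16), noise_level */
      vp8_postproc_cfg_t pp = { VP8_DEBLOCK | VP8_DEMACROBLOCK, 4, 0 };

      /* Dispatches through the generated vpx_codec_control_VP8_SET_POSTPROC(),
       * so passing anything but a vp8_postproc_cfg_t * would not compile. */
      if (vpx_codec_control(codec, VP8_SET_POSTPROC, &pp) != VPX_CODEC_OK)
        fprintf(stderr, "control failed: %s\n", vpx_codec_error(codec));
    }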
- /*!\defgroup cap_xma External Memory Allocation Functions
- *
- * The following functions are required to be implemented for all codecs
- * that advertise the VPX_CODEC_CAP_XMA capability. Calling these functions
- * for codecs that don't advertise this capability will result in an error
- * code being returned, usually VPX_CODEC_INCAPABLE
- * @{
- */
+ /*!\defgroup cap_xma External Memory Allocation Functions
+ *
+ * The following functions are required to be implemented for all codecs
+ * that advertise the VPX_CODEC_CAP_XMA capability. Calling these functions
+ * for codecs that don't advertise this capability will result in an error
+ * code being returned, usually VPX_CODEC_INCAPABLE
+ * @{
+ */
- /*!\brief Memory Map Entry
- *
- * This structure is used to contain the properties of a memory segment. It
- * is populated by the codec in the request phase, and by the calling
- * application once the requested allocation has been performed.
+ /*!\brief Memory Map Entry
+ *
+ * This structure is used to contain the properties of a memory segment. It
+ * is populated by the codec in the request phase, and by the calling
+ * application once the requested allocation has been performed.
+ */
+ typedef struct vpx_codec_mmap {
+ /*
+ * The following members are set by the codec when requesting a segment
*/
- typedef struct vpx_codec_mmap
- {
- /*
- * The following members are set by the codec when requesting a segment
- */
- unsigned int id; /**< identifier for the segment's contents */
- unsigned long sz; /**< size of the segment, in bytes */
- unsigned int align; /**< required alignment of the segment, in bytes */
- unsigned int flags; /**< bitfield containing segment properties */
+ unsigned int id; /**< identifier for the segment's contents */
+ unsigned long sz; /**< size of the segment, in bytes */
+ unsigned int align; /**< required alignment of the segment, in bytes */
+ unsigned int flags; /**< bitfield containing segment properties */
#define VPX_CODEC_MEM_ZERO 0x1 /**< Segment must be zeroed by allocation */
#define VPX_CODEC_MEM_WRONLY 0x2 /**< Segment need not be readable */
#define VPX_CODEC_MEM_FAST 0x4 /**< Place in fast memory, if available */
- /* The following members are to be filled in by the allocation function */
- void *base; /**< pointer to the allocated segment */
- void (*dtor)(struct vpx_codec_mmap *map); /**< destructor to call */
- void *priv; /**< allocator private storage */
- } vpx_codec_mmap_t; /**< alias for struct vpx_codec_mmap */
-
-
- /*!\brief Iterate over the list of segments to allocate.
- *
- * Iterates over a list of the segments to allocate. The iterator storage
- * should be initialized to NULL to start the iteration. Iteration is complete
-   * when this function returns VPX_CODEC_LIST_END. The amount of memory to
-   * allocate depends on the size of the encoded stream. In cases where the
- * stream is not available at allocation time, a fixed size must be requested.
- * The codec will not be able to operate on streams larger than the size used at
- * allocation time.
- *
- * \param[in] ctx Pointer to this instance's context.
- * \param[out] mmap Pointer to the memory map entry to populate.
- * \param[in,out] iter Iterator storage, initialized to NULL
- *
- * \retval #VPX_CODEC_OK
- * The memory map entry was populated.
- * \retval #VPX_CODEC_ERROR
- * Codec does not support XMA mode.
- * \retval #VPX_CODEC_MEM_ERROR
- * Unable to determine segment size from stream info.
- */
- vpx_codec_err_t vpx_codec_get_mem_map(vpx_codec_ctx_t *ctx,
- vpx_codec_mmap_t *mmap,
- vpx_codec_iter_t *iter);
-
-
- /*!\brief Identify allocated segments to codec instance
- *
- * Stores a list of allocated segments in the codec. Segments \ref MUST be
- * passed in the order they are read from vpx_codec_get_mem_map(), but may be
- * passed in groups of any size. Segments \ref MUST be set only once. The
- * allocation function \ref MUST ensure that the vpx_codec_mmap_t::base member
- * is non-NULL. If the segment requires cleanup handling (e.g., calling free()
- * or close()) then the vpx_codec_mmap_t::dtor member \ref MUST be populated.
- *
- * \param[in] ctx Pointer to this instance's context.
- * \param[in] mmaps Pointer to the first memory map entry in the list.
- * \param[in] num_maps Number of entries being set at this time
- *
- * \retval #VPX_CODEC_OK
- * The segment was stored in the codec context.
- * \retval #VPX_CODEC_INCAPABLE
- * Codec does not support XMA mode.
- * \retval #VPX_CODEC_MEM_ERROR
- * Segment base address was not set, or segment was already stored.
-
- */
- vpx_codec_err_t vpx_codec_set_mem_map(vpx_codec_ctx_t *ctx,
- vpx_codec_mmap_t *mmaps,
- unsigned int num_maps);
-
- /*!@} - end defgroup cap_xma*/
- /*!@} - end defgroup codec*/
+ /* The following members are to be filled in by the allocation function */
+ void *base; /**< pointer to the allocated segment */
+ void (*dtor)(struct vpx_codec_mmap *map); /**< destructor to call */
+ void *priv; /**< allocator private storage */
+ } vpx_codec_mmap_t; /**< alias for struct vpx_codec_mmap */
+
+
+ /*!\brief Iterate over the list of segments to allocate.
+ *
+ * Iterates over a list of the segments to allocate. The iterator storage
+ * should be initialized to NULL to start the iteration. Iteration is complete
+   * when this function returns VPX_CODEC_LIST_END. The amount of memory to
+   * allocate depends on the size of the encoded stream. In cases where the
+ * stream is not available at allocation time, a fixed size must be requested.
+ * The codec will not be able to operate on streams larger than the size used at
+ * allocation time.
+ *
+ * \param[in] ctx Pointer to this instance's context.
+ * \param[out] mmap Pointer to the memory map entry to populate.
+ * \param[in,out] iter Iterator storage, initialized to NULL
+ *
+ * \retval #VPX_CODEC_OK
+ * The memory map entry was populated.
+ * \retval #VPX_CODEC_ERROR
+ * Codec does not support XMA mode.
+ * \retval #VPX_CODEC_MEM_ERROR
+ * Unable to determine segment size from stream info.
+ */
+ vpx_codec_err_t vpx_codec_get_mem_map(vpx_codec_ctx_t *ctx,
+ vpx_codec_mmap_t *mmap,
+ vpx_codec_iter_t *iter);
+
+
+ /*!\brief Identify allocated segments to codec instance
+ *
+ * Stores a list of allocated segments in the codec. Segments \ref MUST be
+ * passed in the order they are read from vpx_codec_get_mem_map(), but may be
+ * passed in groups of any size. Segments \ref MUST be set only once. The
+ * allocation function \ref MUST ensure that the vpx_codec_mmap_t::base member
+ * is non-NULL. If the segment requires cleanup handling (e.g., calling free()
+ * or close()) then the vpx_codec_mmap_t::dtor member \ref MUST be populated.
+ *
+ * \param[in] ctx Pointer to this instance's context.
+ * \param[in] mmaps Pointer to the first memory map entry in the list.
+ * \param[in] num_maps Number of entries being set at this time
+ *
+ * \retval #VPX_CODEC_OK
+ * The segment was stored in the codec context.
+ * \retval #VPX_CODEC_INCAPABLE
+ * Codec does not support XMA mode.
+ * \retval #VPX_CODEC_MEM_ERROR
+ * Segment base address was not set, or segment was already stored.
+
+ */
+ vpx_codec_err_t vpx_codec_set_mem_map(vpx_codec_ctx_t *ctx,
+ vpx_codec_mmap_t *mmaps,
+ unsigned int num_maps);
+
+ /*!@} - end defgroup cap_xma*/
+ /*!@} - end defgroup codec*/
#endif
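The two XMA entry points above form a request/allocate/commit protocol: the
codec enumerates the segments it needs, the application allocates each one,
and the segments are handed back in enumeration order. A minimal sketch of
that loop; my_mmap_dtor() is an illustrative name, and for brevity the code
ignores mmap.align, which a real allocator must honor (e.g. with
posix_memalign()):

    #include <stdlib.h>
    #include "vpx/vpx_codec.h"

    /* Hypothetical cleanup hook stored in each committed segment. */
    static void my_mmap_dtor(vpx_codec_mmap_t *map) {
      free(map->base);
    }

    static vpx_codec_err_t allocate_segments(vpx_codec_ctx_t *ctx) {
      vpx_codec_iter_t iter = NULL;   /* iteration MUST start from NULL */
      vpx_codec_mmap_t mmap;
      vpx_codec_err_t res;

      while ((res = vpx_codec_get_mem_map(ctx, &mmap, &iter)) == VPX_CODEC_OK) {
        mmap.base = (mmap.flags & VPX_CODEC_MEM_ZERO) ? calloc(1, mmap.sz)
                                                      : malloc(mmap.sz);
        if (!mmap.base)
          return VPX_CODEC_MEM_ERROR;
        mmap.dtor = my_mmap_dtor;

        /* Segments MUST be committed in the order they were enumerated. */
        res = vpx_codec_set_mem_map(ctx, &mmap, 1);
        if (res != VPX_CODEC_OK)
          return res;
      }
      return res == VPX_CODEC_LIST_END ? VPX_CODEC_OK : res;
    }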
diff --git a/libvpx/vpx/vpx_decoder.h b/libvpx/vpx/vpx_decoder.h
index 1ccf1c5..e7701e5 100644
--- a/libvpx/vpx/vpx_decoder.h
+++ b/libvpx/vpx/vpx_decoder.h
@@ -32,299 +32,302 @@ extern "C" {
#define VPX_DECODER_H
#include "vpx_codec.h"
- /*!\brief Current ABI version number
- *
- * \internal
- * If this file is altered in any way that changes the ABI, this value
- * must be bumped. Examples include, but are not limited to, changing
- * types, removing or reassigning enums, adding/removing/rearranging
- * fields to structures
- */
+ /*!\brief Current ABI version number
+ *
+ * \internal
+ * If this file is altered in any way that changes the ABI, this value
+ * must be bumped. Examples include, but are not limited to, changing
+ * types, removing or reassigning enums, adding/removing/rearranging
+ * fields to structures
+ */
#define VPX_DECODER_ABI_VERSION (2 + VPX_CODEC_ABI_VERSION) /**<\hideinitializer*/
- /*! \brief Decoder capabilities bitfield
- *
- * Each decoder advertises the capabilities it supports as part of its
- * ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces
- * or functionality, and are not required to be supported by a decoder.
- *
- * The available flags are specified by VPX_CODEC_CAP_* defines.
- */
+ /*! \brief Decoder capabilities bitfield
+ *
+ * Each decoder advertises the capabilities it supports as part of its
+ * ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces
+ * or functionality, and are not required to be supported by a decoder.
+ *
+ * The available flags are specified by VPX_CODEC_CAP_* defines.
+ */
#define VPX_CODEC_CAP_PUT_SLICE 0x10000 /**< Will issue put_slice callbacks */
#define VPX_CODEC_CAP_PUT_FRAME 0x20000 /**< Will issue put_frame callbacks */
#define VPX_CODEC_CAP_POSTPROC 0x40000 /**< Can postprocess decoded frame */
#define VPX_CODEC_CAP_ERROR_CONCEALMENT 0x80000 /**< Can conceal errors due to
- packet loss */
+ packet loss */
#define VPX_CODEC_CAP_INPUT_FRAGMENTS 0x100000 /**< Can receive encoded frames
- one fragment at a time */
-
- /*! \brief Initialization-time Feature Enabling
- *
- * Certain codec features must be known at initialization time, to allow for
- * proper memory allocation.
- *
- * The available flags are specified by VPX_CODEC_USE_* defines.
- */
+ one fragment at a time */
+
+ /*! \brief Initialization-time Feature Enabling
+ *
+ * Certain codec features must be known at initialization time, to allow for
+ * proper memory allocation.
+ *
+ * The available flags are specified by VPX_CODEC_USE_* defines.
+ */
+#define VPX_CODEC_CAP_FRAME_THREADING 0x200000 /**< Can support frame-based
+ multi-threading */
+
#define VPX_CODEC_USE_POSTPROC 0x10000 /**< Postprocess decoded frame */
#define VPX_CODEC_USE_ERROR_CONCEALMENT 0x20000 /**< Conceal errors in decoded
- frames */
+ frames */
#define VPX_CODEC_USE_INPUT_FRAGMENTS 0x40000 /**< The input frame should be
- passed to the decoder one
- fragment at a time */
-
- /*!\brief Stream properties
- *
- * This structure is used to query or set properties of the decoded
- * stream. Algorithms may extend this structure with data specific
- * to their bitstream by setting the sz member appropriately.
- */
- typedef struct vpx_codec_stream_info
- {
- unsigned int sz; /**< Size of this structure */
- unsigned int w; /**< Width (or 0 for unknown/default) */
- unsigned int h; /**< Height (or 0 for unknown/default) */
- unsigned int is_kf; /**< Current frame is a keyframe */
- } vpx_codec_stream_info_t;
-
- /* REQUIRED FUNCTIONS
- *
- * The following functions are required to be implemented for all decoders.
- * They represent the base case functionality expected of all decoders.
- */
-
-
- /*!\brief Initialization Configurations
- *
- * This structure is used to pass init time configuration options to the
- * decoder.
- */
- typedef struct vpx_codec_dec_cfg
- {
- unsigned int threads; /**< Maximum number of threads to use, default 1 */
- unsigned int w; /**< Width */
- unsigned int h; /**< Height */
- } vpx_codec_dec_cfg_t; /**< alias for struct vpx_codec_dec_cfg */
-
-
- /*!\brief Initialize a decoder instance
- *
- * Initializes a decoder context using the given interface. Applications
- * should call the vpx_codec_dec_init convenience macro instead of this
- * function directly, to ensure that the ABI version number parameter
- * is properly initialized.
- *
- * If the library was configured with --disable-multithread, this call
- * is not thread safe and should be guarded with a lock if being used
- * in a multithreaded context.
- *
- * In XMA mode (activated by setting VPX_CODEC_USE_XMA in the flags
- * parameter), the storage pointed to by the cfg parameter must be
- * kept readable and stable until all memory maps have been set.
- *
- * \param[in] ctx Pointer to this instance's context.
- * \param[in] iface Pointer to the algorithm interface to use.
- * \param[in] cfg Configuration to use, if known. May be NULL.
- * \param[in] flags Bitfield of VPX_CODEC_USE_* flags
- * \param[in] ver ABI version number. Must be set to
- * VPX_DECODER_ABI_VERSION
- * \retval #VPX_CODEC_OK
- * The decoder algorithm initialized.
- * \retval #VPX_CODEC_MEM_ERROR
- * Memory allocation failed.
- */
- vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx,
- vpx_codec_iface_t *iface,
- vpx_codec_dec_cfg_t *cfg,
- vpx_codec_flags_t flags,
- int ver);
-
- /*!\brief Convenience macro for vpx_codec_dec_init_ver()
- *
- * Ensures the ABI version parameter is properly set.
- */
+ passed to the decoder one
+ fragment at a time */
+#define VPX_CODEC_USE_FRAME_THREADING 0x80000 /**< Enable frame-based
+ multi-threading */
+
+ /*!\brief Stream properties
+ *
+ * This structure is used to query or set properties of the decoded
+ * stream. Algorithms may extend this structure with data specific
+ * to their bitstream by setting the sz member appropriately.
+ */
+ typedef struct vpx_codec_stream_info {
+ unsigned int sz; /**< Size of this structure */
+ unsigned int w; /**< Width (or 0 for unknown/default) */
+ unsigned int h; /**< Height (or 0 for unknown/default) */
+ unsigned int is_kf; /**< Current frame is a keyframe */
+ } vpx_codec_stream_info_t;
+
+ /* REQUIRED FUNCTIONS
+ *
+ * The following functions are required to be implemented for all decoders.
+ * They represent the base case functionality expected of all decoders.
+ */
+
+
+ /*!\brief Initialization Configurations
+ *
+ * This structure is used to pass init time configuration options to the
+ * decoder.
+ */
+ typedef struct vpx_codec_dec_cfg {
+ unsigned int threads; /**< Maximum number of threads to use, default 1 */
+ unsigned int w; /**< Width */
+ unsigned int h; /**< Height */
+ } vpx_codec_dec_cfg_t; /**< alias for struct vpx_codec_dec_cfg */
+
+
+ /*!\brief Initialize a decoder instance
+ *
+ * Initializes a decoder context using the given interface. Applications
+ * should call the vpx_codec_dec_init convenience macro instead of this
+ * function directly, to ensure that the ABI version number parameter
+ * is properly initialized.
+ *
+ * If the library was configured with --disable-multithread, this call
+ * is not thread safe and should be guarded with a lock if being used
+ * in a multithreaded context.
+ *
+ * In XMA mode (activated by setting VPX_CODEC_USE_XMA in the flags
+ * parameter), the storage pointed to by the cfg parameter must be
+ * kept readable and stable until all memory maps have been set.
+ *
+ * \param[in] ctx Pointer to this instance's context.
+ * \param[in] iface Pointer to the algorithm interface to use.
+ * \param[in] cfg Configuration to use, if known. May be NULL.
+ * \param[in] flags Bitfield of VPX_CODEC_USE_* flags
+ * \param[in] ver ABI version number. Must be set to
+ * VPX_DECODER_ABI_VERSION
+ * \retval #VPX_CODEC_OK
+ * The decoder algorithm initialized.
+ * \retval #VPX_CODEC_MEM_ERROR
+ * Memory allocation failed.
+ */
+ vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx,
+ vpx_codec_iface_t *iface,
+ vpx_codec_dec_cfg_t *cfg,
+ vpx_codec_flags_t flags,
+ int ver);
+
+ /*!\brief Convenience macro for vpx_codec_dec_init_ver()
+ *
+ * Ensures the ABI version parameter is properly set.
+ */
#define vpx_codec_dec_init(ctx, iface, cfg, flags) \
- vpx_codec_dec_init_ver(ctx, iface, cfg, flags, VPX_DECODER_ABI_VERSION)
-
-
- /*!\brief Parse stream info from a buffer
- *
- * Performs high level parsing of the bitstream. Construction of a decoder
- * context is not necessary. Can be used to determine if the bitstream is
- * of the proper format, and to extract information from the stream.
- *
- * \param[in] iface Pointer to the algorithm interface
- * \param[in] data Pointer to a block of data to parse
- * \param[in] data_sz Size of the data buffer
- * \param[in,out] si Pointer to stream info to update. The size member
- * \ref MUST be properly initialized, but \ref MAY be
- * clobbered by the algorithm. This parameter \ref MAY
- * be NULL.
- *
- * \retval #VPX_CODEC_OK
- * Bitstream is parsable and stream information updated
- */
- vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface,
- const uint8_t *data,
- unsigned int data_sz,
- vpx_codec_stream_info_t *si);
-
-
- /*!\brief Return information about the current stream.
- *
- * Returns information about the stream that has been parsed during decoding.
- *
- * \param[in] ctx Pointer to this instance's context
- * \param[in,out] si Pointer to stream info to update. The size member
- * \ref MUST be properly initialized, but \ref MAY be
- * clobbered by the algorithm. This parameter \ref MAY
- * be NULL.
- *
- * \retval #VPX_CODEC_OK
- * Bitstream is parsable and stream information updated
- */
- vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx,
- vpx_codec_stream_info_t *si);
-
-
- /*!\brief Decode data
- *
- * Processes a buffer of coded data. If the processing results in a new
- * decoded frame becoming available, PUT_SLICE and PUT_FRAME events may be
- * generated, as appropriate. Encoded data \ref MUST be passed in DTS (decode
- * time stamp) order. Frames produced will always be in PTS (presentation
- * time stamp) order.
- * If the decoder is configured with VPX_CODEC_USE_INPUT_FRAGMENTS enabled,
- * data and data_sz can contain a fragment of the encoded frame. Fragment
- * \#n must contain at least partition \#n, but can also contain subsequent
- * partitions (\#n+1 - \#n+i), and if so, fragments \#n+1, .., \#n+i must
- * be empty. When no more data is available, this function should be called
- * with NULL as data and 0 as data_sz. The memory passed to this function
- * must be available until the frame has been decoded.
- *
- * \param[in] ctx Pointer to this instance's context
- * \param[in] data Pointer to this block of new coded data. If
- * NULL, a VPX_CODEC_CB_PUT_FRAME event is posted
- * for the previously decoded frame.
- * \param[in] data_sz Size of the coded data, in bytes.
- * \param[in] user_priv Application specific data to associate with
- * this frame.
- * \param[in] deadline Soft deadline the decoder should attempt to meet,
- * in us. Set to zero for unlimited.
- *
- * \return Returns #VPX_CODEC_OK if the coded data was processed completely
- * and future pictures can be decoded without error. Otherwise,
- * see the descriptions of the other error codes in ::vpx_codec_err_t
- * for recoverability capabilities.
- */
- vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx,
- const uint8_t *data,
- unsigned int data_sz,
- void *user_priv,
- long deadline);
-
-
- /*!\brief Decoded frames iterator
- *
- * Iterates over a list of the frames available for display. The iterator
- * storage should be initialized to NULL to start the iteration. Iteration is
- * complete when this function returns NULL.
- *
- * The list of available frames becomes valid upon completion of the
- * vpx_codec_decode call, and remains valid until the next call to vpx_codec_decode.
- *
- * \param[in] ctx Pointer to this instance's context
- * \param[in,out] iter Iterator storage, initialized to NULL
- *
- * \return Returns a pointer to an image, if one is ready for display. Frames
- * produced will always be in PTS (presentation time stamp) order.
- */
- vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx,
- vpx_codec_iter_t *iter);
-
-
- /*!\defgroup cap_put_frame Frame-Based Decoding Functions
- *
- * The following functions are required to be implemented for all decoders
- * that advertise the VPX_CODEC_CAP_PUT_FRAME capability. Calling these functions
- * for codecs that don't advertise this capability will result in an error
- * code being returned, usually VPX_CODEC_ERROR
- * @{
- */
-
- /*!\brief put frame callback prototype
- *
- * This callback is invoked by the decoder to notify the application of
- * the availability of decoded image data.
- */
- typedef void (*vpx_codec_put_frame_cb_fn_t)(void *user_priv,
- const vpx_image_t *img);
-
-
- /*!\brief Register for notification of frame completion.
- *
- * Registers a given function to be called when a decoded frame is
- * available.
- *
- * \param[in] ctx Pointer to this instance's context
- * \param[in] cb Pointer to the callback function
- * \param[in] user_priv User's private data
- *
- * \retval #VPX_CODEC_OK
- * Callback successfully registered.
- * \retval #VPX_CODEC_ERROR
- * Decoder context not initialized, or algorithm not capable of
-   *     posting frame completion.
- */
- vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx,
- vpx_codec_put_frame_cb_fn_t cb,
- void *user_priv);
-
-
- /*!@} - end defgroup cap_put_frame */
-
- /*!\defgroup cap_put_slice Slice-Based Decoding Functions
- *
- * The following functions are required to be implemented for all decoders
- * that advertise the VPX_CODEC_CAP_PUT_SLICE capability. Calling these functions
- * for codecs that don't advertise this capability will result in an error
- * code being returned, usually VPX_CODEC_ERROR
- * @{
- */
-
- /*!\brief put slice callback prototype
- *
- * This callback is invoked by the decoder to notify the application of
- * the availability of partially decoded image data. The
-   * the availability of partially decoded image data.
- typedef void (*vpx_codec_put_slice_cb_fn_t)(void *user_priv,
- const vpx_image_t *img,
- const vpx_image_rect_t *valid,
- const vpx_image_rect_t *update);
-
-
- /*!\brief Register for notification of slice completion.
- *
- * Registers a given function to be called when a decoded slice is
- * available.
- *
- * \param[in] ctx Pointer to this instance's context
- * \param[in] cb Pointer to the callback function
- * \param[in] user_priv User's private data
- *
- * \retval #VPX_CODEC_OK
- * Callback successfully registered.
- * \retval #VPX_CODEC_ERROR
- * Decoder context not initialized, or algorithm not capable of
- * posting slice completion.
- */
- vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx,
- vpx_codec_put_slice_cb_fn_t cb,
- void *user_priv);
-
-
- /*!@} - end defgroup cap_put_slice*/
-
- /*!@} - end defgroup decoder*/
+ vpx_codec_dec_init_ver(ctx, iface, cfg, flags, VPX_DECODER_ABI_VERSION)
+
+
+ /*!\brief Parse stream info from a buffer
+ *
+ * Performs high level parsing of the bitstream. Construction of a decoder
+ * context is not necessary. Can be used to determine if the bitstream is
+ * of the proper format, and to extract information from the stream.
+ *
+ * \param[in] iface Pointer to the algorithm interface
+ * \param[in] data Pointer to a block of data to parse
+ * \param[in] data_sz Size of the data buffer
+ * \param[in,out] si Pointer to stream info to update. The size member
+ * \ref MUST be properly initialized, but \ref MAY be
+ * clobbered by the algorithm. This parameter \ref MAY
+ * be NULL.
+ *
+ * \retval #VPX_CODEC_OK
+ * Bitstream is parsable and stream information updated
+ */
+ vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface,
+ const uint8_t *data,
+ unsigned int data_sz,
+ vpx_codec_stream_info_t *si);
+
+
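The sz-initialization requirement above is easy to miss, so a short sketch;
it assumes the VP8 decoder interface from vpx/vp8dx.h, and buf/buf_sz stand
in for the application's coded data:

    #include <stdio.h>
    #include "vpx/vpx_decoder.h"
    #include "vpx/vp8dx.h"   /* vpx_codec_vp8_dx() */

    static void probe_stream(const uint8_t *buf, unsigned int buf_sz) {
      vpx_codec_stream_info_t si;

      si.sz = sizeof(si);    /* the size member MUST be initialized */
      if (vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), buf, buf_sz,
                                     &si) == VPX_CODEC_OK)
        printf("%ux%u, keyframe=%u\n", si.w, si.h, si.is_kf);
    }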
+ /*!\brief Return information about the current stream.
+ *
+ * Returns information about the stream that has been parsed during decoding.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in,out] si Pointer to stream info to update. The size member
+ * \ref MUST be properly initialized, but \ref MAY be
+ * clobbered by the algorithm. This parameter \ref MAY
+ * be NULL.
+ *
+ * \retval #VPX_CODEC_OK
+ * Bitstream is parsable and stream information updated
+ */
+ vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx,
+ vpx_codec_stream_info_t *si);
+
+
+ /*!\brief Decode data
+ *
+ * Processes a buffer of coded data. If the processing results in a new
+ * decoded frame becoming available, PUT_SLICE and PUT_FRAME events may be
+ * generated, as appropriate. Encoded data \ref MUST be passed in DTS (decode
+ * time stamp) order. Frames produced will always be in PTS (presentation
+ * time stamp) order.
+ * If the decoder is configured with VPX_CODEC_USE_INPUT_FRAGMENTS enabled,
+ * data and data_sz can contain a fragment of the encoded frame. Fragment
+ * \#n must contain at least partition \#n, but can also contain subsequent
+ * partitions (\#n+1 - \#n+i), and if so, fragments \#n+1, .., \#n+i must
+ * be empty. When no more data is available, this function should be called
+ * with NULL as data and 0 as data_sz. The memory passed to this function
+ * must be available until the frame has been decoded.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in] data Pointer to this block of new coded data. If
+ * NULL, a VPX_CODEC_CB_PUT_FRAME event is posted
+ * for the previously decoded frame.
+ * \param[in] data_sz Size of the coded data, in bytes.
+ * \param[in] user_priv Application specific data to associate with
+ * this frame.
+ * \param[in] deadline Soft deadline the decoder should attempt to meet,
+ * in us. Set to zero for unlimited.
+ *
+ * \return Returns #VPX_CODEC_OK if the coded data was processed completely
+ * and future pictures can be decoded without error. Otherwise,
+ * see the descriptions of the other error codes in ::vpx_codec_err_t
+ * for recoverability capabilities.
+ */
+ vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx,
+ const uint8_t *data,
+ unsigned int data_sz,
+ void *user_priv,
+ long deadline);
+
+
+ /*!\brief Decoded frames iterator
+ *
+ * Iterates over a list of the frames available for display. The iterator
+ * storage should be initialized to NULL to start the iteration. Iteration is
+ * complete when this function returns NULL.
+ *
+ * The list of available frames becomes valid upon completion of the
+ * vpx_codec_decode call, and remains valid until the next call to vpx_codec_decode.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in,out] iter Iterator storage, initialized to NULL
+ *
+ * \return Returns a pointer to an image, if one is ready for display. Frames
+ * produced will always be in PTS (presentation time stamp) order.
+ */
+ vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx,
+ vpx_codec_iter_t *iter);
+
+
+ /*!\defgroup cap_put_frame Frame-Based Decoding Functions
+ *
+ * The following functions are required to be implemented for all decoders
+ * that advertise the VPX_CODEC_CAP_PUT_FRAME capability. Calling these functions
+ * for codecs that don't advertise this capability will result in an error
+ * code being returned, usually VPX_CODEC_ERROR
+ * @{
+ */
+
+ /*!\brief put frame callback prototype
+ *
+ * This callback is invoked by the decoder to notify the application of
+ * the availability of decoded image data.
+ */
+ typedef void (*vpx_codec_put_frame_cb_fn_t)(void *user_priv,
+ const vpx_image_t *img);
+
+
+ /*!\brief Register for notification of frame completion.
+ *
+ * Registers a given function to be called when a decoded frame is
+ * available.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in] cb Pointer to the callback function
+ * \param[in] user_priv User's private data
+ *
+ * \retval #VPX_CODEC_OK
+ * Callback successfully registered.
+ * \retval #VPX_CODEC_ERROR
+ * Decoder context not initialized, or algorithm not capable of
+   *     posting frame completion.
+ */
+ vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx,
+ vpx_codec_put_frame_cb_fn_t cb,
+ void *user_priv);
+
+
+ /*!@} - end defgroup cap_put_frame */
+
+ /*!\defgroup cap_put_slice Slice-Based Decoding Functions
+ *
+ * The following functions are required to be implemented for all decoders
+ * that advertise the VPX_CODEC_CAP_PUT_SLICE capability. Calling these functions
+ * for codecs that don't advertise this capability will result in an error
+ * code being returned, usually VPX_CODEC_ERROR
+ * @{
+ */
+
+ /*!\brief put slice callback prototype
+ *
+ * This callback is invoked by the decoder to notify the application of
+   * the availability of partially decoded image data.
+ */
+ typedef void (*vpx_codec_put_slice_cb_fn_t)(void *user_priv,
+ const vpx_image_t *img,
+ const vpx_image_rect_t *valid,
+ const vpx_image_rect_t *update);
+
+
+ /*!\brief Register for notification of slice completion.
+ *
+ * Registers a given function to be called when a decoded slice is
+ * available.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in] cb Pointer to the callback function
+ * \param[in] user_priv User's private data
+ *
+ * \retval #VPX_CODEC_OK
+ * Callback successfully registered.
+ * \retval #VPX_CODEC_ERROR
+ * Decoder context not initialized, or algorithm not capable of
+ * posting slice completion.
+ */
+ vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx,
+ vpx_codec_put_slice_cb_fn_t cb,
+ void *user_priv);
+
+
+ /*!@} - end defgroup cap_put_slice*/
+
+ /*!@} - end defgroup decoder*/
#endif
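Taken together, the decoder entry points above reduce to a short feed-and-drain
loop in application code. A minimal sketch, again assuming the VP8 decoder
interface from vpx/vp8dx.h; decode_one() and its error handling are
illustrative only:

    #include "vpx/vpx_decoder.h"
    #include "vpx/vp8dx.h"            /* vpx_codec_vp8_dx() */

    static int decode_one(vpx_codec_ctx_t *codec,
                          const uint8_t *data, unsigned int data_sz) {
      vpx_codec_iter_t iter = NULL;   /* iterator MUST start as NULL */
      vpx_image_t *img;

      /* Feed one coded frame; deadline 0 means "take as long as needed". */
      if (vpx_codec_decode(codec, data, data_sz, NULL, 0) != VPX_CODEC_OK)
        return -1;

      /* Drain every image that became available, returned in PTS order. */
      while ((img = vpx_codec_get_frame(codec, &iter)) != NULL) {
        /* img->planes[] and img->stride[] hold the displayable picture. */
      }
      return 0;
    }

Typical setup is once per stream: vpx_codec_dec_init(&codec,
vpx_codec_vp8_dx(), NULL, 0), then decode_one() per coded frame, then
vpx_codec_destroy(&codec) when finished.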
diff --git a/libvpx/vpx/vpx_encoder.h b/libvpx/vpx/vpx_encoder.h
index 67d9033..ffdbc06 100644
--- a/libvpx/vpx/vpx_encoder.h
+++ b/libvpx/vpx/vpx_encoder.h
@@ -32,902 +32,891 @@ extern "C" {
#define VPX_ENCODER_H
#include "vpx_codec.h"
-/*! Temporal Scalability: Maximum length of the sequence defining frame
- * layer membership
- */
+ /*! Temporal Scalability: Maximum length of the sequence defining frame
+ * layer membership
+ */
#define VPX_TS_MAX_PERIODICITY 16
-/*! Temporal Scalability: Maximum number of coding layers */
+ /*! Temporal Scalability: Maximum number of coding layers */
#define VPX_TS_MAX_LAYERS 5
-/*!\deprecated Use #VPX_TS_MAX_PERIODICITY instead. */
+ /*!\deprecated Use #VPX_TS_MAX_PERIODICITY instead. */
#define MAX_PERIODICITY VPX_TS_MAX_PERIODICITY
-/*!\deprecated Use #VPX_TS_MAX_LAYERS instead. */
+ /*!\deprecated Use #VPX_TS_MAX_LAYERS instead. */
#define MAX_LAYERS VPX_TS_MAX_LAYERS
- /*!\brief Current ABI version number
- *
- * \internal
- * If this file is altered in any way that changes the ABI, this value
- * must be bumped. Examples include, but are not limited to, changing
- * types, removing or reassigning enums, adding/removing/rearranging
- * fields to structures
- */
+ /*!\brief Current ABI version number
+ *
+ * \internal
+ * If this file is altered in any way that changes the ABI, this value
+ * must be bumped. Examples include, but are not limited to, changing
+ * types, removing or reassigning enums, adding/removing/rearranging
+ * fields to structures
+ */
#define VPX_ENCODER_ABI_VERSION (3 + VPX_CODEC_ABI_VERSION) /**<\hideinitializer*/
- /*! \brief Encoder capabilities bitfield
- *
- * Each encoder advertises the capabilities it supports as part of its
- * ::vpx_codec_iface_t interface structure. Capabilities are extra
- * interfaces or functionality, and are not required to be supported
- * by an encoder.
- *
- * The available flags are specified by VPX_CODEC_CAP_* defines.
- */
+ /*! \brief Encoder capabilities bitfield
+ *
+ * Each encoder advertises the capabilities it supports as part of its
+ * ::vpx_codec_iface_t interface structure. Capabilities are extra
+ * interfaces or functionality, and are not required to be supported
+ * by an encoder.
+ *
+ * The available flags are specified by VPX_CODEC_CAP_* defines.
+ */
#define VPX_CODEC_CAP_PSNR 0x10000 /**< Can issue PSNR packets */
- /*! Can output one partition at a time. Each partition is returned in its
- * own VPX_CODEC_CX_FRAME_PKT, with the FRAME_IS_FRAGMENT flag set for
- * every partition but the last. In this mode all frames are always
- * returned partition by partition.
- */
+ /*! Can output one partition at a time. Each partition is returned in its
+ * own VPX_CODEC_CX_FRAME_PKT, with the FRAME_IS_FRAGMENT flag set for
+ * every partition but the last. In this mode all frames are always
+ * returned partition by partition.
+ */
#define VPX_CODEC_CAP_OUTPUT_PARTITION 0x20000
- /*! \brief Initialization-time Feature Enabling
- *
- * Certain codec features must be known at initialization time, to allow
- * for proper memory allocation.
- *
- * The available flags are specified by VPX_CODEC_USE_* defines.
- */
+ /*! \brief Initialization-time Feature Enabling
+ *
+ * Certain codec features must be known at initialization time, to allow
+ * for proper memory allocation.
+ *
+ * The available flags are specified by VPX_CODEC_USE_* defines.
+ */
#define VPX_CODEC_USE_PSNR 0x10000 /**< Calculate PSNR on each frame */
#define VPX_CODEC_USE_OUTPUT_PARTITION 0x20000 /**< Make the encoder output one
- partition at a time. */
-
-
- /*!\brief Generic fixed size buffer structure
- *
- * This structure is able to hold a reference to any fixed size buffer.
- */
- typedef struct vpx_fixed_buf
- {
- void *buf; /**< Pointer to the data */
- size_t sz; /**< Length of the buffer, in chars */
- } vpx_fixed_buf_t; /**< alias for struct vpx_fixed_buf */
-
-
- /*!\brief Time Stamp Type
- *
-   * An integer which, when multiplied by the stream's time base, provides
- * the absolute time of a sample.
- */
- typedef int64_t vpx_codec_pts_t;
-
-
- /*!\brief Compressed Frame Flags
- *
- * This type represents a bitfield containing information about a compressed
- * frame that may be useful to an application. The most significant 16 bits
- * can be used by an algorithm to provide additional detail, for example to
-   * support frame types that are codec specific (such as MPEG-1 D-frames).
- */
- typedef uint32_t vpx_codec_frame_flags_t;
+ partition at a time. */
+
+
+ /*!\brief Generic fixed size buffer structure
+ *
+ * This structure is able to hold a reference to any fixed size buffer.
+ */
+ typedef struct vpx_fixed_buf {
+ void *buf; /**< Pointer to the data */
+ size_t sz; /**< Length of the buffer, in chars */
+ } vpx_fixed_buf_t; /**< alias for struct vpx_fixed_buf */
+
+
+ /*!\brief Time Stamp Type
+ *
+   * An integer which, when multiplied by the stream's time base, provides
+ * the absolute time of a sample.
+ */
+ typedef int64_t vpx_codec_pts_t;
+
+
+ /*!\brief Compressed Frame Flags
+ *
+ * This type represents a bitfield containing information about a compressed
+ * frame that may be useful to an application. The most significant 16 bits
+ * can be used by an algorithm to provide additional detail, for example to
+   * support frame types that are codec specific (such as MPEG-1 D-frames).
+ */
+ typedef uint32_t vpx_codec_frame_flags_t;
#define VPX_FRAME_IS_KEY 0x1 /**< frame is the start of a GOP */
#define VPX_FRAME_IS_DROPPABLE 0x2 /**< frame can be dropped without affecting
- the stream (no future frame depends on
- this one) */
+ the stream (no future frame depends on
+ this one) */
#define VPX_FRAME_IS_INVISIBLE 0x4 /**< frame should be decoded but will not
- be shown */
+ be shown */
#define VPX_FRAME_IS_FRAGMENT 0x8 /**< this is a fragment of the encoded
- frame */
-
- /*!\brief Error Resilient flags
- *
- * These flags define which error resilient features to enable in the
- * encoder. The flags are specified through the
- * vpx_codec_enc_cfg::g_error_resilient variable.
- */
- typedef uint32_t vpx_codec_er_flags_t;
+ frame */
+
+ /*!\brief Error Resilient flags
+ *
+ * These flags define which error resilient features to enable in the
+ * encoder. The flags are specified through the
+ * vpx_codec_enc_cfg::g_error_resilient variable.
+ */
+ typedef uint32_t vpx_codec_er_flags_t;
#define VPX_ERROR_RESILIENT_DEFAULT 0x1 /**< Improve resiliency against
- losses of whole frames */
+ losses of whole frames */
#define VPX_ERROR_RESILIENT_PARTITIONS 0x2 /**< The frame partitions are
- independently decodable by the
- bool decoder, meaning that
- partitions can be decoded even
- though earlier partitions have
- been lost. Note that intra
-                                            prediction is still done over
- the partition boundary. */
-
- /*!\brief Encoder output packet variants
- *
- * This enumeration lists the different kinds of data packets that can be
- * returned by calls to vpx_codec_get_cx_data(). Algorithms \ref MAY
- * extend this list to provide additional functionality.
- */
- enum vpx_codec_cx_pkt_kind
- {
- VPX_CODEC_CX_FRAME_PKT, /**< Compressed video frame */
- VPX_CODEC_STATS_PKT, /**< Two-pass statistics for this frame */
- VPX_CODEC_PSNR_PKT, /**< PSNR statistics for this frame */
- VPX_CODEC_CUSTOM_PKT = 256 /**< Algorithm extensions */
- };
-
-
- /*!\brief Encoder output packet
- *
- * This structure contains the different kinds of output data the encoder
- * may produce while compressing a frame.
- */
- typedef struct vpx_codec_cx_pkt
- {
- enum vpx_codec_cx_pkt_kind kind; /**< packet variant */
- union
- {
- struct
- {
- void *buf; /**< compressed data buffer */
- size_t sz; /**< length of compressed data */
- vpx_codec_pts_t pts; /**< time stamp to show frame
+ independently decodable by the
+ bool decoder, meaning that
+ partitions can be decoded even
+ though earlier partitions have
+ been lost. Note that intra
+                                            prediction is still done over
+ the partition boundary. */
+
+ /*!\brief Encoder output packet variants
+ *
+ * This enumeration lists the different kinds of data packets that can be
+ * returned by calls to vpx_codec_get_cx_data(). Algorithms \ref MAY
+ * extend this list to provide additional functionality.
+ */
+ enum vpx_codec_cx_pkt_kind {
+ VPX_CODEC_CX_FRAME_PKT, /**< Compressed video frame */
+ VPX_CODEC_STATS_PKT, /**< Two-pass statistics for this frame */
+ VPX_CODEC_PSNR_PKT, /**< PSNR statistics for this frame */
+ VPX_CODEC_CUSTOM_PKT = 256 /**< Algorithm extensions */
+ };
+
+
+ /*!\brief Encoder output packet
+ *
+ * This structure contains the different kinds of output data the encoder
+ * may produce while compressing a frame.
+ */
+ typedef struct vpx_codec_cx_pkt {
+ enum vpx_codec_cx_pkt_kind kind; /**< packet variant */
+ union {
+ struct {
+ void *buf; /**< compressed data buffer */
+ size_t sz; /**< length of compressed data */
+ vpx_codec_pts_t pts; /**< time stamp to show frame
(in timebase units) */
- unsigned long duration; /**< duration to show frame
+ unsigned long duration; /**< duration to show frame
(in timebase units) */
- vpx_codec_frame_flags_t flags; /**< flags for this frame */
- int partition_id; /**< the partition id
+ vpx_codec_frame_flags_t flags; /**< flags for this frame */
+ int partition_id; /**< the partition id
defines the decoding order
of the partitions. Only
applicable when "output partition"
mode is enabled. First partition
has id 0.*/
- } frame; /**< data for compressed frame packet */
- struct vpx_fixed_buf twopass_stats; /**< data for two-pass packet */
- struct vpx_psnr_pkt
- {
- unsigned int samples[4]; /**< Number of samples, total/y/u/v */
- uint64_t sse[4]; /**< sum squared error, total/y/u/v */
- double psnr[4]; /**< PSNR, total/y/u/v */
- } psnr; /**< data for PSNR packet */
- struct vpx_fixed_buf raw; /**< data for arbitrary packets */
+ } frame; /**< data for compressed frame packet */
+ struct vpx_fixed_buf twopass_stats; /**< data for two-pass packet */
+ struct vpx_psnr_pkt {
+ unsigned int samples[4]; /**< Number of samples, total/y/u/v */
+ uint64_t sse[4]; /**< sum squared error, total/y/u/v */
+ double psnr[4]; /**< PSNR, total/y/u/v */
+ } psnr; /**< data for PSNR packet */
+ struct vpx_fixed_buf raw; /**< data for arbitrary packets */
+
+ /* This packet size is fixed to allow codecs to extend this
+ * interface without having to manage storage for raw packets,
+ * i.e., if a packet is smaller than 128 bytes, it can be stored in
+ * the packet list directly.
+ */
+ char pad[128 - sizeof(enum vpx_codec_cx_pkt_kind)]; /**< fixed sz */
+ } data; /**< packet data */
+ } vpx_codec_cx_pkt_t; /**< alias for struct vpx_codec_cx_pkt */
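For orientation, the psnr[] values in the PSNR packet can be reconstructed from samples[] and sse[] with the standard formula; a minimal sketch, assuming 8-bit samples (peak value 255) — the 100 dB cap for a zero error is a common convention, not something this header mandates:

    #include <math.h>
    #include <stdint.h>

    /* PSNR in dB from a sample count and its summed squared error;
     * assumes 8-bit depth (peak 255). */
    static double psnr_from_sse(unsigned int samples, uint64_t sse) {
      if (sse == 0) return 100.0;  /* conventional cap for identical planes */
      return 10.0 * log10((double)samples * 255.0 * 255.0 / (double)sse);
    }

Applied to pkt->data.psnr, psnr_from_sse(samples[0], sse[0]) should agree with psnr[0], and likewise per plane.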
+
+
+ /*!\brief Rational Number
+ *
+ * This structure holds a fractional value.
+ */
+ typedef struct vpx_rational {
+ int num; /**< fraction numerator */
+ int den; /**< fraction denominator */
+ } vpx_rational_t; /**< alias for struct vpx_rational */
+
+
+ /*!\brief Multi-pass Encoding Pass */
+ enum vpx_enc_pass {
+ VPX_RC_ONE_PASS, /**< Single pass mode */
+ VPX_RC_FIRST_PASS, /**< First pass of multi-pass mode */
+ VPX_RC_LAST_PASS /**< Final pass of multi-pass mode */
+ };
+
+
+ /*!\brief Rate control mode */
+ enum vpx_rc_mode {
+ VPX_VBR, /**< Variable Bit Rate (VBR) mode */
+ VPX_CBR, /**< Constant Bit Rate (CBR) mode */
+ VPX_CQ /**< Constant Quality (CQ) mode */
+ };
+
+
+ /*!\brief Keyframe placement mode.
+ *
+ * This enumeration determines whether keyframes are placed automatically by
+ * the encoder or whether this behavior is disabled. Older releases of this
+ * SDK were implemented such that VPX_KF_FIXED meant keyframes were disabled.
+ * Since that name is confusing for this behavior, new code should use
+ * the symbols VPX_KF_AUTO and VPX_KF_DISABLED instead.
+ */
+ enum vpx_kf_mode {
+ VPX_KF_FIXED, /**< deprecated, implies VPX_KF_DISABLED */
+ VPX_KF_AUTO, /**< Encoder determines optimal placement automatically */
+ VPX_KF_DISABLED = 0 /**< Encoder does not place keyframes. */
+ };
+
+
+ /*!\brief Encoded Frame Flags
+ *
+ * This type indicates a bitfield to be passed to vpx_codec_encode(), defining
+ * per-frame boolean values. By convention, bits common to all codecs will be
+ * named VPX_EFLAG_*, and bits specific to an algorithm will be named
+ * /algo/_eflag_*. The lower order 16 bits are reserved for common use.
+ */
+ typedef long vpx_enc_frame_flags_t;
+#define VPX_EFLAG_FORCE_KF (1<<0) /**< Force this frame to be a keyframe */
- /* This packet size is fixed to allow codecs to extend this
- * interface without having to manage storage for raw packets,
- * i.e., if it's smaller than 128 bytes, you can store in the
- * packet list directly.
- */
- char pad[128 - sizeof(enum vpx_codec_cx_pkt_kind)]; /**< fixed sz */
- } data; /**< packet data */
- } vpx_codec_cx_pkt_t; /**< alias for struct vpx_codec_cx_pkt */
+ /*!\brief Encoder configuration structure
+ *
+ * This structure contains the encoder settings that have common representations
+ * across all codecs. This doesn't imply that all codecs support all features,
+ * however.
+ */
+ typedef struct vpx_codec_enc_cfg {
+ /*
+ * generic settings (g)
+ */
- /*!\brief Rational Number
+ /*!\brief Algorithm specific "usage" value
*
- * This structure holds a fractional value.
+ * Algorithms may define multiple values for usage, which may convey the
+ * intent of how the application intends to use the stream. If this value
+ * is non-zero, consult the documentation for the codec to determine its
+ * meaning.
*/
- typedef struct vpx_rational
- {
- int num; /**< fraction numerator */
- int den; /**< fraction denominator */
- } vpx_rational_t; /**< alias for struct vpx_rational */
+ unsigned int g_usage;
- /*!\brief Multi-pass Encoding Pass */
- enum vpx_enc_pass
- {
- VPX_RC_ONE_PASS, /**< Single pass mode */
- VPX_RC_FIRST_PASS, /**< First pass of multi-pass mode */
- VPX_RC_LAST_PASS /**< Final pass of multi-pass mode */
- };
+ /*!\brief Maximum number of threads to use
+ *
+ * For multi-threaded implementations, use no more than this number of
+ * threads. The codec may use fewer threads than allowed. The value
+ * 0 is equivalent to the value 1.
+ */
+ unsigned int g_threads;
+
+ /*!\brief Bitstream profile to use
+ *
+ * Some codecs support a notion of multiple bitstream profiles. Typically
+ * this maps to a set of features that are turned on or off. Often the
+ * profile to use is determined by the features of the intended decoder.
+ * Consult the documentation for the codec to determine the valid values
+ * for this parameter, or set to zero for a sane default.
+ */
+ unsigned int g_profile; /**< profile of bitstream to use */
- /*!\brief Rate control mode */
- enum vpx_rc_mode
- {
- VPX_VBR, /**< Variable Bit Rate (VBR) mode */
- VPX_CBR, /**< Constant Bit Rate (CBR) mode */
- VPX_CQ /**< Constant Quality (CQ) mode */
- };
- /*!\brief Keyframe placement mode.
+ /*!\brief Width of the frame
*
- * This enumeration determines whether keyframes are placed automatically by
- * the encoder or whether this behavior is disabled. Older releases of this
- * SDK were implemented such that VPX_KF_FIXED meant keyframes were disabled.
- * This name is confusing for this behavior, so the new symbols to be used
- * are VPX_KF_AUTO and VPX_KF_DISABLED.
+ * This value identifies the presentation resolution of the frame,
+ * in pixels. Note that the frames passed as input to the encoder must
+ * have this resolution. Frames will be presented by the decoder in this
+ * resolution, independent of any spatial resampling the encoder may do.
*/
- enum vpx_kf_mode
- {
- VPX_KF_FIXED, /**< deprecated, implies VPX_KF_DISABLED */
- VPX_KF_AUTO, /**< Encoder determines optimal placement automatically */
- VPX_KF_DISABLED = 0 /**< Encoder does not place keyframes. */
- };
+ unsigned int g_w;
- /*!\brief Encoded Frame Flags
+ /*!\brief Height of the frame
*
- * This type indicates a bitfield to be passed to vpx_codec_encode(), defining
- * per-frame boolean values. By convention, bits common to all codecs will be
- * named VPX_EFLAG_*, and bits specific to an algorithm will be named
- * /algo/_eflag_*. The lower order 16 bits are reserved for common use.
+ * This value identifies the presentation resolution of the frame,
+ * in pixels. Note that the frames passed as input to the encoder must
+ * have this resolution. Frames will be presented by the decoder in this
+ * resolution, independent of any spatial resampling the encoder may do.
*/
- typedef long vpx_enc_frame_flags_t;
-#define VPX_EFLAG_FORCE_KF (1<<0) /**< Force this frame to be a keyframe */
+ unsigned int g_h;
- /*!\brief Encoder configuration structure
- *
- * This structure contains the encoder settings that have common representations
- * across all codecs. This doesn't imply that all codecs support all features,
- * however.
- */
- typedef struct vpx_codec_enc_cfg
- {
- /*
- * generic settings (g)
- */
-
- /*!\brief Algorithm specific "usage" value
- *
- * Algorithms may define multiple values for usage, which may convey the
- * intent of how the application intends to use the stream. If this value
- * is non-zero, consult the documentation for the codec to determine its
- * meaning.
- */
- unsigned int g_usage;
-
-
- /*!\brief Maximum number of threads to use
- *
- * For multi-threaded implementations, use no more than this number of
- * threads. The codec may use fewer threads than allowed. The value
- * 0 is equivalent to the value 1.
- */
- unsigned int g_threads;
-
-
- /*!\brief Bitstream profile to use
- *
- * Some codecs support a notion of multiple bitstream profiles. Typically
- * this maps to a set of features that are turned on or off. Often the
- * profile to use is determined by the features of the intended decoder.
- * Consult the documentation for the codec to determine the valid values
- * for this parameter, or set to zero for a sane default.
- */
- unsigned int g_profile; /**< profile of bitstream to use */
-
-
-
- /*!\brief Width of the frame
- *
- * This value identifies the presentation resolution of the frame,
- * in pixels. Note that the frames passed as input to the encoder must
- * have this resolution. Frames will be presented by the decoder in this
- * resolution, independent of any spatial resampling the encoder may do.
- */
- unsigned int g_w;
-
-
- /*!\brief Height of the frame
- *
- * This value identifies the presentation resolution of the frame,
- * in pixels. Note that the frames passed as input to the encoder must
- * have this resolution. Frames will be presented by the decoder in this
- * resolution, independent of any spatial resampling the encoder may do.
- */
- unsigned int g_h;
-
-
- /*!\brief Stream timebase units
- *
- * Indicates the smallest interval of time, in seconds, used by the stream.
- * For fixed frame rate material, or variable frame rate material where
- * frames are timed at a multiple of a given clock (ex: video capture),
- * the \ref RECOMMENDED method is to set the timebase to the reciprocal
- * of the frame rate (ex: 1001/30000 for 29.970 Hz NTSC). This allows the
- * pts to correspond to the frame number, which can be handy. For
- * re-encoding video from containers with absolute time timestamps, the
- * \ref RECOMMENDED method is to set the timebase to that of the parent
- * container or multimedia framework (ex: 1/1000 for ms, as in FLV).
- */
- struct vpx_rational g_timebase;
-
-
- /*!\brief Enable error resilient modes.
- *
- * The error resilient bitfield indicates to the encoder which features
- * it should enable to take measures for streaming over lossy or noisy
- * links.
- */
- vpx_codec_er_flags_t g_error_resilient;
-
-
- /*!\brief Multi-pass Encoding Mode
- *
- * This value should be set to the current phase for multi-pass encoding.
- * For single pass, set to #VPX_RC_ONE_PASS.
- */
- enum vpx_enc_pass g_pass;
-
-
- /*!\brief Allow lagged encoding
- *
- * If set, this value allows the encoder to consume a number of input
- * frames before producing output frames. This allows the encoder to
- * base decisions for the current frame on future frames. This does
- * increase the latency of the encoding pipeline, so it is not appropriate
- * in all situations (ex: realtime encoding).
- *
- * Note that this is a maximum value -- the encoder may produce frames
- * sooner than the given limit. Set this value to 0 to disable this
- * feature.
- */
- unsigned int g_lag_in_frames;
-
-
- /*
- * rate control settings (rc)
- */
-
- /*!\brief Temporal resampling configuration, if supported by the codec.
- *
- * Temporal resampling allows the codec to "drop" frames as a strategy to
- * meet its target data rate. This can cause temporal discontinuities in
- * the encoded video, which may appear as stuttering during playback. This
- * trade-off is often acceptable, but for many applications is not. It can
- * be disabled in these cases.
- *
- * Note that not all codecs support this feature. All vpx VPx codecs do.
- * For other codecs, consult the documentation for that algorithm.
- *
- * This threshold is described as a percentage of the target data buffer.
- * When the data buffer falls below this percentage of fullness, a
- * dropped frame is indicated. Set the threshold to zero (0) to disable
- * this feature.
- */
- unsigned int rc_dropframe_thresh;
-
-
- /*!\brief Enable/disable spatial resampling, if supported by the codec.
- *
- * Spatial resampling allows the codec to compress a lower resolution
- * version of the frame, which is then upscaled by the encoder to the
- * correct presentation resolution. This increases visual quality at
- * low data rates, at the expense of CPU time on the encoder/decoder.
- */
- unsigned int rc_resize_allowed;
-
-
- /*!\brief Spatial resampling up watermark.
- *
- * This threshold is described as a percentage of the target data buffer.
- * When the data buffer rises above this percentage of fullness, the
- * encoder will step up to a higher resolution version of the frame.
- */
- unsigned int rc_resize_up_thresh;
-
-
- /*!\brief Spatial resampling down watermark.
- *
- * This threshold is described as a percentage of the target data buffer.
- * When the data buffer falls below this percentage of fullness, the
- * encoder will step down to a lower resolution version of the frame.
- */
- unsigned int rc_resize_down_thresh;
-
-
- /*!\brief Rate control algorithm to use.
- *
- * Indicates whether the end usage of this stream is to be streamed over
- * a bandwidth constrained link, indicating that Constant Bit Rate (CBR)
- * mode should be used, or whether it will be played back on a high
- * bandwidth link, as from a local disk, where higher variations in
- * bitrate are acceptable.
- */
- enum vpx_rc_mode rc_end_usage;
-
-
- /*!\brief Two-pass stats buffer.
- *
- * A buffer containing all of the stats packets produced in the first
- * pass, concatenated.
- */
- struct vpx_fixed_buf rc_twopass_stats_in;
-
-
- /*!\brief Target data rate
- *
- * Target bandwidth to use for this stream, in kilobits per second.
- */
- unsigned int rc_target_bitrate;
-
-
- /*
- * quantizer settings
- */
-
-
- /*!\brief Minimum (Best Quality) Quantizer
- *
- * The quantizer is the most direct control over the quality of the
- * encoded image. The range of valid values for the quantizer is codec
- * specific. Consult the documentation for the codec to determine the
- * values to use. To determine the range programmatically, call
- * vpx_codec_enc_config_default() with a usage value of 0.
- */
- unsigned int rc_min_quantizer;
-
-
- /*!\brief Maximum (Worst Quality) Quantizer
- *
- * The quantizer is the most direct control over the quality of the
- * encoded image. The range of valid values for the quantizer is codec
- * specific. Consult the documentation for the codec to determine the
- * values to use. To determine the range programmatically, call
- * vpx_codec_enc_config_default() with a usage value of 0.
- */
- unsigned int rc_max_quantizer;
-
-
- /*
- * bitrate tolerance
- */
-
-
- /*!\brief Rate control adaptation undershoot control
- *
- * This value, expressed as a percentage of the target bitrate,
- * controls the maximum allowed adaptation speed of the codec.
- * This factor controls the maximum amount of bits that can
- * be subtracted from the target bitrate in order to compensate
- * for prior overshoot.
- *
- * Valid values in the range 0-1000.
- */
- unsigned int rc_undershoot_pct;
-
-
- /*!\brief Rate control adaptation overshoot control
- *
- * This value, expressed as a percentage of the target bitrate,
- * controls the maximum allowed adaptation speed of the codec.
- * This factor controls the maximum amount of bits that can
- * be added to the target bitrate in order to compensate for
- * prior undershoot.
- *
- * Valid values in the range 0-1000.
- */
- unsigned int rc_overshoot_pct;
-
-
- /*
- * decoder buffer model parameters
- */
-
-
- /*!\brief Decoder Buffer Size
- *
- * This value indicates the amount of data that may be buffered by the
- * decoding application. Note that this value is expressed in units of
- * time (milliseconds). For example, a value of 5000 indicates that the
- * client will buffer (at least) 5000ms worth of encoded data. Use the
- * target bitrate (#rc_target_bitrate) to convert to bits/bytes, if
- * necessary.
- */
- unsigned int rc_buf_sz;
-
-
- /*!\brief Decoder Buffer Initial Size
- *
- * This value indicates the amount of data that will be buffered by the
- * decoding application prior to beginning playback. This value is
- * expressed in units of time (milliseconds). Use the target bitrate
- * (#rc_target_bitrate) to convert to bits/bytes, if necessary.
- */
- unsigned int rc_buf_initial_sz;
-
-
- /*!\brief Decoder Buffer Optimal Size
- *
- * This value indicates the amount of data that the encoder should try
- * to maintain in the decoder's buffer. This value is expressed in units
- * of time (milliseconds). Use the target bitrate (#rc_target_bitrate)
- * to convert to bits/bytes, if necessary.
- */
- unsigned int rc_buf_optimal_sz;
-
-
- /*
- * 2 pass rate control parameters
- */
-
-
- /*!\brief Two-pass mode CBR/VBR bias
- *
- * Bias, expressed on a scale of 0 to 100, for determining target size
- * for the current frame. The value 0 indicates the optimal CBR mode
- * value should be used. The value 100 indicates the optimal VBR mode
- * value should be used. Values in between indicate which way the
- * encoder should "lean."
- */
- unsigned int rc_2pass_vbr_bias_pct; /**< RC mode bias between CBR and VBR(0-100: 0->CBR, 100->VBR) */
-
-
- /*!\brief Two-pass mode per-GOP minimum bitrate
- *
- * This value, expressed as a percentage of the target bitrate, indicates
- * the minimum bitrate to be used for a single GOP (aka "section")
- */
- unsigned int rc_2pass_vbr_minsection_pct;
-
-
- /*!\brief Two-pass mode per-GOP maximum bitrate
- *
- * This value, expressed as a percentage of the target bitrate, indicates
- * the maximum bitrate to be used for a single GOP (aka "section")
- */
- unsigned int rc_2pass_vbr_maxsection_pct;
-
-
- /*
- * keyframing settings (kf)
- */
-
- /*!\brief Keyframe placement mode
- *
- * This value indicates whether the encoder should place keyframes at a
- * fixed interval, or determine the optimal placement automatically
- * (as governed by the #kf_min_dist and #kf_max_dist parameters)
- */
- enum vpx_kf_mode kf_mode;
-
-
- /*!\brief Keyframe minimum interval
- *
- * This value, expressed as a number of frames, prevents the encoder from
- * placing a keyframe nearer than kf_min_dist to the previous keyframe. At
- * least kf_min_dist frames non-keyframes will be coded before the next
- * keyframe. Set kf_min_dist equal to kf_max_dist for a fixed interval.
- */
- unsigned int kf_min_dist;
-
-
- /*!\brief Keyframe maximum interval
- *
- * This value, expressed as a number of frames, forces the encoder to code
- * a keyframe if one has not been coded in the last kf_max_dist frames.
- * A value of 0 implies all frames will be keyframes. Set kf_min_dist
- * equal to kf_max_dist for a fixed interval.
- */
- unsigned int kf_max_dist;
-
- /*
- * Temporal scalability settings (ts)
- */
-
- /*!\brief Number of coding layers
- *
- * This value specifies the number of coding layers to be used.
- */
- unsigned int ts_number_layers;
-
- /*!\brief Target bitrate for each layer
- *
- * These values specify the target coding bitrate for each coding layer.
- */
- unsigned int ts_target_bitrate[VPX_TS_MAX_LAYERS];
-
- /*!\brief Frame rate decimation factor for each layer
- *
- * These values specify the frame rate decimation factors to apply
- * to each layer.
- */
- unsigned int ts_rate_decimator[VPX_TS_MAX_LAYERS];
-
- /*!\brief Length of the sequence defining frame layer membership
- *
- * This value specifies the length of the sequence that defines the
- * membership of frames to layers. For example, if ts_periodicity=8 then
- * frames are assigned to coding layers with a repeated sequence of
- * length 8.
- */
- unsigned int ts_periodicity;
-
- /*!\brief Template defining the membership of frames to coding layers
- *
- * This array defines the membership of frames to coding layers. For a
- * 2-layer encoding that assigns even numbered frames to one layer (0)
- * and odd numbered frames to a second layer (1) with ts_periodicity=8,
- * then ts_layer_id = (0,1,0,1,0,1,0,1).
- */
- unsigned int ts_layer_id[VPX_TS_MAX_PERIODICITY];
- } vpx_codec_enc_cfg_t; /**< alias for struct vpx_codec_enc_cfg */
-
-
- /*!\brief Initialize an encoder instance
- *
- * Initializes a encoder context using the given interface. Applications
- * should call the vpx_codec_enc_init convenience macro instead of this
- * function directly, to ensure that the ABI version number parameter
- * is properly initialized.
- *
- * If the library was configured with --disable-multithread, this call
- * is not thread safe and should be guarded with a lock if being used
- * in a multithreaded context.
- *
- * In XMA mode (activated by setting VPX_CODEC_USE_XMA in the flags
- * parameter), the storage pointed to by the cfg parameter must be
- * kept readable and stable until all memory maps have been set.
- *
- * \param[in] ctx Pointer to this instance's context.
- * \param[in] iface Pointer to the algorithm interface to use.
- * \param[in] cfg Configuration to use, if known. May be NULL.
- * \param[in] flags Bitfield of VPX_CODEC_USE_* flags
- * \param[in] ver ABI version number. Must be set to
- * VPX_ENCODER_ABI_VERSION
- * \retval #VPX_CODEC_OK
- * The decoder algorithm initialized.
- * \retval #VPX_CODEC_MEM_ERROR
- * Memory allocation failed.
- */
- vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx,
- vpx_codec_iface_t *iface,
- vpx_codec_enc_cfg_t *cfg,
- vpx_codec_flags_t flags,
- int ver);
-
-
- /*!\brief Convenience macro for vpx_codec_enc_init_ver()
- *
- * Ensures the ABI version parameter is properly set.
+ /*!\brief Stream timebase units
+ *
+ * Indicates the smallest interval of time, in seconds, used by the stream.
+ * For fixed frame rate material, or variable frame rate material where
+ * frames are timed at a multiple of a given clock (ex: video capture),
+ * the \ref RECOMMENDED method is to set the timebase to the reciprocal
+ * of the frame rate (ex: 1001/30000 for 29.970 Hz NTSC). This allows the
+ * pts to correspond to the frame number, which can be handy. For
+ * re-encoding video from containers with absolute time timestamps, the
+ * \ref RECOMMENDED method is to set the timebase to that of the parent
+ * container or multimedia framework (ex: 1/1000 for ms, as in FLV).
*/
-#define vpx_codec_enc_init(ctx, iface, cfg, flags) \
- vpx_codec_enc_init_ver(ctx, iface, cfg, flags, VPX_ENCODER_ABI_VERSION)
+ struct vpx_rational g_timebase;
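To make the RECOMMENDED timebase choices concrete, a brief sketch (the values are illustrative):

    /* Illustrative only: the two RECOMMENDED timebase choices. */
    static void set_ntsc_timebase(vpx_codec_enc_cfg_t *cfg) {
      cfg->g_timebase.num = 1001;   /* reciprocal of 30000/1001 fps */
      cfg->g_timebase.den = 30000;  /* pts of frame n is then simply n */
    }

    static void set_millisecond_timebase(vpx_codec_enc_cfg_t *cfg) {
      cfg->g_timebase.num = 1;      /* 1/1000 s, as in FLV */
      cfg->g_timebase.den = 1000;   /* container timestamps pass through */
    }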
- /*!\brief Initialize multi-encoder instance
- *
- * Initializes multi-encoder context using the given interface.
- * Applications should call the vpx_codec_enc_init_multi convenience macro
- * instead of this function directly, to ensure that the ABI version number
- * parameter is properly initialized.
+ /*!\brief Enable error resilient modes.
*
- * In XMA mode (activated by setting VPX_CODEC_USE_XMA in the flags
- * parameter), the storage pointed to by the cfg parameter must be
- * kept readable and stable until all memory maps have been set.
+ * The error resilient bitfield indicates to the encoder which features
+ * it should enable to take measures for streaming over lossy or noisy
+ * links.
+ */
+ vpx_codec_er_flags_t g_error_resilient;
+
+
+ /*!\brief Multi-pass Encoding Mode
*
- * \param[in] ctx Pointer to this instance's context.
- * \param[in] iface Pointer to the algorithm interface to use.
- * \param[in] cfg Configuration to use, if known. May be NULL.
- * \param[in] num_enc Total number of encoders.
- * \param[in] flags Bitfield of VPX_CODEC_USE_* flags
- * \param[in] dsf Pointer to down-sampling factors.
- * \param[in] ver ABI version number. Must be set to
- * VPX_ENCODER_ABI_VERSION
- * \retval #VPX_CODEC_OK
- * The decoder algorithm initialized.
- * \retval #VPX_CODEC_MEM_ERROR
- * Memory allocation failed.
+ * This value should be set to the current phase for multi-pass encoding.
+ * For single pass, set to #VPX_RC_ONE_PASS.
*/
- vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t *ctx,
- vpx_codec_iface_t *iface,
- vpx_codec_enc_cfg_t *cfg,
- int num_enc,
- vpx_codec_flags_t flags,
- vpx_rational_t *dsf,
- int ver);
+ enum vpx_enc_pass g_pass;
- /*!\brief Convenience macro for vpx_codec_enc_init_multi_ver()
+ /*!\brief Allow lagged encoding
+ *
+ * If set, this value allows the encoder to consume a number of input
+ * frames before producing output frames. This allows the encoder to
+ * base decisions for the current frame on future frames. This does
+ * increase the latency of the encoding pipeline, so it is not appropriate
+ * in all situations (ex: realtime encoding).
*
- * Ensures the ABI version parameter is properly set.
+ * Note that this is a maximum value -- the encoder may produce frames
+ * sooner than the given limit. Set this value to 0 to disable this
+ * feature.
*/
-#define vpx_codec_enc_init_multi(ctx, iface, cfg, num_enc, flags, dsf) \
- vpx_codec_enc_init_multi_ver(ctx, iface, cfg, num_enc, flags, dsf, \
- VPX_ENCODER_ABI_VERSION)
+ unsigned int g_lag_in_frames;
- /*!\brief Get a default configuration
+ /*
+ * rate control settings (rc)
+ */
+
+ /*!\brief Temporal resampling configuration, if supported by the codec.
*
- * Initializes a encoder configuration structure with default values. Supports
- * the notion of "usages" so that an algorithm may offer different default
- * settings depending on the user's intended goal. This function \ref SHOULD
- * be called by all applications to initialize the configuration structure
- * before specializing the configuration with application specific values.
+ * Temporal resampling allows the codec to "drop" frames as a strategy to
+ * meet its target data rate. This can cause temporal discontinuities in
+ * the encoded video, which may appear as stuttering during playback. This
+ * trade-off is often acceptable, but for many applications is not. It can
+ * be disabled in these cases.
*
- * \param[in] iface Pointer to the algorithm interface to use.
- * \param[out] cfg Configuration buffer to populate
- * \param[in] usage End usage. Set to 0 or use codec specific values.
+ * Note that not all codecs support this feature. All VPx codecs do.
+ * For other codecs, consult the documentation for that algorithm.
*
- * \retval #VPX_CODEC_OK
- * The configuration was populated.
- * \retval #VPX_CODEC_INCAPABLE
- * Interface is not an encoder interface.
- * \retval #VPX_CODEC_INVALID_PARAM
- * A parameter was NULL, or the usage value was not recognized.
+ * This threshold is described as a percentage of the target data buffer.
+ * When the data buffer falls below this percentage of fullness, a
+ * dropped frame is indicated. Set the threshold to zero (0) to disable
+ * this feature.
*/
- vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface,
- vpx_codec_enc_cfg_t *cfg,
- unsigned int usage);
+ unsigned int rc_dropframe_thresh;
- /*!\brief Set or change configuration
+ /*!\brief Enable/disable spatial resampling, if supported by the codec.
*
- * Reconfigures an encoder instance according to the given configuration.
+ * Spatial resampling allows the codec to compress a lower resolution
+ * version of the frame, which is then upscaled by the encoder to the
+ * correct presentation resolution. This increases visual quality at
+ * low data rates, at the expense of CPU time on the encoder/decoder.
+ */
+ unsigned int rc_resize_allowed;
+
+
+ /*!\brief Spatial resampling up watermark.
*
- * \param[in] ctx Pointer to this instance's context
- * \param[in] cfg Configuration buffer to use
+ * This threshold is described as a percentage of the target data buffer.
+ * When the data buffer rises above this percentage of fullness, the
+ * encoder will step up to a higher resolution version of the frame.
+ */
+ unsigned int rc_resize_up_thresh;
+
+
+ /*!\brief Spatial resampling down watermark.
*
- * \retval #VPX_CODEC_OK
- * The configuration was populated.
- * \retval #VPX_CODEC_INCAPABLE
- * Interface is not an encoder interface.
- * \retval #VPX_CODEC_INVALID_PARAM
- * A parameter was NULL, or the usage value was not recognized.
+ * This threshold is described as a percentage of the target data buffer.
+ * When the data buffer falls below this percentage of fullness, the
+ * encoder will step down to a lower resolution version of the frame.
*/
- vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx,
- const vpx_codec_enc_cfg_t *cfg);
+ unsigned int rc_resize_down_thresh;
- /*!\brief Get global stream headers
+ /*!\brief Rate control algorithm to use.
*
- * Retrieves a stream level global header packet, if supported by the codec.
+ * Indicates whether the end usage of this stream is to be streamed over
+ * a bandwidth constrained link, indicating that Constant Bit Rate (CBR)
+ * mode should be used, or whether it will be played back on a high
+ * bandwidth link, as from a local disk, where higher variations in
+ * bitrate are acceptable.
+ */
+ enum vpx_rc_mode rc_end_usage;
+
+
+ /*!\brief Two-pass stats buffer.
*
- * \param[in] ctx Pointer to this instance's context
+ * A buffer containing all of the stats packets produced in the first
+ * pass, concatenated.
+ */
+ struct vpx_fixed_buf rc_twopass_stats_in;
+
+
+ /*!\brief Target data rate
*
- * \retval NULL
- * Encoder does not support global header
- * \retval Non-NULL
- * Pointer to buffer containing global header packet
+ * Target bandwidth to use for this stream, in kilobits per second.
*/
- vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx);
+ unsigned int rc_target_bitrate;
-#define VPX_DL_REALTIME (1) /**< deadline parameter analogous to
- * VPx REALTIME mode. */
-#define VPX_DL_GOOD_QUALITY (1000000) /**< deadline parameter analogous to
- * VPx GOOD QUALITY mode. */
-#define VPX_DL_BEST_QUALITY (0) /**< deadline parameter analogous to
- * VPx BEST QUALITY mode. */
- /*!\brief Encode a frame
- *
- * Encodes a video frame at the given "presentation time." The presentation
- * time stamp (PTS) \ref MUST be strictly increasing.
- *
- * The encoder supports the notion of a soft real-time deadline. Given a
- * non-zero value to the deadline parameter, the encoder will make a "best
- * effort" guarantee to return before the given time slice expires. It is
- * implicit that limiting the available time to encode will degrade the
- * output quality. The encoder can be given an unlimited time to produce the
- * best possible frame by specifying a deadline of '0'. This deadline
- * supercedes the VPx notion of "best quality, good quality, realtime".
- * Applications that wish to map these former settings to the new deadline
- * based system can use the symbols #VPX_DL_REALTIME, #VPX_DL_GOOD_QUALITY,
- * and #VPX_DL_BEST_QUALITY.
- *
- * When the last frame has been passed to the encoder, this function should
- * continue to be called, with the img parameter set to NULL. This will
- * signal the end-of-stream condition to the encoder and allow it to encode
- * any held buffers. Encoding is complete when vpx_codec_encode() is called
- * and vpx_codec_get_cx_data() returns no data.
- *
- * \param[in] ctx Pointer to this instance's context
- * \param[in] img Image data to encode, NULL to flush.
- * \param[in] pts Presentation time stamp, in timebase units.
- * \param[in] duration Duration to show frame, in timebase units.
- * \param[in] flags Flags to use for encoding this frame.
- * \param[in] deadline Time to spend encoding, in microseconds. (0=infinite)
- *
- * \retval #VPX_CODEC_OK
- * The configuration was populated.
- * \retval #VPX_CODEC_INCAPABLE
- * Interface is not an encoder interface.
- * \retval #VPX_CODEC_INVALID_PARAM
- * A parameter was NULL, the image format is unsupported, etc.
+ /*
+ * quantizer settings
*/
- vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx,
- const vpx_image_t *img,
- vpx_codec_pts_t pts,
- unsigned long duration,
- vpx_enc_frame_flags_t flags,
- unsigned long deadline);
- /*!\brief Set compressed data output buffer
+
+ /*!\brief Minimum (Best Quality) Quantizer
*
- * Sets the buffer that the codec should output the compressed data
- * into. This call effectively sets the buffer pointer returned in the
- * next VPX_CODEC_CX_FRAME_PKT packet. Subsequent packets will be
- * appended into this buffer. The buffer is preserved across frames,
- * so applications must periodically call this function after flushing
- * the accumulated compressed data to disk or to the network to reset
- * the pointer to the buffer's head.
+ * The quantizer is the most direct control over the quality of the
+ * encoded image. The range of valid values for the quantizer is codec
+ * specific. Consult the documentation for the codec to determine the
+ * values to use. To determine the range programmatically, call
+ * vpx_codec_enc_config_default() with a usage value of 0.
+ */
+ unsigned int rc_min_quantizer;
+
+
+ /*!\brief Maximum (Worst Quality) Quantizer
*
- * `pad_before` bytes will be skipped before writing the compressed
- * data, and `pad_after` bytes will be appended to the packet. The size
- * of the packet will be the sum of the size of the actual compressed
- * data, pad_before, and pad_after. The padding bytes will be preserved
- * (not overwritten).
+ * The quantizer is the most direct control over the quality of the
+ * encoded image. The range of valid values for the quantizer is codec
+ * specific. Consult the documentation for the codec to determine the
+ * values to use. To determine the range programmatically, call
+ * vpx_codec_enc_config_default() with a usage value of 0.
+ */
+ unsigned int rc_max_quantizer;
+
+
+ /*
+ * bitrate tolerance
+ */
+
+
+ /*!\brief Rate control adaptation undershoot control
*
- * Note that calling this function does not guarantee that the returned
- * compressed data will be placed into the specified buffer. In the
- * event that the encoded data will not fit into the buffer provided,
- * the returned packet \ref MAY point to an internal buffer, as it would
- * if this call were never used. In this event, the output packet will
- * NOT have any padding, and the application must free space and copy it
- * to the proper place. This is of particular note in configurations
- * that may output multiple packets for a single encoded frame (e.g., lagged
- * encoding) or if the application does not reset the buffer periodically.
+ * This value, expressed as a percentage of the target bitrate,
+ * controls the maximum allowed adaptation speed of the codec.
+ * This factor controls the maximum amount of bits that can
+ * be subtracted from the target bitrate in order to compensate
+ * for prior overshoot.
*
- * Applications may restore the default behavior of the codec providing
- * the compressed data buffer by calling this function with a NULL
- * buffer.
+ * Valid values in the range 0-1000.
+ */
+ unsigned int rc_undershoot_pct;
+
+
+ /*!\brief Rate control adaptation overshoot control
*
- * Applications \ref MUSTNOT call this function during iteration of
- * vpx_codec_get_cx_data().
+ * This value, expressed as a percentage of the target bitrate,
+ * controls the maximum allowed adaptation speed of the codec.
+ * This factor controls the maximum amount of bits that can
+ * be added to the target bitrate in order to compensate for
+ * prior undershoot.
*
- * \param[in] ctx Pointer to this instance's context
- * \param[in] buf Buffer to store compressed data into
- * \param[in] pad_before Bytes to skip before writing compressed data
- * \param[in] pad_after Bytes to skip after writing compressed data
+ * Valid values in the range 0-1000.
+ */
+ unsigned int rc_overshoot_pct;
+
+
+ /*
+ * decoder buffer model parameters
+ */
+
+
+ /*!\brief Decoder Buffer Size
*
- * \retval #VPX_CODEC_OK
- * The buffer was set successfully.
- * \retval #VPX_CODEC_INVALID_PARAM
- * A parameter was NULL, the image format is unsupported, etc.
+ * This value indicates the amount of data that may be buffered by the
+ * decoding application. Note that this value is expressed in units of
+ * time (milliseconds). For example, a value of 5000 indicates that the
+ * client will buffer (at least) 5000ms worth of encoded data. Use the
+ * target bitrate (#rc_target_bitrate) to convert to bits/bytes, if
+ * necessary.
*/
- vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx,
- const vpx_fixed_buf_t *buf,
- unsigned int pad_before,
- unsigned int pad_after);
+ unsigned int rc_buf_sz;
- /*!\brief Encoded data iterator
+ /*!\brief Decoder Buffer Initial Size
*
- * Iterates over a list of data packets to be passed from the encoder to the
- * application. The different kinds of packets available are enumerated in
- * #vpx_codec_cx_pkt_kind.
+ * This value indicates the amount of data that will be buffered by the
+ * decoding application prior to beginning playback. This value is
+ * expressed in units of time (milliseconds). Use the target bitrate
+ * (#rc_target_bitrate) to convert to bits/bytes, if necessary.
+ */
+ unsigned int rc_buf_initial_sz;
+
+
+ /*!\brief Decoder Buffer Optimal Size
*
- * #VPX_CODEC_CX_FRAME_PKT packets should be passed to the application's
- * muxer. Multiple compressed frames may be in the list.
- * #VPX_CODEC_STATS_PKT packets should be appended to a global buffer.
+ * This value indicates the amount of data that the encoder should try
+ * to maintain in the decoder's buffer. This value is expressed in units
+ * of time (milliseconds). Use the target bitrate (#rc_target_bitrate)
+ * to convert to bits/bytes, if necessary.
+ */
+ unsigned int rc_buf_optimal_sz;
+
+
+ /*
+ * 2 pass rate control parameters
+ */
+
+
+ /*!\brief Two-pass mode CBR/VBR bias
*
- * The application \ref MUST silently ignore any packet kinds that it does
- * not recognize or support.
+ * Bias, expressed on a scale of 0 to 100, for determining target size
+ * for the current frame. The value 0 indicates the optimal CBR mode
+ * value should be used. The value 100 indicates the optimal VBR mode
+ * value should be used. Values in between indicate which way the
+ * encoder should "lean."
+ */
+ unsigned int rc_2pass_vbr_bias_pct; /**< RC mode bias between CBR and VBR (0-100: 0->CBR, 100->VBR) */
+
+
+ /*!\brief Two-pass mode per-GOP minimum bitrate
*
- * The data buffers returned from this function are only guaranteed to be
- * valid until the application makes another call to any vpx_codec_* function.
+ * This value, expressed as a percentage of the target bitrate, indicates
+ * the minimum bitrate to be used for a single GOP (aka "section")
+ */
+ unsigned int rc_2pass_vbr_minsection_pct;
+
+
+ /*!\brief Two-pass mode per-GOP maximum bitrate
*
- * \param[in] ctx Pointer to this instance's context
- * \param[in,out] iter Iterator storage, initialized to NULL
+ * This value, expressed as a percentage of the target bitrate, indicates
+ * the maximum bitrate to be used for a single GOP (aka "section")
+ */
+ unsigned int rc_2pass_vbr_maxsection_pct;
+
+
+ /*
+ * keyframing settings (kf)
+ */
+
+ /*!\brief Keyframe placement mode
*
- * \return Returns a pointer to an output data packet (compressed frame data,
- * two-pass statistics, etc.) or NULL to signal end-of-list.
+ * This value indicates whether the encoder should place keyframes at a
+ * fixed interval, or determine the optimal placement automatically
+ * (as governed by the #kf_min_dist and #kf_max_dist parameters)
+ */
+ enum vpx_kf_mode kf_mode;
+
+
+ /*!\brief Keyframe minimum interval
*
+ * This value, expressed as a number of frames, prevents the encoder from
+ * placing a keyframe nearer than kf_min_dist to the previous keyframe: at
+ * least kf_min_dist non-keyframes will be coded before the next
+ * keyframe. Set kf_min_dist equal to kf_max_dist for a fixed interval.
*/
- const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx,
- vpx_codec_iter_t *iter);
+ unsigned int kf_min_dist;
- /*!\brief Get Preview Frame
+ /*!\brief Keyframe maximum interval
*
- * Returns an image that can be used as a preview. Shows the image as it would
- * exist at the decompressor. The application \ref MUST NOT write into this
- * image buffer.
+ * This value, expressed as a number of frames, forces the encoder to code
+ * a keyframe if one has not been coded in the last kf_max_dist frames.
+ * A value of 0 implies all frames will be keyframes. Set kf_min_dist
+ * equal to kf_max_dist for a fixed interval.
+ */
+ unsigned int kf_max_dist;
+
+ /*
+ * Temporal scalability settings (ts)
+ */
+
+ /*!\brief Number of coding layers
*
- * \param[in] ctx Pointer to this instance's context
+ * This value specifies the number of coding layers to be used.
+ */
+ unsigned int ts_number_layers;
+
+ /*!\brief Target bitrate for each layer
+ *
+ * These values specify the target coding bitrate for each coding layer.
+ */
+ unsigned int ts_target_bitrate[VPX_TS_MAX_LAYERS];
+
+ /*!\brief Frame rate decimation factor for each layer
+ *
+ * These values specify the frame rate decimation factors to apply
+ * to each layer.
+ */
+ unsigned int ts_rate_decimator[VPX_TS_MAX_LAYERS];
+
+ /*!\brief Length of the sequence defining frame layer membership
*
- * \return Returns a pointer to a preview image, or NULL if no image is
- * available.
+ * This value specifies the length of the sequence that defines the
+ * membership of frames to layers. For example, if ts_periodicity=8 then
+ * frames are assigned to coding layers with a repeated sequence of
+ * length 8.
+ */
+ unsigned int ts_periodicity;
+
+ /*!\brief Template defining the membership of frames to coding layers
*
+ * This array defines the membership of frames to coding layers. For a
+ * 2-layer encoding that assigns even-numbered frames to layer 0 and
+ * odd-numbered frames to layer 1 with ts_periodicity=8, ts_layer_id
+ * would be (0,1,0,1,0,1,0,1). A configuration sketch follows this
+ * structure.
*/
- const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx);
+ unsigned int ts_layer_id[VPX_TS_MAX_PERIODICITY];
+ } vpx_codec_enc_cfg_t; /**< alias for struct vpx_codec_enc_cfg */
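The sketch referenced in the ts_layer_id comment above: a hypothetical 2-layer temporal configuration. The bitrates and decimators are example values, not defaults:

    #include <string.h>  /* memcpy */

    static void set_two_layer_ts(vpx_codec_enc_cfg_t *cfg) {
      const unsigned int ids[8] = { 0, 1, 0, 1, 0, 1, 0, 1 };

      cfg->ts_number_layers = 2;
      cfg->ts_periodicity = 8;
      cfg->ts_target_bitrate[0] = 150;  /* base layer alone, kbps */
      cfg->ts_target_bitrate[1] = 300;  /* base + enhancement, kbps */
      cfg->ts_rate_decimator[0] = 2;    /* layer 0 carries every 2nd frame */
      cfg->ts_rate_decimator[1] = 1;    /* both layers: full frame rate */
      memcpy(cfg->ts_layer_id, ids, sizeof(ids));  /* even->0, odd->1 */
    }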
+
+
+ /*!\brief Initialize an encoder instance
+ *
+ * Initializes an encoder context using the given interface. Applications
+ * should call the vpx_codec_enc_init convenience macro instead of this
+ * function directly, to ensure that the ABI version number parameter
+ * is properly initialized.
+ *
+ * If the library was configured with --disable-multithread, this call
+ * is not thread safe and should be guarded with a lock if being used
+ * in a multithreaded context.
+ *
+ * In XMA mode (activated by setting VPX_CODEC_USE_XMA in the flags
+ * parameter), the storage pointed to by the cfg parameter must be
+ * kept readable and stable until all memory maps have been set.
+ *
+ * \param[in] ctx Pointer to this instance's context.
+ * \param[in] iface Pointer to the algorithm interface to use.
+ * \param[in] cfg Configuration to use, if known. May be NULL.
+ * \param[in] flags Bitfield of VPX_CODEC_USE_* flags
+ * \param[in] ver ABI version number. Must be set to
+ * VPX_ENCODER_ABI_VERSION
+ * \retval #VPX_CODEC_OK
+ * The encoder algorithm initialized.
+ * \retval #VPX_CODEC_MEM_ERROR
+ * Memory allocation failed.
+ */
+ vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx,
+ vpx_codec_iface_t *iface,
+ vpx_codec_enc_cfg_t *cfg,
+ vpx_codec_flags_t flags,
+ int ver);
+
+
+ /*!\brief Convenience macro for vpx_codec_enc_init_ver()
+ *
+ * Ensures the ABI version parameter is properly set.
+ */
+#define vpx_codec_enc_init(ctx, iface, cfg, flags) \
+ vpx_codec_enc_init_ver(ctx, iface, cfg, flags, VPX_ENCODER_ABI_VERSION)
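A hedged initialization sketch tying the macro to the configuration calls documented below. vpx_codec_vp8_cx() is one real encoder interface (declared in vpx/vp8cx.h); any encoder interface works, and the frame size and bitrate are example values:

    #include <stdio.h>
    #include "vpx/vpx_encoder.h"
    #include "vpx/vp8cx.h"

    static int open_encoder(vpx_codec_ctx_t *codec) {
      vpx_codec_enc_cfg_t cfg;

      /* Start from the codec's defaults for usage 0, then specialize. */
      if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &cfg, 0) !=
          VPX_CODEC_OK) {
        fprintf(stderr, "failed to get default config\n");
        return -1;
      }
      cfg.g_w = 640;
      cfg.g_h = 480;
      cfg.rc_target_bitrate = 500;  /* kbps */

      if (vpx_codec_enc_init(codec, vpx_codec_vp8_cx(), &cfg, 0) !=
          VPX_CODEC_OK) {
        fprintf(stderr, "failed to initialize encoder\n");
        return -1;
      }
      return 0;
    }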
+
+
+ /*!\brief Initialize multi-encoder instance
+ *
+ * Initializes multi-encoder context using the given interface.
+ * Applications should call the vpx_codec_enc_init_multi convenience macro
+ * instead of this function directly, to ensure that the ABI version number
+ * parameter is properly initialized.
+ *
+ * In XMA mode (activated by setting VPX_CODEC_USE_XMA in the flags
+ * parameter), the storage pointed to by the cfg parameter must be
+ * kept readable and stable until all memory maps have been set.
+ *
+ * \param[in] ctx Pointer to this instance's context.
+ * \param[in] iface Pointer to the algorithm interface to use.
+ * \param[in] cfg Configuration to use, if known. May be NULL.
+ * \param[in] num_enc Total number of encoders.
+ * \param[in] flags Bitfield of VPX_CODEC_USE_* flags
+ * \param[in] dsf Pointer to down-sampling factors.
+ * \param[in] ver ABI version number. Must be set to
+ * VPX_ENCODER_ABI_VERSION
+ * \retval #VPX_CODEC_OK
+ * The encoder algorithm initialized.
+ * \retval #VPX_CODEC_MEM_ERROR
+ * Memory allocation failed.
+ */
+ vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t *ctx,
+ vpx_codec_iface_t *iface,
+ vpx_codec_enc_cfg_t *cfg,
+ int num_enc,
+ vpx_codec_flags_t flags,
+ vpx_rational_t *dsf,
+ int ver);
+
+
+ /*!\brief Convenience macro for vpx_codec_enc_init_multi_ver()
+ *
+ * Ensures the ABI version parameter is properly set.
+ */
+#define vpx_codec_enc_init_multi(ctx, iface, cfg, num_enc, flags, dsf) \
+ vpx_codec_enc_init_multi_ver(ctx, iface, cfg, num_enc, flags, dsf, \
+ VPX_ENCODER_ABI_VERSION)
+
+
+ /*!\brief Get a default configuration
+ *
+ * Initializes an encoder configuration structure with default values. Supports
+ * the notion of "usages" so that an algorithm may offer different default
+ * settings depending on the user's intended goal. This function \ref SHOULD
+ * be called by all applications to initialize the configuration structure
+ * before specializing the configuration with application specific values.
+ *
+ * \param[in] iface Pointer to the algorithm interface to use.
+ * \param[out] cfg Configuration buffer to populate
+ * \param[in] usage End usage. Set to 0 or use codec specific values.
+ *
+ * \retval #VPX_CODEC_OK
+ * The configuration was populated.
+ * \retval #VPX_CODEC_INCAPABLE
+ * Interface is not an encoder interface.
+ * \retval #VPX_CODEC_INVALID_PARAM
+ * A parameter was NULL, or the usage value was not recognized.
+ */
+ vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface,
+ vpx_codec_enc_cfg_t *cfg,
+ unsigned int usage);
+
+
+ /*!\brief Set or change configuration
+ *
+ * Reconfigures an encoder instance according to the given configuration.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in] cfg Configuration buffer to use
+ *
+ * \retval #VPX_CODEC_OK
+ * The configuration was applied.
+ * \retval #VPX_CODEC_INCAPABLE
+ * Interface is not an encoder interface.
+ * \retval #VPX_CODEC_INVALID_PARAM
+ * A parameter was NULL, or the usage value was not recognized.
+ */
+ vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx,
+ const vpx_codec_enc_cfg_t *cfg);
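A small sketch of mid-stream reconfiguration, under the same assumptions as the initialization example above (codec and cfg already set up):

    /* Lower the target bitrate without re-initializing the encoder. */
    static int set_bitrate(vpx_codec_ctx_t *codec, vpx_codec_enc_cfg_t *cfg,
                           unsigned int kbps) {
      cfg->rc_target_bitrate = kbps;
      return vpx_codec_enc_config_set(codec, cfg) == VPX_CODEC_OK ? 0 : -1;
    }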
+
+
+ /*!\brief Get global stream headers
+ *
+ * Retrieves a stream level global header packet, if supported by the codec.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ *
+ * \retval NULL
+ * Encoder does not support global header
+ * \retval Non-NULL
+ * Pointer to buffer containing global header packet
+ */
+ vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx);
- /*!@} - end defgroup encoder*/
+#define VPX_DL_REALTIME (1) /**< deadline parameter analogous to
+ * VPx REALTIME mode. */
+#define VPX_DL_GOOD_QUALITY (1000000) /**< deadline parameter analogous to
+ * VPx GOOD QUALITY mode. */
+#define VPX_DL_BEST_QUALITY (0) /**< deadline parameter analogous to
+ * VPx BEST QUALITY mode. */
+ /*!\brief Encode a frame
+ *
+ * Encodes a video frame at the given "presentation time." The presentation
+ * time stamp (PTS) \ref MUST be strictly increasing.
+ *
+ * The encoder supports the notion of a soft real-time deadline. Given a
+ * non-zero value to the deadline parameter, the encoder will make a "best
+ * effort" guarantee to return before the given time slice expires. It is
+ * implicit that limiting the available time to encode will degrade the
+ * output quality. The encoder can be given an unlimited time to produce the
+ * best possible frame by specifying a deadline of '0'. This deadline
+ * supersedes the VPx notion of "best quality, good quality, realtime".
+ * Applications that wish to map these former settings to the new deadline
+ * based system can use the symbols #VPX_DL_REALTIME, #VPX_DL_GOOD_QUALITY,
+ * and #VPX_DL_BEST_QUALITY.
+ *
+ * When the last frame has been passed to the encoder, this function should
+ * continue to be called, with the img parameter set to NULL. This will
+ * signal the end-of-stream condition to the encoder and allow it to encode
+ * any held buffers. Encoding is complete when vpx_codec_encode() is called
+ * and vpx_codec_get_cx_data() returns no data.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in] img Image data to encode, NULL to flush.
+ * \param[in] pts Presentation time stamp, in timebase units.
+ * \param[in] duration Duration to show frame, in timebase units.
+ * \param[in] flags Flags to use for encoding this frame.
+ * \param[in] deadline Time to spend encoding, in microseconds. (0=infinite)
+ *
+ * \retval #VPX_CODEC_OK
+ * The frame was successfully encoded.
+ * \retval #VPX_CODEC_INCAPABLE
+ * Interface is not an encoder interface.
+ * \retval #VPX_CODEC_INVALID_PARAM
+ * A parameter was NULL, the image format is unsupported, etc.
+ */
+ vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx,
+ const vpx_image_t *img,
+ vpx_codec_pts_t pts,
+ unsigned long duration,
+ vpx_enc_frame_flags_t flags,
+ unsigned long deadline);
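A sketch of the encode-then-flush pattern described above. get_frame() is a hypothetical application frame source; packets are drained after every call, as shown under vpx_codec_get_cx_data() below:

    /* get_frame() is a hypothetical source of vpx_image_t frames. */
    extern const vpx_image_t *get_frame(void);

    static void encode_all(vpx_codec_ctx_t *codec) {
      vpx_codec_pts_t pts = 0;
      const vpx_image_t *img;

      while ((img = get_frame()) != NULL) {
        vpx_codec_encode(codec, img, pts, 1 /* duration */,
                         0 /* flags */, VPX_DL_GOOD_QUALITY);
        pts += 1;
        /* ... drain packets with vpx_codec_get_cx_data() ... */
      }
      /* End of stream: flush with a NULL image until no data remains. */
      vpx_codec_encode(codec, NULL, pts, 1, 0, VPX_DL_GOOD_QUALITY);
    }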
+
+ /*!\brief Set compressed data output buffer
+ *
+ * Sets the buffer that the codec should output the compressed data
+ * into. This call effectively sets the buffer pointer returned in the
+ * next VPX_CODEC_CX_FRAME_PKT packet. Subsequent packets will be
+ * appended into this buffer. The buffer is preserved across frames,
+ * so applications must periodically call this function after flushing
+ * the accumulated compressed data to disk or to the network to reset
+ * the pointer to the buffer's head.
+ *
+ * `pad_before` bytes will be skipped before writing the compressed
+ * data, and `pad_after` bytes will be appended to the packet. The size
+ * of the packet will be the sum of the size of the actual compressed
+ * data, pad_before, and pad_after. The padding bytes will be preserved
+ * (not overwritten).
+ *
+ * Note that calling this function does not guarantee that the returned
+ * compressed data will be placed into the specified buffer. In the
+ * event that the encoded data will not fit into the buffer provided,
+ * the returned packet \ref MAY point to an internal buffer, as it would
+ * if this call were never used. In this event, the output packet will
+ * NOT have any padding, and the application must make room and copy the data
+ * to the proper place. This is of particular note in configurations
+ * that may output multiple packets for a single encoded frame (e.g., lagged
+ * encoding) or if the application does not reset the buffer periodically.
+ *
+ * Applications may restore the default behavior of the codec providing
+ * the compressed data buffer by calling this function with a NULL
+ * buffer.
+ *
+ * Applications \ref MUSTNOT call this function during iteration of
+ * vpx_codec_get_cx_data().
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in] buf Buffer to store compressed data into
+ * \param[in] pad_before Bytes to skip before writing compressed data
+ * \param[in] pad_after Bytes to append after writing compressed data
+ *
+ * \retval #VPX_CODEC_OK
+ * The buffer was set successfully.
+ * \retval #VPX_CODEC_INVALID_PARAM
+ * A parameter was NULL, the image format is unsupported, etc.
+ */
+ vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx,
+ const vpx_fixed_buf_t *buf,
+ unsigned int pad_before,
+ unsigned int pad_after);
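A sketch of the padding behavior under assumed names (mem/mem_sz stand in for an application-owned buffer): reserve four preserved bytes before each packet, e.g. for a length prefix the application fills in later, then restore the default behavior with a NULL buffer:

    static vpx_codec_err_t use_app_buffer(vpx_codec_ctx_t *codec,
                                          void *mem, size_t mem_sz) {
      vpx_fixed_buf_t out;
      out.buf = mem;
      out.sz = mem_sz;
      /* Returned packets will start 4 preserved bytes into the buffer. */
      return vpx_codec_set_cx_data_buf(codec, &out, 4 /* pad_before */,
                                       0 /* pad_after */);
    }

    /* Passing NULL restores the codec-provided buffers:
     * vpx_codec_set_cx_data_buf(codec, NULL, 0, 0); */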
+
+
+ /*!\brief Encoded data iterator
+ *
+ * Iterates over a list of data packets to be passed from the encoder to the
+ * application. The different kinds of packets available are enumerated in
+ * #vpx_codec_cx_pkt_kind.
+ *
+ * #VPX_CODEC_CX_FRAME_PKT packets should be passed to the application's
+ * muxer. Multiple compressed frames may be in the list.
+ * #VPX_CODEC_STATS_PKT packets should be appended to a global buffer.
+ *
+ * The application \ref MUST silently ignore any packet kinds that it does
+ * not recognize or support.
+ *
+ * The data buffers returned from this function are only guaranteed to be
+ * valid until the application makes another call to any vpx_codec_* function.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ * \param[in,out] iter Iterator storage, initialized to NULL
+ *
+ * \return Returns a pointer to an output data packet (compressed frame data,
+ * two-pass statistics, etc.) or NULL to signal end-of-list.
+ *
+ */
+ const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx,
+ vpx_codec_iter_t *iter);
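The canonical drain loop implied by the iterator semantics above; write_frame() and append_stats() are hypothetical application sinks:

    extern void write_frame(const void *buf, size_t sz, vpx_codec_pts_t pts);
    extern void append_stats(const void *buf, size_t sz);

    static void drain_packets(vpx_codec_ctx_t *codec) {
      vpx_codec_iter_t iter = NULL;
      const vpx_codec_cx_pkt_t *pkt;

      while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) {
        switch (pkt->kind) {
          case VPX_CODEC_CX_FRAME_PKT:  /* compressed frames -> muxer */
            write_frame(pkt->data.frame.buf, pkt->data.frame.sz,
                        pkt->data.frame.pts);
            break;
          case VPX_CODEC_STATS_PKT:     /* first-pass stats -> one buffer */
            append_stats(pkt->data.twopass_stats.buf,
                         pkt->data.twopass_stats.sz);
            break;
          default:                      /* silently ignore unknown kinds */
            break;
        }
      }
    }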
+
+
+ /*!\brief Get Preview Frame
+ *
+ * Returns an image that can be used as a preview. Shows the image as it would
+ * exist at the decompressor. The application \ref MUST NOT write into this
+ * image buffer.
+ *
+ * \param[in] ctx Pointer to this instance's context
+ *
+ * \return Returns a pointer to a preview image, or NULL if no image is
+ * available.
+ *
+ */
+ const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx);
+
+
+ /*!@} - end defgroup encoder*/
#endif
#ifdef __cplusplus
diff --git a/libvpx/vpx/vpx_image.h b/libvpx/vpx/vpx_image.h
index 3e42447..c304bac 100644
--- a/libvpx/vpx/vpx_image.h
+++ b/libvpx/vpx/vpx_image.h
@@ -20,14 +20,14 @@ extern "C" {
#ifndef VPX_IMAGE_H
#define VPX_IMAGE_H
- /*!\brief Current ABI version number
- *
- * \internal
- * If this file is altered in any way that changes the ABI, this value
- * must be bumped. Examples include, but are not limited to, changing
- * types, removing or reassigning enums, adding/removing/rearranging
- * fields to structures
- */
+ /*!\brief Current ABI version number
+ *
+ * \internal
+ * If this file is altered in any way that changes the ABI, this value
+ * must be bumped. Examples include, but are not limited to, changing
+ * types, removing or reassigning enums, adding/removing/rearranging
+ * fields to structures
+ */
#define VPX_IMAGE_ABI_VERSION (1) /**<\hideinitializer*/
@@ -36,41 +36,43 @@ extern "C" {
#define VPX_IMG_FMT_HAS_ALPHA 0x400 /**< Image has an alpha channel component */
- /*!\brief List of supported image formats */
- typedef enum vpx_img_fmt {
- VPX_IMG_FMT_NONE,
- VPX_IMG_FMT_RGB24, /**< 24 bit per pixel packed RGB */
- VPX_IMG_FMT_RGB32, /**< 32 bit per pixel packed 0RGB */
- VPX_IMG_FMT_RGB565, /**< 16 bit per pixel, 565 */
- VPX_IMG_FMT_RGB555, /**< 16 bit per pixel, 555 */
- VPX_IMG_FMT_UYVY, /**< UYVY packed YUV */
- VPX_IMG_FMT_YUY2, /**< YUYV packed YUV */
- VPX_IMG_FMT_YVYU, /**< YVYU packed YUV */
- VPX_IMG_FMT_BGR24, /**< 24 bit per pixel packed BGR */
- VPX_IMG_FMT_RGB32_LE, /**< 32 bit packed BGR0 */
- VPX_IMG_FMT_ARGB, /**< 32 bit packed ARGB, alpha=255 */
- VPX_IMG_FMT_ARGB_LE, /**< 32 bit packed BGRA, alpha=255 */
- VPX_IMG_FMT_RGB565_LE, /**< 16 bit per pixel, gggbbbbb rrrrrggg */
- VPX_IMG_FMT_RGB555_LE, /**< 16 bit per pixel, gggbbbbb 0rrrrrgg */
- VPX_IMG_FMT_YV12 = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 1, /**< planar YVU */
- VPX_IMG_FMT_I420 = VPX_IMG_FMT_PLANAR | 2,
- VPX_IMG_FMT_VPXYV12 = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 3, /** < planar 4:2:0 format with vpx color space */
- VPX_IMG_FMT_VPXI420 = VPX_IMG_FMT_PLANAR | 4 /** < planar 4:2:0 format with vpx color space */
- }
- vpx_img_fmt_t; /**< alias for enum vpx_img_fmt */
+ /*!\brief List of supported image formats */
+ typedef enum vpx_img_fmt {
+ VPX_IMG_FMT_NONE,
+ VPX_IMG_FMT_RGB24, /**< 24 bit per pixel packed RGB */
+ VPX_IMG_FMT_RGB32, /**< 32 bit per pixel packed 0RGB */
+ VPX_IMG_FMT_RGB565, /**< 16 bit per pixel, 565 */
+ VPX_IMG_FMT_RGB555, /**< 16 bit per pixel, 555 */
+ VPX_IMG_FMT_UYVY, /**< UYVY packed YUV */
+ VPX_IMG_FMT_YUY2, /**< YUYV packed YUV */
+ VPX_IMG_FMT_YVYU, /**< YVYU packed YUV */
+ VPX_IMG_FMT_BGR24, /**< 24 bit per pixel packed BGR */
+ VPX_IMG_FMT_RGB32_LE, /**< 32 bit packed BGR0 */
+ VPX_IMG_FMT_ARGB, /**< 32 bit packed ARGB, alpha=255 */
+ VPX_IMG_FMT_ARGB_LE, /**< 32 bit packed BGRA, alpha=255 */
+ VPX_IMG_FMT_RGB565_LE, /**< 16 bit per pixel, gggbbbbb rrrrrggg */
+ VPX_IMG_FMT_RGB555_LE, /**< 16 bit per pixel, gggbbbbb 0rrrrrgg */
+ VPX_IMG_FMT_YV12 = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 1, /**< planar YVU */
+ VPX_IMG_FMT_I420 = VPX_IMG_FMT_PLANAR | 2,
+ VPX_IMG_FMT_VPXYV12 = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 3, /**< planar 4:2:0 format with vpx color space */
+ VPX_IMG_FMT_VPXI420 = VPX_IMG_FMT_PLANAR | 4, /**< planar 4:2:0 format with vpx color space */
+ VPX_IMG_FMT_I422 = VPX_IMG_FMT_PLANAR | 5,
+ VPX_IMG_FMT_I444 = VPX_IMG_FMT_PLANAR | 6,
+ VPX_IMG_FMT_444A = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_HAS_ALPHA | 7
+ } vpx_img_fmt_t; /**< alias for enum vpx_img_fmt */
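Because each enumerator ORs property flags with a small index, format properties can be tested with bitwise AND; a minimal sketch (not part of the header):

    static int fmt_is_planar_alpha(vpx_img_fmt_t fmt) {
      return (fmt & VPX_IMG_FMT_PLANAR) && (fmt & VPX_IMG_FMT_HAS_ALPHA);
    }
    /* fmt_is_planar_alpha(VPX_IMG_FMT_444A) == 1,
       fmt_is_planar_alpha(VPX_IMG_FMT_I420) == 0 */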
#if !defined(VPX_CODEC_DISABLE_COMPAT) || !VPX_CODEC_DISABLE_COMPAT
#define IMG_FMT_PLANAR VPX_IMG_FMT_PLANAR /**< \deprecated Use #VPX_IMG_FMT_PLANAR */
#define IMG_FMT_UV_FLIP VPX_IMG_FMT_UV_FLIP /**< \deprecated Use #VPX_IMG_FMT_UV_FLIP */
#define IMG_FMT_HAS_ALPHA VPX_IMG_FMT_HAS_ALPHA /**< \deprecated Use #VPX_IMG_FMT_HAS_ALPHA */
- /*!\brief Deprecated list of supported image formats
- * \deprecated New code should use #vpx_img_fmt
- */
+ /*!\brief Deprecated list of supported image formats
+ * \deprecated New code should use #vpx_img_fmt
+ */
#define img_fmt vpx_img_fmt
- /*!\brief alias for enum img_fmt.
- * \deprecated New code should use #vpx_img_fmt_t
- */
+ /*!\brief alias for enum img_fmt.
+ * \deprecated New code should use #vpx_img_fmt_t
+ */
#define img_fmt_t vpx_img_fmt_t
#define IMG_FMT_NONE VPX_IMG_FMT_NONE /**< \deprecated Use #VPX_IMG_FMT_NONE */
@@ -93,24 +95,23 @@ extern "C" {
#define IMG_FMT_VPXI420 VPX_IMG_FMT_VPXI420 /**< \deprecated Use #VPX_IMG_FMT_VPXI420 */
#endif /* VPX_CODEC_DISABLE_COMPAT */
- /**\brief Image Descriptor */
- typedef struct vpx_image
- {
- vpx_img_fmt_t fmt; /**< Image Format */
+ /**\brief Image Descriptor */
+ typedef struct vpx_image {
+ vpx_img_fmt_t fmt; /**< Image Format */
- /* Image storage dimensions */
- unsigned int w; /**< Stored image width */
- unsigned int h; /**< Stored image height */
+ /* Image storage dimensions */
+ unsigned int w; /**< Stored image width */
+ unsigned int h; /**< Stored image height */
- /* Image display dimensions */
- unsigned int d_w; /**< Displayed image width */
- unsigned int d_h; /**< Displayed image height */
+ /* Image display dimensions */
+ unsigned int d_w; /**< Displayed image width */
+ unsigned int d_h; /**< Displayed image height */
- /* Chroma subsampling info */
- unsigned int x_chroma_shift; /**< subsampling order, X */
- unsigned int y_chroma_shift; /**< subsampling order, Y */
+ /* Chroma subsampling info */
+ unsigned int x_chroma_shift; /**< subsampling order, X */
+ unsigned int y_chroma_shift; /**< subsampling order, Y */
- /* Image data pointers. */
+ /* Image data pointers. */
#define VPX_PLANE_PACKED 0 /**< To be used for all packed formats */
#define VPX_PLANE_Y 0 /**< Y (Luminance) plane */
#define VPX_PLANE_U 1 /**< U (Chroma) plane */
@@ -123,119 +124,118 @@ extern "C" {
#define PLANE_V VPX_PLANE_V
#define PLANE_ALPHA VPX_PLANE_ALPHA
#endif
- unsigned char *planes[4]; /**< pointer to the top left pixel for each plane */
- int stride[4]; /**< stride between rows for each plane */
+ unsigned char *planes[4]; /**< pointer to the top left pixel for each plane */
+ int stride[4]; /**< stride between rows for each plane */
- int bps; /**< bits per sample (for packed formats) */
-
- /* The following member may be set by the application to associate data
- * with this image.
- */
- void *user_priv; /**< may be set by the application to associate data
- * with this image. */
+ int bps; /**< bits per sample (for packed formats) */
- /* The following members should be treated as private. */
- unsigned char *img_data; /**< private */
- int img_data_owner; /**< private */
- int self_allocd; /**< private */
- } vpx_image_t; /**< alias for struct vpx_image */
-
- /**\brief Representation of a rectangle on a surface */
- typedef struct vpx_image_rect
- {
- unsigned int x; /**< leftmost column */
- unsigned int y; /**< topmost row */
- unsigned int w; /**< width */
- unsigned int h; /**< height */
- } vpx_image_rect_t; /**< alias for struct vpx_image_rect */
-
- /*!\brief Open a descriptor, allocating storage for the underlying image
- *
- * Returns a descriptor for storing an image of the given format. The
- * storage for the descriptor is allocated on the heap.
- *
- * \param[in] img Pointer to storage for descriptor. If this parameter
- * is NULL, the storage for the descriptor will be
- * allocated on the heap.
- * \param[in] fmt Format for the image
- * \param[in] d_w Width of the image
- * \param[in] d_h Height of the image
- * \param[in] align Alignment, in bytes, of the image buffer and
- * each row in the image(stride).
- *
- * \return Returns a pointer to the initialized image descriptor. If the img
- * parameter is non-null, the value of the img parameter will be
- * returned.
+ /* The following member may be set by the application to associate data
+ * with this image.
*/
- vpx_image_t *vpx_img_alloc(vpx_image_t *img,
- vpx_img_fmt_t fmt,
- unsigned int d_w,
- unsigned int d_h,
- unsigned int align);
-
- /*!\brief Open a descriptor, using existing storage for the underlying image
- *
- * Returns a descriptor for storing an image of the given format. The
- * storage for descriptor has been allocated elsewhere, and a descriptor is
- * desired to "wrap" that storage.
- *
- * \param[in] img Pointer to storage for descriptor. If this parameter
- * is NULL, the storage for the descriptor will be
- * allocated on the heap.
- * \param[in] fmt Format for the image
- * \param[in] d_w Width of the image
- * \param[in] d_h Height of the image
- * \param[in] align Alignment, in bytes, of each row in the image.
- * \param[in] img_data Storage to use for the image
- *
- * \return Returns a pointer to the initialized image descriptor. If the img
- * parameter is non-null, the value of the img parameter will be
- * returned.
- */
- vpx_image_t *vpx_img_wrap(vpx_image_t *img,
- vpx_img_fmt_t fmt,
- unsigned int d_w,
- unsigned int d_h,
- unsigned int align,
- unsigned char *img_data);
-
-
- /*!\brief Set the rectangle identifying the displayed portion of the image
- *
- * Updates the displayed rectangle (aka viewport) on the image surface to
- * match the specified coordinates and size.
- *
- * \param[in] img Image descriptor
- * \param[in] x leftmost column
- * \param[in] y topmost row
- * \param[in] w width
- * \param[in] h height
- *
- * \return 0 if the requested rectangle is valid, nonzero otherwise.
- */
- int vpx_img_set_rect(vpx_image_t *img,
- unsigned int x,
- unsigned int y,
- unsigned int w,
- unsigned int h);
-
-
- /*!\brief Flip the image vertically (top for bottom)
- *
- * Adjusts the image descriptor's pointers and strides to make the image
- * be referenced upside-down.
- *
- * \param[in] img Image descriptor
- */
- void vpx_img_flip(vpx_image_t *img);
+ void *user_priv; /**< may be set by the application to associate data
+ * with this image. */
- /*!\brief Close an image descriptor
- *
- * Frees all allocated storage associated with an image descriptor.
- *
- * \param[in] img Image descriptor
- */
- void vpx_img_free(vpx_image_t *img);
+ /* The following members should be treated as private. */
+ unsigned char *img_data; /**< private */
+ int img_data_owner; /**< private */
+ int self_allocd; /**< private */
+ } vpx_image_t; /**< alias for struct vpx_image */
+
+ /**\brief Representation of a rectangle on a surface */
+ typedef struct vpx_image_rect {
+ unsigned int x; /**< leftmost column */
+ unsigned int y; /**< topmost row */
+ unsigned int w; /**< width */
+ unsigned int h; /**< height */
+ } vpx_image_rect_t; /**< alias for struct vpx_image_rect */
+
+ /*!\brief Open a descriptor, allocating storage for the underlying image
+ *
+ * Returns a descriptor for storing an image of the given format. The
+ * storage for the descriptor is allocated on the heap.
+ *
+ * \param[in] img Pointer to storage for descriptor. If this parameter
+ * is NULL, the storage for the descriptor will be
+ * allocated on the heap.
+ * \param[in] fmt Format for the image
+ * \param[in] d_w Width of the image
+ * \param[in] d_h Height of the image
+ * \param[in] align Alignment, in bytes, of the image buffer and
+ * each row in the image (stride).
+ *
+ * \return Returns a pointer to the initialized image descriptor. If the img
+ * parameter is non-null, the value of the img parameter will be
+ * returned.
+ */
+ vpx_image_t *vpx_img_alloc(vpx_image_t *img,
+ vpx_img_fmt_t fmt,
+ unsigned int d_w,
+ unsigned int d_h,
+ unsigned int align);
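For example (a sketch; the 640x480 dimensions and 16-byte alignment are arbitrary):

    vpx_image_t *img = vpx_img_alloc(NULL, VPX_IMG_FMT_I420, 640, 480, 16);
    if (img != NULL) {
      /* planes[VPX_PLANE_Y/U/V] and stride[] are now populated. */
      vpx_img_free(img);  /* releases both descriptor and pixel storage */
    }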
+
+ /*!\brief Open a descriptor, using existing storage for the underlying image
+ *
+ * Returns a descriptor for storing an image of the given format. The
+ * storage for the descriptor has been allocated elsewhere, and a descriptor is
+ * desired to "wrap" that storage.
+ *
+ * \param[in] img Pointer to storage for descriptor. If this parameter
+ * is NULL, the storage for the descriptor will be
+ * allocated on the heap.
+ * \param[in] fmt Format for the image
+ * \param[in] d_w Width of the image
+ * \param[in] d_h Height of the image
+ * \param[in] align Alignment, in bytes, of each row in the image.
+ * \param[in] img_data Storage to use for the image
+ *
+ * \return Returns a pointer to the initialized image descriptor. If the img
+ * parameter is non-null, the value of the img parameter will be
+ * returned.
+ */
+ vpx_image_t *vpx_img_wrap(vpx_image_t *img,
+ vpx_img_fmt_t fmt,
+ unsigned int d_w,
+ unsigned int d_h,
+ unsigned int align,
+ unsigned char *img_data);
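A sketch of wrapping caller-owned storage; the buffer size assumes tightly packed 8-bit 4:2:0 data (align of 1, no row padding):

    static unsigned char buf[640 * 480 * 3 / 2];
    vpx_image_t wrapper;

    if (vpx_img_wrap(&wrapper, VPX_IMG_FMT_I420, 640, 480, 1, buf) != NULL) {
      /* wrapper points into buf; the descriptor never owns the pixels */
    }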
+
+
+ /*!\brief Set the rectangle identifying the displayed portion of the image
+ *
+ * Updates the displayed rectangle (aka viewport) on the image surface to
+ * match the specified coordinates and size.
+ *
+ * \param[in] img Image descriptor
+ * \param[in] x leftmost column
+ * \param[in] y topmost row
+ * \param[in] w width
+ * \param[in] h height
+ *
+ * \return 0 if the requested rectangle is valid, nonzero otherwise.
+ */
+ int vpx_img_set_rect(vpx_image_t *img,
+ unsigned int x,
+ unsigned int y,
+ unsigned int w,
+ unsigned int h);
+
+
+ /*!\brief Flip the image vertically (top for bottom)
+ *
+ * Adjusts the image descriptor's pointers and strides to make the image
+ * be referenced upside-down.
+ *
+ * \param[in] img Image descriptor
+ */
+ void vpx_img_flip(vpx_image_t *img);
+
+ /*!\brief Close an image descriptor
+ *
+ * Frees all allocated storage associated with an image descriptor.
+ *
+ * \param[in] img Image descriptor
+ */
+ void vpx_img_free(vpx_image_t *img);
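Tying the descriptor calls above together, a lifecycle sketch covering vpx_img_set_rect, vpx_img_flip, and vpx_img_free (error handling elided):

    vpx_image_t *img = vpx_img_alloc(NULL, VPX_IMG_FMT_I420, 320, 240, 16);
    if (img != NULL) {
      if (vpx_img_set_rect(img, 0, 0, 160, 120) == 0) {
        /* viewport is now the top-left quadrant */
      }
      vpx_img_flip(img);  /* pointers and strides adjusted; pixels untouched */
      vpx_img_free(img);
    }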
#endif
#ifdef __cplusplus
diff --git a/libvpx/vpx/vpx_integer.h b/libvpx/vpx/vpx_integer.h
index 218bca7..0ccc96c 100644
--- a/libvpx/vpx/vpx_integer.h
+++ b/libvpx/vpx/vpx_integer.h
@@ -27,6 +27,9 @@ typedef unsigned int uint32_t;
#if (defined(_MSC_VER) && (_MSC_VER < 1600))
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
+#define INT64_MAX _I64_MAX
+#define INT16_MAX _I16_MAX
+#define INT16_MIN _I16_MIN
#endif
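These fallbacks map the C99 limit macros onto MSVC's intrinsic limits, since _MSC_VER 1600 (Visual Studio 2010) is the first release to ship <stdint.h>. Portable code such as this sketch then compiles unchanged on older compilers:

    #include <stdint.h>   /* or the fallback defines above on old MSVC */

    static int64_t sat_add64(int64_t a, int64_t b) {
      /* caps at INT64_MAX for positive b; underflow is not handled here */
      return (b > 0 && a > INT64_MAX - b) ? INT64_MAX : a + b;
    }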
#ifndef _UINTPTR_T_DEFINED
diff --git a/libvpx/vpx_mem/include/vpx_mem_intrnl.h b/libvpx/vpx_mem/include/vpx_mem_intrnl.h
index 63c6b77..60b5165 100644
--- a/libvpx/vpx_mem/include/vpx_mem_intrnl.h
+++ b/libvpx/vpx_mem/include/vpx_mem_intrnl.h
@@ -11,7 +11,7 @@
#ifndef __VPX_MEM_INTRNL_H__
#define __VPX_MEM_INTRNL_H__
-#include "vpx_config.h"
+#include "./vpx_config.h"
#ifndef CONFIG_MEM_MANAGER
# if defined(VXWORKS)
@@ -47,8 +47,8 @@ vpx_memcpy, _memset, and _memmove*/
#ifndef DEFAULT_ALIGNMENT
# if defined(VXWORKS)
# define DEFAULT_ALIGNMENT 32 /*default addr alignment to use in
- calls to vpx_* functions other
- than vpx_memalign*/
+                                       calls to vpx_* functions other
+                                       than vpx_memalign*/
# else
# define DEFAULT_ALIGNMENT 1
# endif
@@ -60,9 +60,9 @@ vpx_memcpy, _memset, and _memmove*/
#if CONFIG_MEM_TRACKER
# define TRY_BOUNDS_CHECK 1 /*when set to 1 pads each allocation,
- integrity can be checked using
- vpx_memory_tracker_check_integrity
- or on free by defining*/
+                                    integrity can be checked using
+                                    vpx_memory_tracker_check_integrity
+                                    or on free by defining*/
/*TRY_BOUNDS_CHECK_ON_FREE*/
#else
# define TRY_BOUNDS_CHECK 0
@@ -70,13 +70,13 @@ vpx_memcpy, _memset, and _memmove*/
#if TRY_BOUNDS_CHECK
# define TRY_BOUNDS_CHECK_ON_FREE 0 /*checks mem integrity on every
- free, very expensive*/
+                                          free, very expensive*/
# define BOUNDS_CHECK_VALUE 0xdeadbeef /*value stored before/after ea.
- mem addr for bounds checking*/
+                                               mem addr for bounds checking*/
# define BOUNDS_CHECK_PAD_SIZE 32 /*size of the padding before and
- after ea allocation to be filled
- with BOUNDS_CHECK_VALUE.
- this should be a multiple of 4*/
+                                          after ea allocation to be filled
+                                          with BOUNDS_CHECK_VALUE.
+                                          this should be a multiple of 4*/
#else
# define BOUNDS_CHECK_VALUE 0
# define BOUNDS_CHECK_PAD_SIZE 0
diff --git a/libvpx/vpx_mem/include/vpx_mem_tracker.h b/libvpx/vpx_mem/include/vpx_mem_tracker.h
index ef2b29b..3be0d2d 100644
--- a/libvpx/vpx_mem/include/vpx_mem_tracker.h
+++ b/libvpx/vpx_mem/include/vpx_mem_tracker.h
@@ -23,158 +23,157 @@
#include <stdarg.h>
-struct mem_block
-{
- size_t addr;
- unsigned int size,
- line;
- char *file;
- struct mem_block *prev,
- * next;
-
- int padded; // This mem_block has padding for integrity checks.
- // As of right now, this should only be 0 if
- // using vpx_mem_alloc to allocate cache memory.
- // 2005-01-11 tjf
+struct mem_block {
+ size_t addr;
+ unsigned int size,
+ line;
+ char *file;
+ struct mem_block *prev,
+ * next;
+
+ int padded; // This mem_block has padding for integrity checks.
+ // As of right now, this should only be 0 if
+ // using vpx_mem_alloc to allocate cache memory.
+ // 2005-01-11 tjf
};
#if defined(__cplusplus)
extern "C" {
#endif
- /*
- vpx_memory_tracker_init(int padding_size, int pad_value)
- padding_size - the size of the padding before and after each mem addr.
- Values > 0 indicate that integrity checks can be performed
- by inspecting these areas.
- pad_value - the initial value within the padding area before and after
- each mem addr.
-
- Initializes the memory tracker interface. Should be called before any
- other calls to the memory tracker.
- */
- int vpx_memory_tracker_init(int padding_size, int pad_value);
-
- /*
- vpx_memory_tracker_destroy()
- Deinitializes the memory tracker interface
- */
- void vpx_memory_tracker_destroy();
-
- /*
- vpx_memory_tracker_add(size_t addr, unsigned int size,
- char * file, unsigned int line)
- addr - memory address to be added to list
- size - size of addr
- file - the file addr was referenced from
- line - the line in file addr was referenced from
- Adds memory address addr, it's size, file and line it came from
- to the memory tracker allocation table
- */
- void vpx_memory_tracker_add(size_t addr, unsigned int size,
- char *file, unsigned int line,
- int padded);
-
- /*
- vpx_memory_tracker_add(size_t addr, unsigned int size, char * file, unsigned int line)
- addr - memory address to be added to be removed
- padded - if 0, disables bounds checking on this memory block even if bounds
- checking is enabled. (for example, when allocating cache memory, we still want
- to check for memory leaks, but we do not waste cache space for bounds check padding)
- Removes the specified address from the memory tracker's allocation
- table
- Return:
- 0: on success
- -1: if memory allocation table's mutex could not be locked
- -2: if the addr was not found in the list
- */
- int vpx_memory_tracker_remove(size_t addr);
-
- /*
- vpx_memory_tracker_find(unsigned int addr)
- addr - address to be found in the memory tracker's
- allocation table
- Return:
- If found, pointer to the memory block that matches addr
- NULL otherwise
- */
- struct mem_block *vpx_memory_tracker_find(size_t addr);
-
- /*
- vpx_memory_tracker_dump()
- Dumps the current contents of the memory
- tracker allocation table
- */
- void vpx_memory_tracker_dump();
-
- /*
- vpx_memory_tracker_check_integrity()
- If a padding_size was provided to vpx_memory_tracker_init()
- This function will verify that the region before and after each
- memory address contains the specified pad_value. Should the check
- fail, the filename and line of the check will be printed out.
- */
- void vpx_memory_tracker_check_integrity(char *file, unsigned int line);
-
- /*
- vpx_memory_tracker_set_log_type
- type - value representing the logging type to use
- option - type specific option. This will be interpreted differently
- based on the type.
- Sets the logging type for the memory tracker.
- Values currently supported:
- 0: if option is NULL, log to stderr, otherwise interpret option as a
- filename and attempt to open it.
- 1: Use output_debug_string (WIN32 only), option ignored
- Return:
- 0: on success
- -1: if the logging type could not be set, because the value was invalid
- or because a file could not be opened
- */
- int vpx_memory_tracker_set_log_type(int type, char *option);
-
- /*
- vpx_memory_tracker_set_log_func
- userdata - ptr to be passed to the supplied logfunc, can be NULL
- logfunc - the logging function to be used to output data from
- vpx_memory_track_dump/check_integrity
- Sets a logging function to be used by the memory tracker.
- Return:
- 0: on success
- -1: if the logging type could not be set because logfunc was NULL
- */
- int vpx_memory_tracker_set_log_func(void *userdata,
- void(*logfunc)(void *userdata,
- const char *fmt, va_list args));
-
- /* Wrappers to standard library functions. */
- typedef void*(* mem_track_malloc_func)(size_t);
- typedef void*(* mem_track_calloc_func)(size_t, size_t);
- typedef void*(* mem_track_realloc_func)(void *, size_t);
- typedef void (* mem_track_free_func)(void *);
- typedef void*(* mem_track_memcpy_func)(void *, const void *, size_t);
- typedef void*(* mem_track_memset_func)(void *, int, size_t);
- typedef void*(* mem_track_memmove_func)(void *, const void *, size_t);
-
- /*
- vpx_memory_tracker_set_functions
-
- Sets the function pointers for the standard library functions.
-
- Return:
- 0: on success
- -1: if the use global function pointers is not set.
- */
- int vpx_memory_tracker_set_functions(mem_track_malloc_func g_malloc_l
- , mem_track_calloc_func g_calloc_l
- , mem_track_realloc_func g_realloc_l
- , mem_track_free_func g_free_l
- , mem_track_memcpy_func g_memcpy_l
- , mem_track_memset_func g_memset_l
- , mem_track_memmove_func g_memmove_l);
+ /*
+ vpx_memory_tracker_init(int padding_size, int pad_value)
+ padding_size - the size of the padding before and after each mem addr.
+ Values > 0 indicate that integrity checks can be performed
+ by inspecting these areas.
+ pad_value - the initial value within the padding area before and after
+ each mem addr.
+
+ Initializes the memory tracker interface. Should be called before any
+ other calls to the memory tracker.
+ */
+ int vpx_memory_tracker_init(int padding_size, int pad_value);
+
+ /*
+ vpx_memory_tracker_destroy()
+ Deinitializes the memory tracker interface
+ */
+ void vpx_memory_tracker_destroy();
+
+ /*
+ vpx_memory_tracker_add(size_t addr, unsigned int size,
+ char *file, unsigned int line, int padded)
+ addr - memory address to be added to list
+ size - size of addr
+ file - the file addr was referenced from
+ line - the line in file addr was referenced from
+ padded - if 0, disables bounds checking on this memory block even if
+ bounds checking is enabled (for example, when allocating cache memory:
+ leaks are still tracked, but no cache space is wasted on padding)
+ Adds memory address addr, its size, and the file and line it came from
+ to the memory tracker allocation table
+ */
+ void vpx_memory_tracker_add(size_t addr, unsigned int size,
+ char *file, unsigned int line,
+ int padded);
+
+ /*
+ vpx_memory_tracker_remove(size_t addr)
+ addr - memory address to be removed
+ Removes the specified address from the memory tracker's allocation
+ table
+ Return:
+ 0: on success
+ -1: if memory allocation table's mutex could not be locked
+ -2: if the addr was not found in the list
+ */
+ int vpx_memory_tracker_remove(size_t addr);
+
+ /*
+ vpx_memory_tracker_find(unsigned int addr)
+ addr - address to be found in the memory tracker's
+ allocation table
+ Return:
+ If found, pointer to the memory block that matches addr
+ NULL otherwise
+ */
+ struct mem_block *vpx_memory_tracker_find(size_t addr);
+
+ /*
+ vpx_memory_tracker_dump()
+ Dumps the current contents of the memory
+ tracker allocation table
+ */
+ void vpx_memory_tracker_dump();
+
+ /*
+ vpx_memory_tracker_check_integrity()
+ If a padding_size was provided to vpx_memory_tracker_init()
+ This function will verify that the region before and after each
+ memory address contains the specified pad_value. Should the check
+ fail, the filename and line of the check will be printed out.
+ */
+ void vpx_memory_tracker_check_integrity(char *file, unsigned int line);
+
+ /*
+ vpx_memory_tracker_set_log_type
+ type - value representing the logging type to use
+ option - type specific option. This will be interpreted differently
+ based on the type.
+ Sets the logging type for the memory tracker.
+ Values currently supported:
+ 0: if option is NULL, log to stderr, otherwise interpret option as a
+ filename and attempt to open it.
+ 1: Use output_debug_string (WIN32 only), option ignored
+ Return:
+ 0: on success
+ -1: if the logging type could not be set, because the value was invalid
+ or because a file could not be opened
+ */
+ int vpx_memory_tracker_set_log_type(int type, char *option);
+
+ /*
+ vpx_memory_tracker_set_log_func
+ userdata - ptr to be passed to the supplied logfunc, can be NULL
+ logfunc - the logging function to be used to output data from
+ vpx_memory_track_dump/check_integrity
+ Sets a logging function to be used by the memory tracker.
+ Return:
+ 0: on success
+ -1: if the logging type could not be set because logfunc was NULL
+ */
+ int vpx_memory_tracker_set_log_func(void *userdata,
+ void(*logfunc)(void *userdata,
+ const char *fmt, va_list args));
+
+ /* Wrappers to standard library functions. */
+ typedef void *(* mem_track_malloc_func)(size_t);
+ typedef void *(* mem_track_calloc_func)(size_t, size_t);
+ typedef void *(* mem_track_realloc_func)(void *, size_t);
+ typedef void (* mem_track_free_func)(void *);
+ typedef void *(* mem_track_memcpy_func)(void *, const void *, size_t);
+ typedef void *(* mem_track_memset_func)(void *, int, size_t);
+ typedef void *(* mem_track_memmove_func)(void *, const void *, size_t);
+
+ /*
+ vpx_memory_tracker_set_functions
+
+ Sets the function pointers for the standard library functions.
+
+ Return:
+ 0: on success
+ -1: if the use of global function pointers is not set.
+ */
+ int vpx_memory_tracker_set_functions(mem_track_malloc_func g_malloc_l,
+                                      mem_track_calloc_func g_calloc_l,
+                                      mem_track_realloc_func g_realloc_l,
+                                      mem_track_free_func g_free_l,
+                                      mem_track_memcpy_func g_memcpy_l,
+                                      mem_track_memset_func g_memset_l,
+                                      mem_track_memmove_func g_memmove_l);
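A usage sketch for the interface above (padding size and pad value are arbitrary; in libvpx the add/remove calls are normally issued by the vpx_mem wrappers rather than by hand):

    #include <stdlib.h>   /* malloc/free for this sketch */

    if (vpx_memory_tracker_init(32, 0xef) == 0) {
      void *p = malloc(100);
      /* padded = 0: no guard bytes were written around p, so ask the
         tracker to watch this block for leaks only. */
      vpx_memory_tracker_add((size_t)p, 100, (char *)__FILE__, __LINE__, 0);
      vpx_memory_tracker_remove((size_t)p);
      free(p);
      vpx_memory_tracker_dump();   /* nothing tracked: should report no blocks */
      vpx_memory_tracker_destroy();
    }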
#if defined(__cplusplus)
}
#endif
-#endif //__VPX_MEM_TRACKER_H__
+#endif // __VPX_MEM_TRACKER_H__
diff --git a/libvpx/vpx_mem/memory_manager/hmm_alloc.c b/libvpx/vpx_mem/memory_manager/hmm_alloc.c
index 22c4a54..ab3562d 100644
--- a/libvpx/vpx_mem/memory_manager/hmm_alloc.c
+++ b/libvpx/vpx_mem/memory_manager/hmm_alloc.c
@@ -15,46 +15,44 @@
#include "hmm_intrnl.h"
-void *U(alloc)(U(descriptor) *desc, U(size_aau) n)
-{
+void *U(alloc)(U(descriptor) *desc, U(size_aau) n) {
#ifdef HMM_AUDIT_FAIL
- if (desc->avl_tree_root)
- AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
+ if (desc->avl_tree_root)
+ AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
#endif
- if (desc->last_freed)
- {
+ if (desc->last_freed) {
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(desc->last_freed)
+ AUDIT_BLOCK(desc->last_freed)
#endif
- U(into_free_collection)(desc, (head_record *)(desc->last_freed));
+ U(into_free_collection)(desc, (head_record *)(desc->last_freed));
- desc->last_freed = 0;
- }
-
- /* Add space for block header. */
- n += HEAD_AAUS;
-
- /* Convert n from number of address alignment units to block alignment
- ** units. */
- n = DIV_ROUND_UP(n, HMM_BLOCK_ALIGN_UNIT);
-
- if (n < MIN_BLOCK_BAUS)
- n = MIN_BLOCK_BAUS;
-
- {
- /* Search for the first node of the bin containing the smallest
- ** block big enough to satisfy request. */
- ptr_record *ptr_rec_ptr =
- U(avl_search)(
- (U(avl_avl) *) & (desc->avl_tree_root), (U(size_bau)) n,
- AVL_GREATER_EQUAL);
-
- /* If an approprate bin is found, satisfy the allocation request,
- ** otherwise return null pointer. */
- return(ptr_rec_ptr ?
- U(alloc_from_bin)(desc, ptr_rec_ptr, (U(size_bau)) n) : 0);
+ desc->last_freed = 0;
}
+
+ /* Add space for block header. */
+ n += HEAD_AAUS;
+
+ /* Convert n from number of address alignment units to block alignment
+ ** units. */
+ n = DIV_ROUND_UP(n, HMM_BLOCK_ALIGN_UNIT);
+
+ if (n < MIN_BLOCK_BAUS)
+ n = MIN_BLOCK_BAUS;
+
+ {
+ /* Search for the first node of the bin containing the smallest
+ ** block big enough to satisfy request. */
+ ptr_record *ptr_rec_ptr =
+ U(avl_search)(
+ (U(avl_avl) *) & (desc->avl_tree_root), (U(size_bau)) n,
+ AVL_GREATER_EQUAL);
+
+ /* If an appropriate bin is found, satisfy the allocation request,
+ ** otherwise return null pointer. */
+ return(ptr_rec_ptr ?
+ U(alloc_from_bin)(desc, ptr_rec_ptr, (U(size_bau)) n) : 0);
+ }
}
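The request-size conversion above is plain round-up division; worked through in isolation (the unit sizes here are illustrative, not libvpx's actual configuration):

    #define DIV_ROUND_UP(a, b) (((a) + (b) - 1) / (b))
    /* With HMM_BLOCK_ALIGN_UNIT == 4 AAUs and HEAD_AAUS == 2:
       a request of n = 9 payload AAUs becomes 9 + 2 = 11 with its header,
       DIV_ROUND_UP(11, 4) == 3 BAUs, then raised to MIN_BLOCK_BAUS if low. */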
diff --git a/libvpx/vpx_mem/memory_manager/hmm_base.c b/libvpx/vpx_mem/memory_manager/hmm_base.c
index ad1da03..0eff59d 100644
--- a/libvpx/vpx_mem/memory_manager/hmm_base.c
+++ b/libvpx/vpx_mem/memory_manager/hmm_base.c
@@ -15,58 +15,53 @@
#include "hmm_intrnl.h"
-void U(init)(U(descriptor) *desc)
-{
- desc->avl_tree_root = 0;
- desc->last_freed = 0;
+void U(init)(U(descriptor) *desc) {
+ desc->avl_tree_root = 0;
+ desc->last_freed = 0;
}
/* Remove a free block from a bin's doubly-linked list when it is not,
** the first block in the bin.
*/
void U(dll_remove)(
- /* Pointer to pointer record in the block to be removed. */
- ptr_record *to_remove)
-{
- to_remove->prev->next = to_remove->next;
+ /* Pointer to pointer record in the block to be removed. */
+ ptr_record *to_remove) {
+ to_remove->prev->next = to_remove->next;
- if (to_remove->next)
- to_remove->next->prev = to_remove->prev;
+ if (to_remove->next)
+ to_remove->next->prev = to_remove->prev;
}
/* Put a block into the free collection of a heap.
*/
void U(into_free_collection)(
- /* Pointer to heap descriptor. */
- U(descriptor) *desc,
- /* Pointer to head record of block. */
- head_record *head_ptr)
-{
- ptr_record *ptr_rec_ptr = HEAD_TO_PTR_REC(head_ptr);
-
- ptr_record *bin_front_ptr =
- U(avl_insert)((U(avl_avl) *) & (desc->avl_tree_root), ptr_rec_ptr);
-
- if (bin_front_ptr != ptr_rec_ptr)
- {
- /* The block was not inserted into the AVL tree because there is
- ** already a bin for the size of the block. */
-
- MARK_SUCCESSIVE_BLOCK_IN_FREE_BIN(head_ptr)
- ptr_rec_ptr->self = ptr_rec_ptr;
-
- /* Make the block the new second block in the bin's doubly-linked
- ** list. */
- ptr_rec_ptr->prev = bin_front_ptr;
- ptr_rec_ptr->next = bin_front_ptr->next;
- bin_front_ptr->next = ptr_rec_ptr;
-
- if (ptr_rec_ptr->next)
- ptr_rec_ptr->next->prev = ptr_rec_ptr;
- }
- else
- /* Block is first block in new bin. */
- ptr_rec_ptr->next = 0;
+ /* Pointer to heap descriptor. */
+ U(descriptor) *desc,
+ /* Pointer to head record of block. */
+ head_record *head_ptr) {
+ ptr_record *ptr_rec_ptr = HEAD_TO_PTR_REC(head_ptr);
+
+ ptr_record *bin_front_ptr =
+ U(avl_insert)((U(avl_avl) *) & (desc->avl_tree_root), ptr_rec_ptr);
+
+ if (bin_front_ptr != ptr_rec_ptr) {
+ /* The block was not inserted into the AVL tree because there is
+ ** already a bin for the size of the block. */
+
+ MARK_SUCCESSIVE_BLOCK_IN_FREE_BIN(head_ptr)
+ ptr_rec_ptr->self = ptr_rec_ptr;
+
+ /* Make the block the new second block in the bin's doubly-linked
+ ** list. */
+ ptr_rec_ptr->prev = bin_front_ptr;
+ ptr_rec_ptr->next = bin_front_ptr->next;
+ bin_front_ptr->next = ptr_rec_ptr;
+
+ if (ptr_rec_ptr->next)
+ ptr_rec_ptr->next->prev = ptr_rec_ptr;
+ } else
+ /* Block is first block in new bin. */
+ ptr_rec_ptr->next = 0;
}
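The bin splice above is the standard insert-after-front operation on a doubly-linked list; the same four steps on a generic node type (a sketch):

    struct node { struct node *prev, *next; };

    static void insert_after(struct node *front, struct node *n) {
      n->prev = front;          /* back-link to the bin's front block  */
      n->next = front->next;    /* adopt the old second block          */
      front->next = n;          /* become the new second block         */
      if (n->next)
        n->next->prev = n;      /* repair the old second block's link  */
    }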
/* Allocate a block from a given bin. Returns a pointer to the payload
@@ -74,268 +69,245 @@ void U(into_free_collection)(
** to calling this function.
*/
void *U(alloc_from_bin)(
- /* Pointer to heap descriptor. */
- U(descriptor) *desc,
- /* Pointer to pointer record of first block in bin. */
- ptr_record *bin_front_ptr,
- /* Number of BAUs needed in the allocated block. If the block taken
- ** from the bin is significantly larger than the number of BAUs needed,
- ** the "extra" BAUs are split off to form a new free block. */
- U(size_bau) n_baus)
-{
- head_record *head_ptr;
- U(size_bau) rem_baus;
-
- if (bin_front_ptr->next)
- {
- /* There are multiple blocks in this bin. Use the 2nd block in
- ** the bin to avoid needless change to the AVL tree.
- */
-
- ptr_record *ptr_rec_ptr = bin_front_ptr->next;
- head_ptr = PTR_REC_TO_HEAD(ptr_rec_ptr);
+ /* Pointer to heap descriptor. */
+ U(descriptor) *desc,
+ /* Pointer to pointer record of first block in bin. */
+ ptr_record *bin_front_ptr,
+ /* Number of BAUs needed in the allocated block. If the block taken
+ ** from the bin is significantly larger than the number of BAUs needed,
+ ** the "extra" BAUs are split off to form a new free block. */
+ U(size_bau) n_baus) {
+ head_record *head_ptr;
+ U(size_bau) rem_baus;
+
+ if (bin_front_ptr->next) {
+ /* There are multiple blocks in this bin. Use the 2nd block in
+ ** the bin to avoid needless change to the AVL tree.
+ */
+
+ ptr_record *ptr_rec_ptr = bin_front_ptr->next;
+ head_ptr = PTR_REC_TO_HEAD(ptr_rec_ptr);
#ifdef AUDIT_FAIL
- AUDIT_BLOCK(head_ptr)
+ AUDIT_BLOCK(head_ptr)
#endif
- U(dll_remove)(ptr_rec_ptr);
- }
- else
- {
- /* There is only one block in the bin, so it has to be removed
- ** from the AVL tree.
- */
+ U(dll_remove)(ptr_rec_ptr);
+ } else {
+ /* There is only one block in the bin, so it has to be removed
+ ** from the AVL tree.
+ */
- head_ptr = PTR_REC_TO_HEAD(bin_front_ptr);
+ head_ptr = PTR_REC_TO_HEAD(bin_front_ptr);
- U(avl_remove)(
- (U(avl_avl) *) &(desc->avl_tree_root), BLOCK_BAUS(head_ptr));
- }
+ U(avl_remove)(
+ (U(avl_avl) *) & (desc->avl_tree_root), BLOCK_BAUS(head_ptr));
+ }
- MARK_BLOCK_ALLOCATED(head_ptr)
+ MARK_BLOCK_ALLOCATED(head_ptr)
- rem_baus = BLOCK_BAUS(head_ptr) - n_baus;
+ rem_baus = BLOCK_BAUS(head_ptr) - n_baus;
- if (rem_baus >= MIN_BLOCK_BAUS)
- {
- /* Since there are enough "extra" BAUs, split them off to form
- ** a new free block.
- */
+ if (rem_baus >= MIN_BLOCK_BAUS) {
+ /* Since there are enough "extra" BAUs, split them off to form
+ ** a new free block.
+ */
- head_record *rem_head_ptr =
- (head_record *) BAUS_FORWARD(head_ptr, n_baus);
+ head_record *rem_head_ptr =
+ (head_record *) BAUS_FORWARD(head_ptr, n_baus);
- /* Change the next block's header to reflect the fact that the
- ** block preceeding it is now smaller.
- */
- SET_PREV_BLOCK_BAUS(
- BAUS_FORWARD(head_ptr, head_ptr->block_size), rem_baus)
+ /* Change the next block's header to reflect the fact that the
+ ** block preceeding it is now smaller.
+ */
+ SET_PREV_BLOCK_BAUS(
+ BAUS_FORWARD(head_ptr, head_ptr->block_size), rem_baus)
- head_ptr->block_size = n_baus;
+ head_ptr->block_size = n_baus;
- rem_head_ptr->previous_block_size = n_baus;
- rem_head_ptr->block_size = rem_baus;
+ rem_head_ptr->previous_block_size = n_baus;
+ rem_head_ptr->block_size = rem_baus;
- desc->last_freed = rem_head_ptr;
- }
+ desc->last_freed = rem_head_ptr;
+ }
- return(HEAD_TO_PTR_REC(head_ptr));
+ return(HEAD_TO_PTR_REC(head_ptr));
}
/* Take a block out of the free collection.
*/
void U(out_of_free_collection)(
- /* Descriptor of heap that block is in. */
- U(descriptor) *desc,
- /* Pointer to head of block to take out of free collection. */
- head_record *head_ptr)
-{
- ptr_record *ptr_rec_ptr = HEAD_TO_PTR_REC(head_ptr);
-
- if (ptr_rec_ptr->self == ptr_rec_ptr)
- /* Block is not the front block in its bin, so all we have to
- ** do is take it out of the bin's doubly-linked list. */
- U(dll_remove)(ptr_rec_ptr);
+ /* Descriptor of heap that block is in. */
+ U(descriptor) *desc,
+ /* Pointer to head of block to take out of free collection. */
+ head_record *head_ptr) {
+ ptr_record *ptr_rec_ptr = HEAD_TO_PTR_REC(head_ptr);
+
+ if (ptr_rec_ptr->self == ptr_rec_ptr)
+ /* Block is not the front block in its bin, so all we have to
+ ** do is take it out of the bin's doubly-linked list. */
+ U(dll_remove)(ptr_rec_ptr);
+ else {
+ ptr_record *next = ptr_rec_ptr->next;
+
+ if (next)
+ /* Block is the front block in its bin, and there is at least
+ ** one other block in the bin. Substitute the next block for
+ ** the front block. */
+ U(avl_subst)((U(avl_avl) *) & (desc->avl_tree_root), next);
else
- {
- ptr_record *next = ptr_rec_ptr->next;
-
- if (next)
- /* Block is the front block in its bin, and there is at least
- ** one other block in the bin. Substitute the next block for
- ** the front block. */
- U(avl_subst)((U(avl_avl) *) &(desc->avl_tree_root), next);
- else
- /* Block is the front block in its bin, but there is no other
- ** block in the bin. Eliminate the bin. */
- U(avl_remove)(
- (U(avl_avl) *) &(desc->avl_tree_root), BLOCK_BAUS(head_ptr));
- }
+ /* Block is the front block in its bin, but there is no other
+ ** block in the bin. Eliminate the bin. */
+ U(avl_remove)(
+ (U(avl_avl) *) & (desc->avl_tree_root), BLOCK_BAUS(head_ptr));
+ }
}
-void U(free)(U(descriptor) *desc, void *payload_ptr)
-{
- /* Flags if coalesce with adjacent block. */
- int coalesce;
+void U(free)(U(descriptor) *desc, void *payload_ptr) {
+ /* Flags if coalesce with adjacent block. */
+ int coalesce;
- head_record *fwd_head_ptr;
- head_record *free_head_ptr = PTR_REC_TO_HEAD(payload_ptr);
+ head_record *fwd_head_ptr;
+ head_record *free_head_ptr = PTR_REC_TO_HEAD(payload_ptr);
- desc->num_baus_can_shrink = 0;
+ desc->num_baus_can_shrink = 0;
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(free_head_ptr)
+ AUDIT_BLOCK(free_head_ptr)
- /* Make sure not freeing an already free block. */
- if (!IS_BLOCK_ALLOCATED(free_head_ptr))
- HMM_AUDIT_FAIL
+ /* Make sure not freeing an already free block. */
+ if (!IS_BLOCK_ALLOCATED(free_head_ptr))
+ HMM_AUDIT_FAIL
- if (desc->avl_tree_root)
- /* Audit root block in AVL tree. */
- AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
+ if (desc->avl_tree_root)
+ /* Audit root block in AVL tree. */
+ AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
#endif
- fwd_head_ptr =
- (head_record *) BAUS_FORWARD(free_head_ptr, free_head_ptr->block_size);
+ fwd_head_ptr =
+ (head_record *) BAUS_FORWARD(free_head_ptr, free_head_ptr->block_size);
- if (free_head_ptr->previous_block_size)
- {
- /* Coalesce with backward block if possible. */
+ if (free_head_ptr->previous_block_size) {
+ /* Coalesce with backward block if possible. */
- head_record *bkwd_head_ptr =
- (head_record *) BAUS_BACKWARD(
- free_head_ptr, free_head_ptr->previous_block_size);
+ head_record *bkwd_head_ptr =
+ (head_record *) BAUS_BACKWARD(
+ free_head_ptr, free_head_ptr->previous_block_size);
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(bkwd_head_ptr)
+ AUDIT_BLOCK(bkwd_head_ptr)
#endif
- if (bkwd_head_ptr == (head_record *)(desc->last_freed))
- {
- desc->last_freed = 0;
- coalesce = 1;
- }
- else if (IS_BLOCK_ALLOCATED(bkwd_head_ptr))
- coalesce = 0;
- else
- {
- U(out_of_free_collection)(desc, bkwd_head_ptr);
- coalesce = 1;
- }
-
- if (coalesce)
- {
- bkwd_head_ptr->block_size += free_head_ptr->block_size;
- SET_PREV_BLOCK_BAUS(fwd_head_ptr, BLOCK_BAUS(bkwd_head_ptr))
- free_head_ptr = bkwd_head_ptr;
- }
+ if (bkwd_head_ptr == (head_record *)(desc->last_freed)) {
+ desc->last_freed = 0;
+ coalesce = 1;
+ } else if (IS_BLOCK_ALLOCATED(bkwd_head_ptr))
+ coalesce = 0;
+ else {
+ U(out_of_free_collection)(desc, bkwd_head_ptr);
+ coalesce = 1;
}
- if (fwd_head_ptr->block_size == 0)
- {
- /* Block to be freed is last block before dummy end-of-chunk block. */
- desc->end_of_shrinkable_chunk =
- BAUS_FORWARD(fwd_head_ptr, DUMMY_END_BLOCK_BAUS);
- desc->num_baus_can_shrink = BLOCK_BAUS(free_head_ptr);
-
- if (PREV_BLOCK_BAUS(free_head_ptr) == 0)
- /* Free block is the entire chunk, so shrinking can eliminate
- ** entire chunk including dummy end block. */
- desc->num_baus_can_shrink += DUMMY_END_BLOCK_BAUS;
+ if (coalesce) {
+ bkwd_head_ptr->block_size += free_head_ptr->block_size;
+ SET_PREV_BLOCK_BAUS(fwd_head_ptr, BLOCK_BAUS(bkwd_head_ptr))
+ free_head_ptr = bkwd_head_ptr;
}
- else
- {
- /* Coalesce with forward block if possible. */
+ }
+
+ if (fwd_head_ptr->block_size == 0) {
+ /* Block to be freed is last block before dummy end-of-chunk block. */
+ desc->end_of_shrinkable_chunk =
+ BAUS_FORWARD(fwd_head_ptr, DUMMY_END_BLOCK_BAUS);
+ desc->num_baus_can_shrink = BLOCK_BAUS(free_head_ptr);
+
+ if (PREV_BLOCK_BAUS(free_head_ptr) == 0)
+ /* Free block is the entire chunk, so shrinking can eliminate
+ ** entire chunk including dummy end block. */
+ desc->num_baus_can_shrink += DUMMY_END_BLOCK_BAUS;
+ } else {
+ /* Coalesce with forward block if possible. */
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(fwd_head_ptr)
+ AUDIT_BLOCK(fwd_head_ptr)
#endif
- if (fwd_head_ptr == (head_record *)(desc->last_freed))
- {
- desc->last_freed = 0;
- coalesce = 1;
- }
- else if (IS_BLOCK_ALLOCATED(fwd_head_ptr))
- coalesce = 0;
- else
- {
- U(out_of_free_collection)(desc, fwd_head_ptr);
- coalesce = 1;
- }
-
- if (coalesce)
- {
- free_head_ptr->block_size += fwd_head_ptr->block_size;
-
- fwd_head_ptr =
- (head_record *) BAUS_FORWARD(
- fwd_head_ptr, BLOCK_BAUS(fwd_head_ptr));
-
- SET_PREV_BLOCK_BAUS(fwd_head_ptr, BLOCK_BAUS(free_head_ptr))
-
- if (fwd_head_ptr->block_size == 0)
- {
- /* Coalesced block to be freed is last block before dummy
- ** end-of-chunk block. */
- desc->end_of_shrinkable_chunk =
- BAUS_FORWARD(fwd_head_ptr, DUMMY_END_BLOCK_BAUS);
- desc->num_baus_can_shrink = BLOCK_BAUS(free_head_ptr);
-
- if (PREV_BLOCK_BAUS(free_head_ptr) == 0)
- /* Free block is the entire chunk, so shrinking can
- ** eliminate entire chunk including dummy end block. */
- desc->num_baus_can_shrink += DUMMY_END_BLOCK_BAUS;
- }
- }
+ if (fwd_head_ptr == (head_record *)(desc->last_freed)) {
+ desc->last_freed = 0;
+ coalesce = 1;
+ } else if (IS_BLOCK_ALLOCATED(fwd_head_ptr))
+ coalesce = 0;
+ else {
+ U(out_of_free_collection)(desc, fwd_head_ptr);
+ coalesce = 1;
+ }
+
+ if (coalesce) {
+ free_head_ptr->block_size += fwd_head_ptr->block_size;
+
+ fwd_head_ptr =
+ (head_record *) BAUS_FORWARD(
+ fwd_head_ptr, BLOCK_BAUS(fwd_head_ptr));
+
+ SET_PREV_BLOCK_BAUS(fwd_head_ptr, BLOCK_BAUS(free_head_ptr))
+
+ if (fwd_head_ptr->block_size == 0) {
+ /* Coalesced block to be freed is last block before dummy
+ ** end-of-chunk block. */
+ desc->end_of_shrinkable_chunk =
+ BAUS_FORWARD(fwd_head_ptr, DUMMY_END_BLOCK_BAUS);
+ desc->num_baus_can_shrink = BLOCK_BAUS(free_head_ptr);
+
+ if (PREV_BLOCK_BAUS(free_head_ptr) == 0)
+ /* Free block is the entire chunk, so shrinking can
+ ** eliminate entire chunk including dummy end block. */
+ desc->num_baus_can_shrink += DUMMY_END_BLOCK_BAUS;
+ }
}
+ }
- if (desc->last_freed)
- {
- /* There is a last freed block, but it is not adjacent to the
- ** block being freed by this call to free, so put the last
- ** freed block into the free collection.
- */
+ if (desc->last_freed) {
+ /* There is a last freed block, but it is not adjacent to the
+ ** block being freed by this call to free, so put the last
+ ** freed block into the free collection.
+ */
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(desc->last_freed)
+ AUDIT_BLOCK(desc->last_freed)
#endif
- U(into_free_collection)(desc, (head_record *)(desc->last_freed));
- }
+ U(into_free_collection)(desc, (head_record *)(desc->last_freed));
+ }
- desc->last_freed = free_head_ptr;
+ desc->last_freed = free_head_ptr;
}
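Both coalescing branches above reduce to the same header arithmetic; in isolation, for the backward case (a sketch with a simplified header type):

    struct blk { unsigned block_size, previous_block_size; };

    /* bkwd, mid, fwd laid out contiguously; mid is being freed */
    static void coalesce_backward(struct blk *bkwd, const struct blk *mid,
                                  struct blk *fwd) {
      bkwd->block_size += mid->block_size;          /* absorb mid          */
      fwd->previous_block_size = bkwd->block_size;  /* repair back-pointer */
    }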
-void U(new_chunk)(U(descriptor) *desc, void *start, U(size_bau) n_baus)
-{
+void U(new_chunk)(U(descriptor) *desc, void *start, U(size_bau) n_baus) {
#ifdef HMM_AUDIT_FAIL
- if (desc->avl_tree_root)
- /* Audit root block in AVL tree. */
- AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
+ if (desc->avl_tree_root)
+ /* Audit root block in AVL tree. */
+ AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
#endif
#undef HEAD_PTR
#define HEAD_PTR ((head_record *) start)
- /* Make the chunk one big free block followed by a dummy end block.
- */
+ /* Make the chunk one big free block followed by a dummy end block.
+ */
- n_baus -= DUMMY_END_BLOCK_BAUS;
+ n_baus -= DUMMY_END_BLOCK_BAUS;
- HEAD_PTR->previous_block_size = 0;
- HEAD_PTR->block_size = n_baus;
+ HEAD_PTR->previous_block_size = 0;
+ HEAD_PTR->block_size = n_baus;
- U(into_free_collection)(desc, HEAD_PTR);
+ U(into_free_collection)(desc, HEAD_PTR);
- /* Set up the dummy end block. */
- start = BAUS_FORWARD(start, n_baus);
- HEAD_PTR->previous_block_size = n_baus;
- HEAD_PTR->block_size = 0;
+ /* Set up the dummy end block. */
+ start = BAUS_FORWARD(start, n_baus);
+ HEAD_PTR->previous_block_size = n_baus;
+ HEAD_PTR->block_size = 0;
#undef HEAD_PTR
}
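After U(new_chunk) returns, the chunk holds exactly one free block followed by the dummy terminator; schematically (a sketch, not to scale, where n is the BAU count left after DUMMY_END_BLOCK_BAUS is reserved):

    /* | head: prev=0, size=n | ... free payload ... | dummy: prev=n, size=0 | */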
@@ -345,12 +317,11 @@ void U(new_chunk)(U(descriptor) *desc, void *start, U(size_bau) n_baus)
/* Function that does audit fail actions defined by preprocessor symbol,
** and returns a dummy integer value.
*/
-int U(audit_block_fail_dummy_return)(void)
-{
- HMM_AUDIT_FAIL
+int U(audit_block_fail_dummy_return)(void) {
+ HMM_AUDIT_FAIL
- /* Dummy return. */
- return(0);
+ /* Dummy return. */
+ return(0);
}
#endif
@@ -372,9 +343,9 @@ int U(audit_block_fail_dummy_return)(void)
*/
#define AVL_GET_LESS(H, ACCESS) \
- (((ACCESS) ? AUDIT_BLOCK_AS_EXPR(PTR_REC_TO_HEAD(H)) : 0), (H)->self)
+ (((ACCESS) ? AUDIT_BLOCK_AS_EXPR(PTR_REC_TO_HEAD(H)) : 0), (H)->self)
#define AVL_GET_GREATER(H, ACCESS) \
- (((ACCESS) ? AUDIT_BLOCK_AS_EXPR(PTR_REC_TO_HEAD(H)) : 0), (H)->prev)
+ (((ACCESS) ? AUDIT_BLOCK_AS_EXPR(PTR_REC_TO_HEAD(H)) : 0), (H)->prev)
#else
@@ -396,39 +367,39 @@ int U(audit_block_fail_dummy_return)(void)
*/
#define AVL_GET_BALANCE_FACTOR(H) \
- ((((head_record *) (PTR_REC_TO_HEAD(H)))->block_size & \
- HIGH_BIT_BAU_SIZE) ? \
- (((head_record *) (PTR_REC_TO_HEAD(H)))->previous_block_size & \
- HIGH_BIT_BAU_SIZE ? 0 : -1) : 1)
+ ((((head_record *) (PTR_REC_TO_HEAD(H)))->block_size & \
+ HIGH_BIT_BAU_SIZE) ? \
+ (((head_record *) (PTR_REC_TO_HEAD(H)))->previous_block_size & \
+ HIGH_BIT_BAU_SIZE ? 0 : -1) : 1)
#define AVL_SET_BALANCE_FACTOR(H, BF) \
- { \
- register head_record *p = \
- (head_record *) PTR_REC_TO_HEAD(H); \
- register int bal_f = (BF); \
- \
- if (bal_f <= 0) \
- p->block_size |= HIGH_BIT_BAU_SIZE; \
- else \
- p->block_size &= ~HIGH_BIT_BAU_SIZE; \
- if (bal_f >= 0) \
- p->previous_block_size |= HIGH_BIT_BAU_SIZE; \
- else \
- p->previous_block_size &= ~HIGH_BIT_BAU_SIZE; \
- }
+ { \
+ register head_record *p = \
+ (head_record *) PTR_REC_TO_HEAD(H); \
+ register int bal_f = (BF); \
+ \
+ if (bal_f <= 0) \
+ p->block_size |= HIGH_BIT_BAU_SIZE; \
+ else \
+ p->block_size &= ~HIGH_BIT_BAU_SIZE; \
+ if (bal_f >= 0) \
+ p->previous_block_size |= HIGH_BIT_BAU_SIZE; \
+ else \
+ p->previous_block_size &= ~HIGH_BIT_BAU_SIZE; \
+ }
#define COMPARE_KEY_KEY(K1, K2) ((K1) == (K2) ? 0 : ((K1) > (K2) ? 1 : -1))
#define AVL_COMPARE_KEY_NODE(K, H) \
- COMPARE_KEY_KEY(K, BLOCK_BAUS(PTR_REC_TO_HEAD(H)))
+ COMPARE_KEY_KEY(K, BLOCK_BAUS(PTR_REC_TO_HEAD(H)))
#define AVL_COMPARE_NODE_NODE(H1, H2) \
- COMPARE_KEY_KEY(BLOCK_BAUS(PTR_REC_TO_HEAD(H1)), \
- BLOCK_BAUS(PTR_REC_TO_HEAD(H2)))
+ COMPARE_KEY_KEY(BLOCK_BAUS(PTR_REC_TO_HEAD(H1)), \
+ BLOCK_BAUS(PTR_REC_TO_HEAD(H2)))
#define AVL_NULL ((ptr_record *) 0)
#define AVL_IMPL_MASK \
- ( AVL_IMPL_INSERT | AVL_IMPL_SEARCH | AVL_IMPL_REMOVE | AVL_IMPL_SUBST )
+ ( AVL_IMPL_INSERT | AVL_IMPL_SEARCH | AVL_IMPL_REMOVE | AVL_IMPL_SUBST )
#include "cavl_impl.h"
diff --git a/libvpx/vpx_mem/memory_manager/hmm_dflt_abort.c b/libvpx/vpx_mem/memory_manager/hmm_dflt_abort.c
index d92435c..51c3cc2 100644
--- a/libvpx/vpx_mem/memory_manager/hmm_dflt_abort.c
+++ b/libvpx/vpx_mem/memory_manager/hmm_dflt_abort.c
@@ -29,26 +29,25 @@ static int entered = 0;
/* Print abort message, file and line. Terminate execution.
*/
-void hmm_dflt_abort(const char *file, const char *line)
-{
- /* Avoid use of printf(), which is more likely to use heap. */
+void hmm_dflt_abort(const char *file, const char *line) {
+ /* Avoid use of printf(), which is more likely to use heap. */
- if (entered)
+ if (entered)
- /* The standard I/O functions called a heap function and caused
- ** an indirect recursive call to this function. So we'll have
- ** to just exit without printing a message. */
- while (1);
+ /* The standard I/O functions called a heap function and caused
+ ** an indirect recursive call to this function. So we'll have
+ ** to just exit without printing a message. */
+ while (1);
- entered = 1;
+ entered = 1;
- fputs("\n_abort - Heap corruption\n" "File: ", stderr);
- fputs(file, stderr);
- fputs(" Line: ", stderr);
- fputs(line, stderr);
- fputs("\n\n", stderr);
- fputs("hmm_dflt_abort: while(1)!!!\n", stderr);
- fflush(stderr);
+ fputs("\n_abort - Heap corruption\n" "File: ", stderr);
+ fputs(file, stderr);
+ fputs(" Line: ", stderr);
+ fputs(line, stderr);
+ fputs("\n\n", stderr);
+ fputs("hmm_dflt_abort: while(1)!!!\n", stderr);
+ fflush(stderr);
- while (1);
+ while (1);
}
diff --git a/libvpx/vpx_mem/memory_manager/hmm_grow.c b/libvpx/vpx_mem/memory_manager/hmm_grow.c
index 9a4b6e4..0e86373 100644
--- a/libvpx/vpx_mem/memory_manager/hmm_grow.c
+++ b/libvpx/vpx_mem/memory_manager/hmm_grow.c
@@ -15,36 +15,35 @@
#include "hmm_intrnl.h"
-void U(grow_chunk)(U(descriptor) *desc, void *end, U(size_bau) n_baus)
-{
+void U(grow_chunk)(U(descriptor) *desc, void *end, U(size_bau) n_baus) {
#undef HEAD_PTR
#define HEAD_PTR ((head_record *) end)
- end = BAUS_BACKWARD(end, DUMMY_END_BLOCK_BAUS);
+ end = BAUS_BACKWARD(end, DUMMY_END_BLOCK_BAUS);
#ifdef HMM_AUDIT_FAIL
- if (HEAD_PTR->block_size != 0)
- /* Chunk does not have valid dummy end block. */
- HMM_AUDIT_FAIL
+ if (HEAD_PTR->block_size != 0)
+ /* Chunk does not have valid dummy end block. */
+ HMM_AUDIT_FAIL
#endif
- /* Create a new block that absorbs the old dummy end block. */
- HEAD_PTR->block_size = n_baus;
-
- /* Set up the new dummy end block. */
- {
- head_record *dummy = (head_record *) BAUS_FORWARD(end, n_baus);
- dummy->previous_block_size = n_baus;
- dummy->block_size = 0;
- }
-
- /* Simply free the new block, allowing it to coalesce with any
- ** free block at that was the last block in the chunk prior to
- ** growth.
- */
- U(free)(desc, HEAD_TO_PTR_REC(end));
+ /* Create a new block that absorbs the old dummy end block. */
+ HEAD_PTR->block_size = n_baus;
+
+ /* Set up the new dummy end block. */
+ {
+ head_record *dummy = (head_record *) BAUS_FORWARD(end, n_baus);
+ dummy->previous_block_size = n_baus;
+ dummy->block_size = 0;
+ }
+
+ /* Simply free the new block, allowing it to coalesce with any
+ ** free block that was the last block in the chunk prior to
+ ** growth.
+ */
+ U(free)(desc, HEAD_TO_PTR_REC(end));
#undef HEAD_PTR
}
diff --git a/libvpx/vpx_mem/memory_manager/hmm_largest.c b/libvpx/vpx_mem/memory_manager/hmm_largest.c
index c3c6f2c..192758d 100644
--- a/libvpx/vpx_mem/memory_manager/hmm_largest.c
+++ b/libvpx/vpx_mem/memory_manager/hmm_largest.c
@@ -15,46 +15,43 @@
#include "hmm_intrnl.h"
-U(size_aau) U(largest_available)(U(descriptor) *desc)
-{
- U(size_bau) largest;
-
- if (!(desc->avl_tree_root))
- largest = 0;
- else
- {
+U(size_aau) U(largest_available)(U(descriptor) *desc) {
+ U(size_bau) largest;
+
+ if (!(desc->avl_tree_root))
+ largest = 0;
+ else {
#ifdef HMM_AUDIT_FAIL
- /* Audit root block in AVL tree. */
- AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
+ /* Audit root block in AVL tree. */
+ AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
#endif
- largest =
- BLOCK_BAUS(
- PTR_REC_TO_HEAD(
- U(avl_search)(
- (U(avl_avl) *) & (desc->avl_tree_root),
- (U(size_bau)) ~(U(size_bau)) 0, AVL_LESS)));
- }
+ largest =
+ BLOCK_BAUS(
+ PTR_REC_TO_HEAD(
+ U(avl_search)(
+ (U(avl_avl) *) & (desc->avl_tree_root),
+ (U(size_bau)) ~(U(size_bau)) 0, AVL_LESS)));
+ }
- if (desc->last_freed)
- {
- /* Size of last freed block. */
- register U(size_bau) lf_size;
+ if (desc->last_freed) {
+ /* Size of last freed block. */
+ register U(size_bau) lf_size;
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(desc->last_freed)
+ AUDIT_BLOCK(desc->last_freed)
#endif
- lf_size = BLOCK_BAUS(desc->last_freed);
+ lf_size = BLOCK_BAUS(desc->last_freed);
- if (lf_size > largest)
- largest = lf_size;
- }
+ if (lf_size > largest)
+ largest = lf_size;
+ }
- /* Convert largest size to AAUs and subract head size leaving payload
- ** size.
- */
- return(largest ?
- ((largest * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT)) - HEAD_AAUS) :
- 0);
+ /* Convert largest size to AAUs and subtract head size, leaving payload
+ ** size.
+ */
+ return(largest ?
+ ((largest * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT)) - HEAD_AAUS) :
+ 0);
}
diff --git a/libvpx/vpx_mem/memory_manager/hmm_resize.c b/libvpx/vpx_mem/memory_manager/hmm_resize.c
index f90da96..baa5a8f 100644
--- a/libvpx/vpx_mem/memory_manager/hmm_resize.c
+++ b/libvpx/vpx_mem/memory_manager/hmm_resize.c
@@ -15,105 +15,100 @@
#include "hmm_intrnl.h"
-int U(resize)(U(descriptor) *desc, void *mem, U(size_aau) n)
-{
- U(size_aau) i;
- head_record *next_head_ptr;
- head_record *head_ptr = PTR_REC_TO_HEAD(mem);
+int U(resize)(U(descriptor) *desc, void *mem, U(size_aau) n) {
+ U(size_aau) i;
+ head_record *next_head_ptr;
+ head_record *head_ptr = PTR_REC_TO_HEAD(mem);
- /* Flag. */
- int next_block_free;
+ /* Flag. */
+ int next_block_free;
- /* Convert n from desired block size in AAUs to BAUs. */
- n += HEAD_AAUS;
- n = DIV_ROUND_UP(n, HMM_BLOCK_ALIGN_UNIT);
+ /* Convert n from desired block size in AAUs to BAUs. */
+ n += HEAD_AAUS;
+ n = DIV_ROUND_UP(n, HMM_BLOCK_ALIGN_UNIT);
- if (n < MIN_BLOCK_BAUS)
- n = MIN_BLOCK_BAUS;
+ if (n < MIN_BLOCK_BAUS)
+ n = MIN_BLOCK_BAUS;
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(head_ptr)
+ AUDIT_BLOCK(head_ptr)
- if (!IS_BLOCK_ALLOCATED(head_ptr))
- HMM_AUDIT_FAIL
+ if (!IS_BLOCK_ALLOCATED(head_ptr))
+ HMM_AUDIT_FAIL
- if (desc->avl_tree_root)
- AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
+ if (desc->avl_tree_root)
+ AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
#endif
- i = head_ptr->block_size;
+ i = head_ptr->block_size;
- next_head_ptr =
- (head_record *) BAUS_FORWARD(head_ptr, head_ptr->block_size);
+ next_head_ptr =
+ (head_record *) BAUS_FORWARD(head_ptr, head_ptr->block_size);
- next_block_free =
- (next_head_ptr == desc->last_freed) ||
- !IS_BLOCK_ALLOCATED(next_head_ptr);
+ next_block_free =
+ (next_head_ptr == desc->last_freed) ||
+ !IS_BLOCK_ALLOCATED(next_head_ptr);
- if (next_block_free)
- /* Block can expand into next free block. */
- i += BLOCK_BAUS(next_head_ptr);
+ if (next_block_free)
+ /* Block can expand into next free block. */
+ i += BLOCK_BAUS(next_head_ptr);
- if (n > i)
- /* Not enough room for block to expand. */
- return(-1);
+ if (n > i)
+ /* Not enough room for block to expand. */
+ return(-1);
- if (next_block_free)
- {
+ if (next_block_free) {
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(next_head_ptr)
+ AUDIT_BLOCK(next_head_ptr)
#endif
- if (next_head_ptr == desc->last_freed)
- desc->last_freed = 0;
- else
- U(out_of_free_collection)(desc, next_head_ptr);
+ if (next_head_ptr == desc->last_freed)
+ desc->last_freed = 0;
+ else
+ U(out_of_free_collection)(desc, next_head_ptr);
- next_head_ptr =
- (head_record *) BAUS_FORWARD(head_ptr, (U(size_bau)) i);
- }
+ next_head_ptr =
+ (head_record *) BAUS_FORWARD(head_ptr, (U(size_bau)) i);
+ }
- /* Set i to number of "extra" BAUs. */
- i -= n;
+ /* Set i to number of "extra" BAUs. */
+ i -= n;
- if (i < MIN_BLOCK_BAUS)
- /* Not enough extra BAUs to be a block on their own, so just keep them
- ** in the block being resized.
- */
- {
- n += i;
- i = n;
- }
- else
- {
- /* There are enough "leftover" BAUs in the next block to
- ** form a remainder block. */
+ if (i < MIN_BLOCK_BAUS)
+ /* Not enough extra BAUs to be a block on their own, so just keep them
+ ** in the block being resized.
+ */
+ {
+ n += i;
+ i = n;
+ } else {
+ /* There are enough "leftover" BAUs in the next block to
+ ** form a remainder block. */
- head_record *rem_head_ptr;
+ head_record *rem_head_ptr;
- rem_head_ptr = (head_record *) BAUS_FORWARD(head_ptr, n);
+ rem_head_ptr = (head_record *) BAUS_FORWARD(head_ptr, n);
- rem_head_ptr->previous_block_size = (U(size_bau)) n;
- rem_head_ptr->block_size = (U(size_bau)) i;
+ rem_head_ptr->previous_block_size = (U(size_bau)) n;
+ rem_head_ptr->block_size = (U(size_bau)) i;
- if (desc->last_freed)
- {
+ if (desc->last_freed) {
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(desc->last_freed)
+ AUDIT_BLOCK(desc->last_freed)
#endif
- U(into_free_collection)(desc, (head_record *)(desc->last_freed));
+ U(into_free_collection)(desc, (head_record *)(desc->last_freed));
- desc->last_freed = 0;
- }
-
- desc->last_freed = rem_head_ptr;
+ desc->last_freed = 0;
}
- head_ptr->block_size = (U(size_bau)) n;
- next_head_ptr->previous_block_size = (U(size_bau)) i;
+ desc->last_freed = rem_head_ptr;
+ }
+
+ head_ptr->block_size = (U(size_bau)) n;
+ next_head_ptr->previous_block_size = (U(size_bau)) i;
- return(0);
+ return(0);
}
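The in-place growth test above folds the free neighbor into the available size before a single comparison; a sketch:

    static int can_grow_in_place(unsigned cur_baus, unsigned next_baus,
                                 int next_is_free, unsigned wanted_baus) {
      unsigned avail = cur_baus + (next_is_free ? next_baus : 0);
      return wanted_baus <= avail;   /* mirrors the "if (n > i) return(-1)" */
    }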
diff --git a/libvpx/vpx_mem/memory_manager/hmm_shrink.c b/libvpx/vpx_mem/memory_manager/hmm_shrink.c
index 78fe268..f80aeea 100644
--- a/libvpx/vpx_mem/memory_manager/hmm_shrink.c
+++ b/libvpx/vpx_mem/memory_manager/hmm_shrink.c
@@ -15,97 +15,89 @@
#include "hmm_intrnl.h"
-void U(shrink_chunk)(U(descriptor) *desc, U(size_bau) n_baus_to_shrink)
-{
- head_record *dummy_end_block = (head_record *)
- BAUS_BACKWARD(desc->end_of_shrinkable_chunk, DUMMY_END_BLOCK_BAUS);
+void U(shrink_chunk)(U(descriptor) *desc, U(size_bau) n_baus_to_shrink) {
+ head_record *dummy_end_block = (head_record *)
+ BAUS_BACKWARD(desc->end_of_shrinkable_chunk, DUMMY_END_BLOCK_BAUS);
#ifdef HMM_AUDIT_FAIL
- if (dummy_end_block->block_size != 0)
- /* Chunk does not have valid dummy end block. */
- HMM_AUDIT_FAIL
+ if (dummy_end_block->block_size != 0)
+ /* Chunk does not have valid dummy end block. */
+ HMM_AUDIT_FAIL
#endif
- if (n_baus_to_shrink)
- {
- head_record *last_block = (head_record *)
- BAUS_BACKWARD(
- dummy_end_block, dummy_end_block->previous_block_size);
+ if (n_baus_to_shrink) {
+ head_record *last_block = (head_record *)
+ BAUS_BACKWARD(
+ dummy_end_block, dummy_end_block->previous_block_size);
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(last_block)
+ AUDIT_BLOCK(last_block)
#endif
- if (last_block == desc->last_freed)
- {
- U(size_bau) bs = BLOCK_BAUS(last_block);
-
- /* Chunk will not be shrunk out of existence if
- ** 1. There is at least one allocated block in the chunk
- ** and the amount to shrink is exactly the size of the
- ** last block, OR
- ** 2. After the last block is shrunk, there will be enough
- ** BAUs left in it to form a minimal size block. */
- int chunk_will_survive =
- (PREV_BLOCK_BAUS(last_block) && (n_baus_to_shrink == bs)) ||
- (n_baus_to_shrink <= (U(size_bau))(bs - MIN_BLOCK_BAUS));
-
- if (chunk_will_survive ||
- (!PREV_BLOCK_BAUS(last_block) &&
- (n_baus_to_shrink ==
- (U(size_bau))(bs + DUMMY_END_BLOCK_BAUS))))
- {
- desc->last_freed = 0;
-
- if (chunk_will_survive)
- {
- bs -= n_baus_to_shrink;
-
- if (bs)
- {
- /* The last (non-dummy) block was not completely
- ** eliminated by the shrink. */
-
- last_block->block_size = bs;
-
- /* Create new dummy end record.
- */
- dummy_end_block =
- (head_record *) BAUS_FORWARD(last_block, bs);
- dummy_end_block->previous_block_size = bs;
- dummy_end_block->block_size = 0;
+ if (last_block == desc->last_freed) {
+ U(size_bau) bs = BLOCK_BAUS(last_block);
+
+ /* Chunk will not be shrunk out of existence if
+ ** 1. There is at least one allocated block in the chunk
+ ** and the amount to shrink is exactly the size of the
+ ** last block, OR
+ ** 2. After the last block is shrunk, there will be enough
+ ** BAUs left in it to form a minimal size block. */
+ int chunk_will_survive =
+ (PREV_BLOCK_BAUS(last_block) && (n_baus_to_shrink == bs)) ||
+ (n_baus_to_shrink <= (U(size_bau))(bs - MIN_BLOCK_BAUS));
+
+ if (chunk_will_survive ||
+ (!PREV_BLOCK_BAUS(last_block) &&
+ (n_baus_to_shrink ==
+ (U(size_bau))(bs + DUMMY_END_BLOCK_BAUS)))) {
+ desc->last_freed = 0;
+
+ if (chunk_will_survive) {
+ bs -= n_baus_to_shrink;
+
+ if (bs) {
+ /* The last (non-dummy) block was not completely
+ ** eliminated by the shrink. */
+
+ last_block->block_size = bs;
+
+ /* Create new dummy end record.
+ */
+ dummy_end_block =
+ (head_record *) BAUS_FORWARD(last_block, bs);
+ dummy_end_block->previous_block_size = bs;
+ dummy_end_block->block_size = 0;
#ifdef HMM_AUDIT_FAIL
- if (desc->avl_tree_root)
- AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
+ if (desc->avl_tree_root)
+ AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
#endif
- U(into_free_collection)(desc, last_block);
- }
- else
- {
- /* The last (non-dummy) block was completely
- ** eliminated by the shrink. Make its head
- ** the new dummy end block.
- */
- last_block->block_size = 0;
- last_block->previous_block_size &= ~HIGH_BIT_BAU_SIZE;
- }
- }
- }
+ U(into_free_collection)(desc, last_block);
+ } else {
+ /* The last (non-dummy) block was completely
+ ** eliminated by the shrink. Make its head
+ ** the new dummy end block.
+ */
+ last_block->block_size = 0;
+ last_block->previous_block_size &= ~HIGH_BIT_BAU_SIZE;
+ }
+ }
+ }
#ifdef HMM_AUDIT_FAIL
- else
- HMM_AUDIT_FAIL
+ else
+ HMM_AUDIT_FAIL
#endif
- }
+ }
#ifdef HMM_AUDIT_FAIL
- else
- HMM_AUDIT_FAIL
+ else
+ HMM_AUDIT_FAIL
#endif
- }
+ }
}
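
shrink_chunk only proceeds when the trailing free block can absorb the shrink. A hedged restatement of the survival predicate from the hunk above as a pure function (it covers only the chunk_will_survive test, not the third case where the chunk is shrunk out of existence; the values in main are made up):

#include <stdio.h>

/* bs            : size of the last (free) block, in BAUs
** prev_baus     : size of the block before it (0 => it is the first block)
** n             : number of BAUs to shrink
** min_block_baus: smallest legal block, per MIN_BLOCK_BAUS */
static int chunk_will_survive(unsigned bs, unsigned prev_baus,
                              unsigned n, unsigned min_block_baus) {
  return (prev_baus && (n == bs)) ||    /* case 1: exact removal of last block */
         (n <= bs - min_block_baus);    /* case 2: remainder is still a legal block */
}

int main(void) {
  /* With a 10-BAU trailing block and a 3-BAU minimum, shrinking by 7
  ** leaves exactly a minimal block, so the chunk survives. */
  printf("%d\n", chunk_will_survive(10, 4, 7, 3));  /* prints 1 */
  printf("%d\n", chunk_will_survive(10, 4, 8, 3));  /* prints 0 */
  return 0;
}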
diff --git a/libvpx/vpx_mem/memory_manager/hmm_true.c b/libvpx/vpx_mem/memory_manager/hmm_true.c
index 3f7be8f..4428c3e 100644
--- a/libvpx/vpx_mem/memory_manager/hmm_true.c
+++ b/libvpx/vpx_mem/memory_manager/hmm_true.c
@@ -15,18 +15,17 @@
#include "hmm_intrnl.h"
-U(size_aau) U(true_size)(void *payload_ptr)
-{
- register head_record *head_ptr = PTR_REC_TO_HEAD(payload_ptr);
+U(size_aau) U(true_size)(void *payload_ptr) {
+ register head_record *head_ptr = PTR_REC_TO_HEAD(payload_ptr);
#ifdef HMM_AUDIT_FAIL
- AUDIT_BLOCK(head_ptr)
+ AUDIT_BLOCK(head_ptr)
#endif
- /* Convert block size from BAUs to AAUs. Subtract head size, leaving
- ** payload size.
- */
- return(
- (BLOCK_BAUS(head_ptr) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT)) -
- HEAD_AAUS);
+ /* Convert block size from BAUs to AAUs. Subtract head size, leaving
+ ** payload size.
+ */
+ return(
+ (BLOCK_BAUS(head_ptr) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT)) -
+ HEAD_AAUS);
}
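
true_size converts the stored block size from BAUs to address-alignment units (AAUs) and subtracts the header, leaving the payload. A worked example with hypothetical unit sizes (the real values come from HMM_BLOCK_ALIGN_UNIT and HEAD_AAUS):

#include <stdio.h>

int main(void) {
  unsigned block_align_unit = 4;  /* hypothetical: 1 BAU = 4 AAUs */
  unsigned head_aaus = 1;         /* hypothetical: header occupies 1 AAU */
  unsigned block_baus = 8;        /* block size as stored in the head record */

  /* payload (AAUs) = block size (BAUs) * AAUs-per-BAU - header AAUs */
  unsigned payload_aaus = block_baus * block_align_unit - head_aaus;
  printf("payload = %u AAUs\n", payload_aaus);  /* 8*4 - 1 = 31 */
  return 0;
}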
diff --git a/libvpx/vpx_mem/memory_manager/include/cavl_if.h b/libvpx/vpx_mem/memory_manager/include/cavl_if.h
index 1b2c9b7..ec6e525 100644
--- a/libvpx/vpx_mem/memory_manager/include/cavl_if.h
+++ b/libvpx/vpx_mem/memory_manager/include/cavl_if.h
@@ -32,13 +32,12 @@
#ifndef AVL_SEARCH_TYPE_DEFINED_
#define AVL_SEARCH_TYPE_DEFINED_
-typedef enum
-{
- AVL_EQUAL = 1,
- AVL_LESS = 2,
- AVL_GREATER = 4,
- AVL_LESS_EQUAL = AVL_EQUAL | AVL_LESS,
- AVL_GREATER_EQUAL = AVL_EQUAL | AVL_GREATER
+typedef enum {
+ AVL_EQUAL = 1,
+ AVL_LESS = 2,
+ AVL_GREATER = 4,
+ AVL_LESS_EQUAL = AVL_EQUAL | AVL_LESS,
+ AVL_GREATER_EQUAL = AVL_EQUAL | AVL_GREATER
}
avl_search_type;
@@ -75,15 +74,14 @@ avl_search_type;
#endif
-typedef struct
-{
+typedef struct {
#ifdef AVL_INSIDE_STRUCT
- AVL_INSIDE_STRUCT
+ AVL_INSIDE_STRUCT
#endif
- AVL_HANDLE root;
+ AVL_HANDLE root;
}
L_(avl);
@@ -108,7 +106,7 @@ L_SC AVL_HANDLE L_(subst)(L_(avl) *tree, AVL_HANDLE new_node);
#ifdef AVL_BUILD_ITER_TYPE
L_SC int L_(build)(
- L_(avl) *tree, AVL_BUILD_ITER_TYPE p, L_SIZE num_nodes);
+ L_(avl) *tree, AVL_BUILD_ITER_TYPE p, L_SIZE num_nodes);
#endif
@@ -153,7 +151,7 @@ L_SC int L_(build)(
/* Maximum depth may be more than number of bits in a long. */
#define L_BIT_ARR_DEFN(NAME) \
- unsigned long NAME[((AVL_MAX_DEPTH) + L_LONG_BIT - 1) / L_LONG_BIT];
+ unsigned long NAME[((AVL_MAX_DEPTH) + L_LONG_BIT - 1) / L_LONG_BIT];
#else
@@ -164,29 +162,28 @@ L_SC int L_(build)(
#endif
/* Iterator structure. */
-typedef struct
-{
- /* Tree being iterated over. */
- L_(avl) *tree_;
-
- /* Records a path into the tree. If bit n is true, indicates
- ** take greater branch from the nth node in the path, otherwise
- ** take the less branch. bit 0 gives branch from root, and
- ** so on. */
- L_BIT_ARR_DEFN(branch)
-
- /* Zero-based depth of path into tree. */
- unsigned depth;
-
- /* Handles of nodes in path from root to current node (returned by *). */
- AVL_HANDLE path_h[(AVL_MAX_DEPTH) - 1];
+typedef struct {
+ /* Tree being iterated over. */
+ L_(avl) *tree_;
+
+ /* Records a path into the tree. If bit n is true, indicates
+ ** take greater branch from the nth node in the path, otherwise
+ ** take the less branch. bit 0 gives branch from root, and
+ ** so on. */
+ L_BIT_ARR_DEFN(branch)
+
+ /* Zero-based depth of path into tree. */
+ unsigned depth;
+
+ /* Handles of nodes in path from root to current node (returned by *). */
+ AVL_HANDLE path_h[(AVL_MAX_DEPTH) - 1];
}
L_(iter);
/* Iterator function prototypes. */
L_SC void L_(start_iter)(
- L_(avl) *tree, L_(iter) *iter, AVL_KEY k, avl_search_type st);
+ L_(avl) *tree, L_(iter) *iter, AVL_KEY k, avl_search_type st);
L_SC void L_(start_iter_least)(L_(avl) *tree, L_(iter) *iter);
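
The avl_search_type enum above encodes search modes as bit flags, so compound modes are simply unions of the basic ones. A small sketch of how a caller can decompose a mode, the same way L_(search) tests st & AVL_LESS:

#include <stdio.h>

typedef enum {
  AVL_EQUAL = 1,
  AVL_LESS = 2,
  AVL_GREATER = 4,
  AVL_LESS_EQUAL = AVL_EQUAL | AVL_LESS,
  AVL_GREATER_EQUAL = AVL_EQUAL | AVL_GREATER
} avl_search_type;

int main(void) {
  avl_search_type st = AVL_LESS_EQUAL;

  /* Each component of a compound mode can be tested independently. */
  if (st & AVL_EQUAL)      printf("exact match acceptable\n");
  if (st & AVL_LESS)       printf("next-smaller key acceptable\n");
  if (!(st & AVL_GREATER)) printf("next-larger key not acceptable\n");
  return 0;
}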
diff --git a/libvpx/vpx_mem/memory_manager/include/cavl_impl.h b/libvpx/vpx_mem/memory_manager/include/cavl_impl.h
index 5e165dd..cf7deb7 100644
--- a/libvpx/vpx_mem/memory_manager/include/cavl_impl.h
+++ b/libvpx/vpx_mem/memory_manager/include/cavl_impl.h
@@ -110,16 +110,16 @@
#define L_BIT_ARR_DEFN(NAME) unsigned long NAME[L_BIT_ARR_LONGS];
#define L_BIT_ARR_VAL(BIT_ARR, BIT_NUM) \
- ((BIT_ARR)[(BIT_NUM) / L_LONG_BIT] & (1L << ((BIT_NUM) % L_LONG_BIT)))
+ ((BIT_ARR)[(BIT_NUM) / L_LONG_BIT] & (1L << ((BIT_NUM) % L_LONG_BIT)))
#define L_BIT_ARR_0(BIT_ARR, BIT_NUM) \
- (BIT_ARR)[(BIT_NUM) / L_LONG_BIT] &= ~(1L << ((BIT_NUM) % L_LONG_BIT));
+ (BIT_ARR)[(BIT_NUM) / L_LONG_BIT] &= ~(1L << ((BIT_NUM) % L_LONG_BIT));
#define L_BIT_ARR_1(BIT_ARR, BIT_NUM) \
- (BIT_ARR)[(BIT_NUM) / L_LONG_BIT] |= 1L << ((BIT_NUM) % L_LONG_BIT);
+ (BIT_ARR)[(BIT_NUM) / L_LONG_BIT] |= 1L << ((BIT_NUM) % L_LONG_BIT);
#define L_BIT_ARR_ALL(BIT_ARR, BIT_VAL) \
- { int i = L_BIT_ARR_LONGS; do (BIT_ARR)[--i] = 0L - (BIT_VAL); while(i); }
+ { int i = L_BIT_ARR_LONGS; do (BIT_ARR)[--i] = 0L - (BIT_VAL); while(i); }
#else /* The bit array can definitely fit in one long */
@@ -138,7 +138,7 @@
#ifdef AVL_READ_ERRORS_HAPPEN
#define L_CHECK_READ_ERROR(ERROR_RETURN) \
- { if (AVL_READ_ERROR) return(ERROR_RETURN); }
+ { if (AVL_READ_ERROR) return(ERROR_RETURN); }
#else
@@ -179,18 +179,16 @@
#if (L_IMPL_MASK & AVL_IMPL_INIT)
-L_SC void L_(init)(L_(avl) *l_tree)
-{
- l_tree->root = AVL_NULL;
+L_SC void L_(init)(L_(avl) *l_tree) {
+ l_tree->root = AVL_NULL;
}
#endif
#if (L_IMPL_MASK & AVL_IMPL_IS_EMPTY)
-L_SC int L_(is_empty)(L_(avl) *l_tree)
-{
- return(l_tree->root == AVL_NULL);
+L_SC int L_(is_empty)(L_(avl) *l_tree) {
+ return(l_tree->root == AVL_NULL);
}
#endif
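
The L_BIT_ARR_* macros reformatted above pack one branch decision per tree level into an array of longs, which is how the insert, remove, and iterator code records a root-to-node path without parent pointers. The same bit arithmetic written as standalone functions (function names are illustrative):

#include <limits.h>
#include <stdio.h>

#define LONG_BITS (sizeof(long) * CHAR_BIT)
#define MAX_DEPTH 64

static unsigned long branch[(MAX_DEPTH + LONG_BITS - 1) / LONG_BITS];

static void bit_set(unsigned n)   { branch[n / LONG_BITS] |=  (1UL << (n % LONG_BITS)); }
static void bit_clear(unsigned n) { branch[n / LONG_BITS] &= ~(1UL << (n % LONG_BITS)); }
static int  bit_val(unsigned n)   { return (branch[n / LONG_BITS] >> (n % LONG_BITS)) & 1; }

int main(void) {
  /* Record "greater" at depths 0 and 33, "less" at depth 1. */
  bit_set(0); bit_clear(1); bit_set(33);
  printf("%d %d %d\n", bit_val(0), bit_val(1), bit_val(33));  /* 1 0 1 */
  return 0;
}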
@@ -201,358 +199,305 @@ L_SC int L_(is_empty)(L_(avl) *l_tree)
/* Balances subtree, returns handle of root node of subtree after balancing.
*/
-L_SC AVL_HANDLE L_(balance)(L_BALANCE_PARAM_DECL_PREFIX AVL_HANDLE bal_h)
-{
- AVL_HANDLE deep_h;
+L_SC AVL_HANDLE L_(balance)(L_BALANCE_PARAM_DECL_PREFIX AVL_HANDLE bal_h) {
+ AVL_HANDLE deep_h;
- /* Either the "greater than" or the "less than" subtree of
- ** this node has to be 2 levels deeper (or else it wouldn't
- ** need balancing).
- */
- if (AVL_GET_BALANCE_FACTOR(bal_h) > 0)
- {
- /* "Greater than" subtree is deeper. */
+ /* Either the "greater than" or the "less than" subtree of
+ ** this node has to be 2 levels deeper (or else it wouldn't
+ ** need balancing).
+ */
+ if (AVL_GET_BALANCE_FACTOR(bal_h) > 0) {
+ /* "Greater than" subtree is deeper. */
- deep_h = AVL_GET_GREATER(bal_h, 1);
+ deep_h = AVL_GET_GREATER(bal_h, 1);
- L_CHECK_READ_ERROR(AVL_NULL)
+ L_CHECK_READ_ERROR(AVL_NULL)
- if (AVL_GET_BALANCE_FACTOR(deep_h) < 0)
- {
- int bf;
-
- AVL_HANDLE old_h = bal_h;
- bal_h = AVL_GET_LESS(deep_h, 1);
- L_CHECK_READ_ERROR(AVL_NULL)
- AVL_SET_GREATER(old_h, AVL_GET_LESS(bal_h, 1))
- AVL_SET_LESS(deep_h, AVL_GET_GREATER(bal_h, 1))
- AVL_SET_LESS(bal_h, old_h)
- AVL_SET_GREATER(bal_h, deep_h)
-
- bf = AVL_GET_BALANCE_FACTOR(bal_h);
-
- if (bf != 0)
- {
- if (bf > 0)
- {
- AVL_SET_BALANCE_FACTOR(old_h, -1)
- AVL_SET_BALANCE_FACTOR(deep_h, 0)
- }
- else
- {
- AVL_SET_BALANCE_FACTOR(deep_h, 1)
- AVL_SET_BALANCE_FACTOR(old_h, 0)
- }
-
- AVL_SET_BALANCE_FACTOR(bal_h, 0)
- }
- else
- {
- AVL_SET_BALANCE_FACTOR(old_h, 0)
- AVL_SET_BALANCE_FACTOR(deep_h, 0)
- }
- }
- else
- {
- AVL_SET_GREATER(bal_h, AVL_GET_LESS(deep_h, 0))
- AVL_SET_LESS(deep_h, bal_h)
-
- if (AVL_GET_BALANCE_FACTOR(deep_h) == 0)
- {
- AVL_SET_BALANCE_FACTOR(deep_h, -1)
- AVL_SET_BALANCE_FACTOR(bal_h, 1)
- }
- else
- {
- AVL_SET_BALANCE_FACTOR(deep_h, 0)
- AVL_SET_BALANCE_FACTOR(bal_h, 0)
- }
-
- bal_h = deep_h;
+ if (AVL_GET_BALANCE_FACTOR(deep_h) < 0) {
+ int bf;
+
+ AVL_HANDLE old_h = bal_h;
+ bal_h = AVL_GET_LESS(deep_h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ AVL_SET_GREATER(old_h, AVL_GET_LESS(bal_h, 1))
+ AVL_SET_LESS(deep_h, AVL_GET_GREATER(bal_h, 1))
+ AVL_SET_LESS(bal_h, old_h)
+ AVL_SET_GREATER(bal_h, deep_h)
+
+ bf = AVL_GET_BALANCE_FACTOR(bal_h);
+
+ if (bf != 0) {
+ if (bf > 0) {
+ AVL_SET_BALANCE_FACTOR(old_h, -1)
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ } else {
+ AVL_SET_BALANCE_FACTOR(deep_h, 1)
+ AVL_SET_BALANCE_FACTOR(old_h, 0)
}
+
+ AVL_SET_BALANCE_FACTOR(bal_h, 0)
+ } else {
+ AVL_SET_BALANCE_FACTOR(old_h, 0)
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ }
+ } else {
+ AVL_SET_GREATER(bal_h, AVL_GET_LESS(deep_h, 0))
+ AVL_SET_LESS(deep_h, bal_h)
+
+ if (AVL_GET_BALANCE_FACTOR(deep_h) == 0) {
+ AVL_SET_BALANCE_FACTOR(deep_h, -1)
+ AVL_SET_BALANCE_FACTOR(bal_h, 1)
+ } else {
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ AVL_SET_BALANCE_FACTOR(bal_h, 0)
+ }
+
+ bal_h = deep_h;
}
- else
- {
- /* "Less than" subtree is deeper. */
+ } else {
+ /* "Less than" subtree is deeper. */
- deep_h = AVL_GET_LESS(bal_h, 1);
- L_CHECK_READ_ERROR(AVL_NULL)
+ deep_h = AVL_GET_LESS(bal_h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
- if (AVL_GET_BALANCE_FACTOR(deep_h) > 0)
- {
- int bf;
- AVL_HANDLE old_h = bal_h;
- bal_h = AVL_GET_GREATER(deep_h, 1);
- L_CHECK_READ_ERROR(AVL_NULL)
- AVL_SET_LESS(old_h, AVL_GET_GREATER(bal_h, 0))
- AVL_SET_GREATER(deep_h, AVL_GET_LESS(bal_h, 0))
- AVL_SET_GREATER(bal_h, old_h)
- AVL_SET_LESS(bal_h, deep_h)
-
- bf = AVL_GET_BALANCE_FACTOR(bal_h);
-
- if (bf != 0)
- {
- if (bf < 0)
- {
- AVL_SET_BALANCE_FACTOR(old_h, 1)
- AVL_SET_BALANCE_FACTOR(deep_h, 0)
- }
- else
- {
- AVL_SET_BALANCE_FACTOR(deep_h, -1)
- AVL_SET_BALANCE_FACTOR(old_h, 0)
- }
-
- AVL_SET_BALANCE_FACTOR(bal_h, 0)
- }
- else
- {
- AVL_SET_BALANCE_FACTOR(old_h, 0)
- AVL_SET_BALANCE_FACTOR(deep_h, 0)
- }
- }
- else
- {
- AVL_SET_LESS(bal_h, AVL_GET_GREATER(deep_h, 0))
- AVL_SET_GREATER(deep_h, bal_h)
-
- if (AVL_GET_BALANCE_FACTOR(deep_h) == 0)
- {
- AVL_SET_BALANCE_FACTOR(deep_h, 1)
- AVL_SET_BALANCE_FACTOR(bal_h, -1)
- }
- else
- {
- AVL_SET_BALANCE_FACTOR(deep_h, 0)
- AVL_SET_BALANCE_FACTOR(bal_h, 0)
- }
-
- bal_h = deep_h;
+ if (AVL_GET_BALANCE_FACTOR(deep_h) > 0) {
+ int bf;
+ AVL_HANDLE old_h = bal_h;
+ bal_h = AVL_GET_GREATER(deep_h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ AVL_SET_LESS(old_h, AVL_GET_GREATER(bal_h, 0))
+ AVL_SET_GREATER(deep_h, AVL_GET_LESS(bal_h, 0))
+ AVL_SET_GREATER(bal_h, old_h)
+ AVL_SET_LESS(bal_h, deep_h)
+
+ bf = AVL_GET_BALANCE_FACTOR(bal_h);
+
+ if (bf != 0) {
+ if (bf < 0) {
+ AVL_SET_BALANCE_FACTOR(old_h, 1)
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ } else {
+ AVL_SET_BALANCE_FACTOR(deep_h, -1)
+ AVL_SET_BALANCE_FACTOR(old_h, 0)
}
+
+ AVL_SET_BALANCE_FACTOR(bal_h, 0)
+ } else {
+ AVL_SET_BALANCE_FACTOR(old_h, 0)
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ }
+ } else {
+ AVL_SET_LESS(bal_h, AVL_GET_GREATER(deep_h, 0))
+ AVL_SET_GREATER(deep_h, bal_h)
+
+ if (AVL_GET_BALANCE_FACTOR(deep_h) == 0) {
+ AVL_SET_BALANCE_FACTOR(deep_h, 1)
+ AVL_SET_BALANCE_FACTOR(bal_h, -1)
+ } else {
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ AVL_SET_BALANCE_FACTOR(bal_h, 0)
+ }
+
+ bal_h = deep_h;
}
+ }
- return(bal_h);
+ return(bal_h);
}
-L_SC AVL_HANDLE L_(insert)(L_(avl) *l_tree, AVL_HANDLE h)
-{
- AVL_SET_LESS(h, AVL_NULL)
- AVL_SET_GREATER(h, AVL_NULL)
- AVL_SET_BALANCE_FACTOR(h, 0)
+L_SC AVL_HANDLE L_(insert)(L_(avl) *l_tree, AVL_HANDLE h) {
+ AVL_SET_LESS(h, AVL_NULL)
+ AVL_SET_GREATER(h, AVL_NULL)
+ AVL_SET_BALANCE_FACTOR(h, 0)
- if (l_tree->root == AVL_NULL)
- l_tree->root = h;
- else
- {
- /* Last unbalanced node encountered in search for insertion point. */
- AVL_HANDLE unbal = AVL_NULL;
- /* Parent of last unbalanced node. */
- AVL_HANDLE parent_unbal = AVL_NULL;
- /* Balance factor of last unbalanced node. */
- int unbal_bf;
-
- /* Zero-based depth in tree. */
- unsigned depth = 0, unbal_depth = 0;
-
- /* Records a path into the tree. If bit n is true, indicates
- ** take greater branch from the nth node in the path, otherwise
- ** take the less branch. bit 0 gives branch from root, and
- ** so on. */
- L_BIT_ARR_DEFN(branch)
-
- AVL_HANDLE hh = l_tree->root;
- AVL_HANDLE parent = AVL_NULL;
- int cmp;
-
- do
- {
- if (AVL_GET_BALANCE_FACTOR(hh) != 0)
- {
- unbal = hh;
- parent_unbal = parent;
- unbal_depth = depth;
- }
-
- cmp = AVL_COMPARE_NODE_NODE(h, hh);
-
- if (cmp == 0)
- /* Duplicate key. */
- return(hh);
-
- parent = hh;
-
- if (cmp > 0)
- {
- hh = AVL_GET_GREATER(hh, 1);
- L_BIT_ARR_1(branch, depth)
- }
- else
- {
- hh = AVL_GET_LESS(hh, 1);
- L_BIT_ARR_0(branch, depth)
- }
-
- L_CHECK_READ_ERROR(AVL_NULL)
- depth++;
- }
- while (hh != AVL_NULL);
+ if (l_tree->root == AVL_NULL)
+ l_tree->root = h;
+ else {
+ /* Last unbalanced node encountered in search for insertion point. */
+ AVL_HANDLE unbal = AVL_NULL;
+ /* Parent of last unbalanced node. */
+ AVL_HANDLE parent_unbal = AVL_NULL;
+ /* Balance factor of last unbalanced node. */
+ int unbal_bf;
- /* Add node to insert as leaf of tree. */
- if (cmp < 0)
- AVL_SET_LESS(parent, h)
- else
- AVL_SET_GREATER(parent, h)
+ /* Zero-based depth in tree. */
+ unsigned depth = 0, unbal_depth = 0;
- depth = unbal_depth;
+ /* Records a path into the tree. If bit n is true, indicates
+ ** take greater branch from the nth node in the path, otherwise
+ ** take the less branch. bit 0 gives branch from root, and
+ ** so on. */
+ L_BIT_ARR_DEFN(branch)
- if (unbal == AVL_NULL)
- hh = l_tree->root;
- else
- {
- cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
- depth++;
- unbal_bf = AVL_GET_BALANCE_FACTOR(unbal);
-
- if (cmp < 0)
- unbal_bf--;
- else /* cmp > 0 */
- unbal_bf++;
-
- hh = cmp < 0 ? AVL_GET_LESS(unbal, 1) : AVL_GET_GREATER(unbal, 1);
- L_CHECK_READ_ERROR(AVL_NULL)
-
- if ((unbal_bf != -2) && (unbal_bf != 2))
- {
- /* No rebalancing of tree is necessary. */
- AVL_SET_BALANCE_FACTOR(unbal, unbal_bf)
- unbal = AVL_NULL;
- }
- }
+ AVL_HANDLE hh = l_tree->root;
+ AVL_HANDLE parent = AVL_NULL;
+ int cmp;
- if (hh != AVL_NULL)
- while (h != hh)
- {
- cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
- depth++;
-
- if (cmp < 0)
- {
- AVL_SET_BALANCE_FACTOR(hh, -1)
- hh = AVL_GET_LESS(hh, 1);
- }
- else /* cmp > 0 */
- {
- AVL_SET_BALANCE_FACTOR(hh, 1)
- hh = AVL_GET_GREATER(hh, 1);
- }
-
- L_CHECK_READ_ERROR(AVL_NULL)
- }
-
- if (unbal != AVL_NULL)
- {
- unbal = L_(balance)(L_BALANCE_PARAM_CALL_PREFIX unbal);
- L_CHECK_READ_ERROR(AVL_NULL)
-
- if (parent_unbal == AVL_NULL)
- l_tree->root = unbal;
- else
- {
- depth = unbal_depth - 1;
- cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
-
- if (cmp < 0)
- AVL_SET_LESS(parent_unbal, unbal)
- else /* cmp > 0 */
- AVL_SET_GREATER(parent_unbal, unbal)
- }
- }
+ do {
+ if (AVL_GET_BALANCE_FACTOR(hh) != 0) {
+ unbal = hh;
+ parent_unbal = parent;
+ unbal_depth = depth;
+ }
- }
+ cmp = AVL_COMPARE_NODE_NODE(h, hh);
- return(h);
-}
+ if (cmp == 0)
+ /* Duplicate key. */
+ return(hh);
-#endif
+ parent = hh;
-#if (L_IMPL_MASK & AVL_IMPL_SEARCH)
+ if (cmp > 0) {
+ hh = AVL_GET_GREATER(hh, 1);
+ L_BIT_ARR_1(branch, depth)
+ } else {
+ hh = AVL_GET_LESS(hh, 1);
+ L_BIT_ARR_0(branch, depth)
+ }
+
+ L_CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+ } while (hh != AVL_NULL);
+
+ /* Add node to insert as leaf of tree. */
+ if (cmp < 0)
+ AVL_SET_LESS(parent, h)
+ else
+ AVL_SET_GREATER(parent, h)
+
+ depth = unbal_depth;
+
+ if (unbal == AVL_NULL)
+ hh = l_tree->root;
+ else {
+ cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
+ depth++;
+ unbal_bf = AVL_GET_BALANCE_FACTOR(unbal);
+
+ if (cmp < 0)
+ unbal_bf--;
+ else /* cmp > 0 */
+ unbal_bf++;
+
+ hh = cmp < 0 ? AVL_GET_LESS(unbal, 1) : AVL_GET_GREATER(unbal, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+
+ if ((unbal_bf != -2) && (unbal_bf != 2)) {
+ /* No rebalancing of tree is necessary. */
+ AVL_SET_BALANCE_FACTOR(unbal, unbal_bf)
+ unbal = AVL_NULL;
+ }
+ }
-L_SC AVL_HANDLE L_(search)(L_(avl) *l_tree, AVL_KEY k, avl_search_type st)
-{
- int cmp, target_cmp;
- AVL_HANDLE match_h = AVL_NULL;
- AVL_HANDLE h = l_tree->root;
+ if (hh != AVL_NULL)
+ while (h != hh) {
+ cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
+ depth++;
- if (st & AVL_LESS)
- target_cmp = 1;
- else if (st & AVL_GREATER)
- target_cmp = -1;
- else
- target_cmp = 0;
+ if (cmp < 0) {
+ AVL_SET_BALANCE_FACTOR(hh, -1)
+ hh = AVL_GET_LESS(hh, 1);
+ } else { /* cmp > 0 */
+ AVL_SET_BALANCE_FACTOR(hh, 1)
+ hh = AVL_GET_GREATER(hh, 1);
+ }
- while (h != AVL_NULL)
- {
- cmp = AVL_COMPARE_KEY_NODE(k, h);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ }
- if (cmp == 0)
- {
- if (st & AVL_EQUAL)
- {
- match_h = h;
- break;
- }
+ if (unbal != AVL_NULL) {
+ unbal = L_(balance)(L_BALANCE_PARAM_CALL_PREFIX unbal);
+ L_CHECK_READ_ERROR(AVL_NULL)
- cmp = -target_cmp;
- }
- else if (target_cmp != 0)
- if (!((cmp ^ target_cmp) & L_MASK_HIGH_BIT))
- /* cmp and target_cmp are both positive or both negative. */
- match_h = h;
+ if (parent_unbal == AVL_NULL)
+ l_tree->root = unbal;
+ else {
+ depth = unbal_depth - 1;
+ cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
- h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1);
- L_CHECK_READ_ERROR(AVL_NULL)
+ if (cmp < 0)
+ AVL_SET_LESS(parent_unbal, unbal)
+ else /* cmp > 0 */
+ AVL_SET_GREATER(parent_unbal, unbal)
+ }
}
- return(match_h);
+ }
+
+ return(h);
+}
+
+#endif
+
+#if (L_IMPL_MASK & AVL_IMPL_SEARCH)
+
+L_SC AVL_HANDLE L_(search)(L_(avl) *l_tree, AVL_KEY k, avl_search_type st) {
+ int cmp, target_cmp;
+ AVL_HANDLE match_h = AVL_NULL;
+ AVL_HANDLE h = l_tree->root;
+
+ if (st & AVL_LESS)
+ target_cmp = 1;
+ else if (st & AVL_GREATER)
+ target_cmp = -1;
+ else
+ target_cmp = 0;
+
+ while (h != AVL_NULL) {
+ cmp = AVL_COMPARE_KEY_NODE(k, h);
+
+ if (cmp == 0) {
+ if (st & AVL_EQUAL) {
+ match_h = h;
+ break;
+ }
+
+ cmp = -target_cmp;
+ } else if (target_cmp != 0)
+ if (!((cmp ^ target_cmp) & L_MASK_HIGH_BIT))
+ /* cmp and target_cmp are both positive or both negative. */
+ match_h = h;
+
+ h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ }
+
+ return(match_h);
}
#endif
#if (L_IMPL_MASK & AVL_IMPL_SEARCH_LEAST)
-L_SC AVL_HANDLE L_(search_least)(L_(avl) *l_tree)
-{
- AVL_HANDLE h = l_tree->root;
- AVL_HANDLE parent = AVL_NULL;
+L_SC AVL_HANDLE L_(search_least)(L_(avl) *l_tree) {
+ AVL_HANDLE h = l_tree->root;
+ AVL_HANDLE parent = AVL_NULL;
- while (h != AVL_NULL)
- {
- parent = h;
- h = AVL_GET_LESS(h, 1);
- L_CHECK_READ_ERROR(AVL_NULL)
- }
+ while (h != AVL_NULL) {
+ parent = h;
+ h = AVL_GET_LESS(h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ }
- return(parent);
+ return(parent);
}
#endif
#if (L_IMPL_MASK & AVL_IMPL_SEARCH_GREATEST)
-L_SC AVL_HANDLE L_(search_greatest)(L_(avl) *l_tree)
-{
- AVL_HANDLE h = l_tree->root;
- AVL_HANDLE parent = AVL_NULL;
+L_SC AVL_HANDLE L_(search_greatest)(L_(avl) *l_tree) {
+ AVL_HANDLE h = l_tree->root;
+ AVL_HANDLE parent = AVL_NULL;
- while (h != AVL_NULL)
- {
- parent = h;
- h = AVL_GET_GREATER(h, 1);
- L_CHECK_READ_ERROR(AVL_NULL)
- }
+ while (h != AVL_NULL) {
+ parent = h;
+ h = AVL_GET_GREATER(h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ }
- return(parent);
+ return(parent);
}
#endif
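
The search code above uses !((cmp ^ target_cmp) & L_MASK_HIGH_BIT) to test whether a comparison result and the target direction share a sign: XOR leaves the sign bit clear exactly when the signs agree. (Zero counts as non-negative, which is safe here because cmp == 0 is handled before the test.) A standalone demonstration:

#include <stdio.h>

/* Mask of the sign bit of int, analogous to L_MASK_HIGH_BIT. */
#define MASK_HIGH_BIT (~(~0U >> 1))

/* Non-zero when a and b are both negative or both non-negative. */
static int same_sign(int a, int b) {
  return !(((unsigned)a ^ (unsigned)b) & MASK_HIGH_BIT);
}

int main(void) {
  printf("%d\n", same_sign(-3, -7)); /* 1 */
  printf("%d\n", same_sign(-3, 5));  /* 0 */
  printf("%d\n", same_sign(2, 9));   /* 1 */
  return 0;
}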
@@ -564,284 +509,253 @@ L_SC AVL_HANDLE L_(search_greatest)(L_(avl) *l_tree)
*/
L_SC AVL_HANDLE L_(balance)(L_BALANCE_PARAM_DECL_PREFIX AVL_HANDLE bal_h);
-L_SC AVL_HANDLE L_(remove)(L_(avl) *l_tree, AVL_KEY k)
-{
- /* Zero-based depth in tree. */
- unsigned depth = 0, rm_depth;
-
- /* Records a path into the tree. If bit n is true, indicates
- ** take greater branch from the nth node in the path, otherwise
- ** take the less branch. bit 0 gives branch from root, and
- ** so on. */
- L_BIT_ARR_DEFN(branch)
-
- AVL_HANDLE h = l_tree->root;
- AVL_HANDLE parent = AVL_NULL;
- AVL_HANDLE child;
- AVL_HANDLE path;
- int cmp, cmp_shortened_sub_with_path;
- int reduced_depth;
- int bf;
- AVL_HANDLE rm;
- AVL_HANDLE parent_rm;
-
- for (; ;)
- {
- if (h == AVL_NULL)
- /* No node in tree with given key. */
- return(AVL_NULL);
-
- cmp = AVL_COMPARE_KEY_NODE(k, h);
+L_SC AVL_HANDLE L_(remove)(L_(avl) *l_tree, AVL_KEY k) {
+ /* Zero-based depth in tree. */
+ unsigned depth = 0, rm_depth;
+
+ /* Records a path into the tree. If bit n is true, indicates
+ ** take greater branch from the nth node in the path, otherwise
+ ** take the less branch. bit 0 gives branch from root, and
+ ** so on. */
+ L_BIT_ARR_DEFN(branch)
+
+ AVL_HANDLE h = l_tree->root;
+ AVL_HANDLE parent = AVL_NULL;
+ AVL_HANDLE child;
+ AVL_HANDLE path;
+ int cmp, cmp_shortened_sub_with_path;
+ int reduced_depth;
+ int bf;
+ AVL_HANDLE rm;
+ AVL_HANDLE parent_rm;
+
+ for (;;) {
+ if (h == AVL_NULL)
+ /* No node in tree with given key. */
+ return(AVL_NULL);
- if (cmp == 0)
- /* Found node to remove. */
- break;
+ cmp = AVL_COMPARE_KEY_NODE(k, h);
- parent = h;
+ if (cmp == 0)
+ /* Found node to remove. */
+ break;
- if (cmp > 0)
- {
- h = AVL_GET_GREATER(h, 1);
- L_BIT_ARR_1(branch, depth)
- }
- else
- {
- h = AVL_GET_LESS(h, 1);
- L_BIT_ARR_0(branch, depth)
- }
+ parent = h;
- L_CHECK_READ_ERROR(AVL_NULL)
- depth++;
- cmp_shortened_sub_with_path = cmp;
+ if (cmp > 0) {
+ h = AVL_GET_GREATER(h, 1);
+ L_BIT_ARR_1(branch, depth)
+ } else {
+ h = AVL_GET_LESS(h, 1);
+ L_BIT_ARR_0(branch, depth)
}
- rm = h;
- parent_rm = parent;
- rm_depth = depth;
-
- /* If the node to remove is not a leaf node, we need to get a
- ** leaf node, or a node with a single leaf as its child, to put
- ** in the place of the node to remove. We will get the greatest
- ** node in the less subtree (of the node to remove), or the least
- ** node in the greater subtree. We take the leaf node from the
- ** deeper subtree, if there is one. */
-
- if (AVL_GET_BALANCE_FACTOR(h) < 0)
- {
+ L_CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+ cmp_shortened_sub_with_path = cmp;
+ }
+
+ rm = h;
+ parent_rm = parent;
+ rm_depth = depth;
+
+ /* If the node to remove is not a leaf node, we need to get a
+ ** leaf node, or a node with a single leaf as its child, to put
+ ** in the place of the node to remove. We will get the greatest
+ ** node in the less subtree (of the node to remove), or the least
+ ** node in the greater subtree. We take the leaf node from the
+ ** deeper subtree, if there is one. */
+
+ if (AVL_GET_BALANCE_FACTOR(h) < 0) {
+ child = AVL_GET_LESS(h, 1);
+ L_BIT_ARR_0(branch, depth)
+ cmp = -1;
+ } else {
+ child = AVL_GET_GREATER(h, 1);
+ L_BIT_ARR_1(branch, depth)
+ cmp = 1;
+ }
+
+ L_CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+
+ if (child != AVL_NULL) {
+ cmp = -cmp;
+
+ do {
+ parent = h;
+ h = child;
+
+ if (cmp < 0) {
child = AVL_GET_LESS(h, 1);
L_BIT_ARR_0(branch, depth)
- cmp = -1;
- }
- else
- {
+ } else {
child = AVL_GET_GREATER(h, 1);
L_BIT_ARR_1(branch, depth)
- cmp = 1;
- }
+ }
- L_CHECK_READ_ERROR(AVL_NULL)
- depth++;
+ L_CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+ } while (child != AVL_NULL);
- if (child != AVL_NULL)
- {
- cmp = -cmp;
-
- do
- {
- parent = h;
- h = child;
-
- if (cmp < 0)
- {
- child = AVL_GET_LESS(h, 1);
- L_BIT_ARR_0(branch, depth)
- }
- else
- {
- child = AVL_GET_GREATER(h, 1);
- L_BIT_ARR_1(branch, depth)
- }
-
- L_CHECK_READ_ERROR(AVL_NULL)
- depth++;
- }
- while (child != AVL_NULL);
+ if (parent == rm)
+ /* Only went through do loop once. Deleted node will be replaced
+ ** in the tree structure by one of its immediate children. */
+ cmp_shortened_sub_with_path = -cmp;
+ else
+ cmp_shortened_sub_with_path = cmp;
+
+ /* Get the handle of the opposite child, which may not be null. */
+ child = cmp > 0 ? AVL_GET_LESS(h, 0) : AVL_GET_GREATER(h, 0);
+ }
- if (parent == rm)
- /* Only went through do loop once. Deleted node will be replaced
- ** in the tree structure by one of its immediate children. */
- cmp_shortened_sub_with_path = -cmp;
+ if (parent == AVL_NULL)
+ /* There were only 1 or 2 nodes in this tree. */
+ l_tree->root = child;
+ else if (cmp_shortened_sub_with_path < 0)
+ AVL_SET_LESS(parent, child)
+ else
+ AVL_SET_GREATER(parent, child)
+
+ /* "path" is the parent of the subtree being eliminated or reduced
+ ** from a depth of 2 to 1. If "path" is the node to be removed, we
+ ** set path to the node we're about to poke into the position of the
+ ** node to be removed. */
+ path = parent == rm ? h : parent;
+
+ if (h != rm) {
+ /* Poke in the replacement for the node to be removed. */
+ AVL_SET_LESS(h, AVL_GET_LESS(rm, 0))
+ AVL_SET_GREATER(h, AVL_GET_GREATER(rm, 0))
+ AVL_SET_BALANCE_FACTOR(h, AVL_GET_BALANCE_FACTOR(rm))
+
+ if (parent_rm == AVL_NULL)
+ l_tree->root = h;
+ else {
+ depth = rm_depth - 1;
+
+ if (L_BIT_ARR_VAL(branch, depth))
+ AVL_SET_GREATER(parent_rm, h)
else
- cmp_shortened_sub_with_path = cmp;
+ AVL_SET_LESS(parent_rm, h)
+ }
+ }
- /* Get the handle of the opposite child, which may not be null. */
- child = cmp > 0 ? AVL_GET_LESS(h, 0) : AVL_GET_GREATER(h, 0);
- }
+ if (path != AVL_NULL) {
+ /* Create a temporary linked list from the parent of the path node
+ ** to the root node. */
+ h = l_tree->root;
+ parent = AVL_NULL;
+ depth = 0;
- if (parent == AVL_NULL)
- /* There were only 1 or 2 nodes in this tree. */
- l_tree->root = child;
- else if (cmp_shortened_sub_with_path < 0)
- AVL_SET_LESS(parent, child)
- else
- AVL_SET_GREATER(parent, child)
-
- /* "path" is the parent of the subtree being eliminated or reduced
- ** from a depth of 2 to 1. If "path" is the node to be removed, we
- ** set path to the node we're about to poke into the position of the
- ** node to be removed. */
- path = parent == rm ? h : parent;
-
- if (h != rm)
- {
- /* Poke in the replacement for the node to be removed. */
- AVL_SET_LESS(h, AVL_GET_LESS(rm, 0))
- AVL_SET_GREATER(h, AVL_GET_GREATER(rm, 0))
- AVL_SET_BALANCE_FACTOR(h, AVL_GET_BALANCE_FACTOR(rm))
-
- if (parent_rm == AVL_NULL)
- l_tree->root = h;
- else
- {
- depth = rm_depth - 1;
-
- if (L_BIT_ARR_VAL(branch, depth))
- AVL_SET_GREATER(parent_rm, h)
- else
- AVL_SET_LESS(parent_rm, h)
- }
+ while (h != path) {
+ if (L_BIT_ARR_VAL(branch, depth)) {
+ child = AVL_GET_GREATER(h, 1);
+ AVL_SET_GREATER(h, parent)
+ } else {
+ child = AVL_GET_LESS(h, 1);
+ AVL_SET_LESS(h, parent)
+ }
+
+ L_CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+ parent = h;
+ h = child;
}
- if (path != AVL_NULL)
- {
- /* Create a temporary linked list from the parent of the path node
- ** to the root node. */
- h = l_tree->root;
- parent = AVL_NULL;
- depth = 0;
-
- while (h != path)
- {
- if (L_BIT_ARR_VAL(branch, depth))
- {
- child = AVL_GET_GREATER(h, 1);
- AVL_SET_GREATER(h, parent)
- }
- else
- {
- child = AVL_GET_LESS(h, 1);
- AVL_SET_LESS(h, parent)
- }
-
- L_CHECK_READ_ERROR(AVL_NULL)
- depth++;
- parent = h;
- h = child;
- }
+ /* Climb from the path node to the root node using the linked
+ ** list, restoring the tree structure and rebalancing as necessary.
+ */
+ reduced_depth = 1;
+ cmp = cmp_shortened_sub_with_path;
- /* Climb from the path node to the root node using the linked
- ** list, restoring the tree structure and rebalancing as necessary.
- */
- reduced_depth = 1;
- cmp = cmp_shortened_sub_with_path;
-
- for (; ;)
- {
- if (reduced_depth)
- {
- bf = AVL_GET_BALANCE_FACTOR(h);
-
- if (cmp < 0)
- bf++;
- else /* cmp > 0 */
- bf--;
-
- if ((bf == -2) || (bf == 2))
- {
- h = L_(balance)(L_BALANCE_PARAM_CALL_PREFIX h);
- L_CHECK_READ_ERROR(AVL_NULL)
- bf = AVL_GET_BALANCE_FACTOR(h);
- }
- else
- AVL_SET_BALANCE_FACTOR(h, bf)
- reduced_depth = (bf == 0);
- }
-
- if (parent == AVL_NULL)
- break;
-
- child = h;
- h = parent;
- depth--;
- cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
-
- if (cmp < 0)
- {
- parent = AVL_GET_LESS(h, 1);
- AVL_SET_LESS(h, child)
- }
- else
- {
- parent = AVL_GET_GREATER(h, 1);
- AVL_SET_GREATER(h, child)
- }
-
- L_CHECK_READ_ERROR(AVL_NULL)
- }
+ for (;;) {
+ if (reduced_depth) {
+ bf = AVL_GET_BALANCE_FACTOR(h);
+
+ if (cmp < 0)
+ bf++;
+ else /* cmp > 0 */
+ bf--;
+
+ if ((bf == -2) || (bf == 2)) {
+ h = L_(balance)(L_BALANCE_PARAM_CALL_PREFIX h);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ bf = AVL_GET_BALANCE_FACTOR(h);
+ } else
+ AVL_SET_BALANCE_FACTOR(h, bf)
+ reduced_depth = (bf == 0);
+ }
+
+ if (parent == AVL_NULL)
+ break;
+
+ child = h;
+ h = parent;
+ depth--;
+ cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
+
+ if (cmp < 0) {
+ parent = AVL_GET_LESS(h, 1);
+ AVL_SET_LESS(h, child)
+ } else {
+ parent = AVL_GET_GREATER(h, 1);
+ AVL_SET_GREATER(h, child)
+ }
- l_tree->root = h;
+ L_CHECK_READ_ERROR(AVL_NULL)
}
- return(rm);
+ l_tree->root = h;
+ }
+
+ return(rm);
}
#endif
#if (L_IMPL_MASK & AVL_IMPL_SUBST)
-L_SC AVL_HANDLE L_(subst)(L_(avl) *l_tree, AVL_HANDLE new_node)
-{
- AVL_HANDLE h = l_tree->root;
- AVL_HANDLE parent = AVL_NULL;
- int cmp, last_cmp;
-
- /* Search for node already in tree with same key. */
- for (; ;)
- {
- if (h == AVL_NULL)
- /* No node in tree with same key as new node. */
- return(AVL_NULL);
+L_SC AVL_HANDLE L_(subst)(L_(avl) *l_tree, AVL_HANDLE new_node) {
+ AVL_HANDLE h = l_tree->root;
+ AVL_HANDLE parent = AVL_NULL;
+ int cmp, last_cmp;
- cmp = AVL_COMPARE_NODE_NODE(new_node, h);
-
- if (cmp == 0)
- /* Found the node to substitute new one for. */
- break;
+ /* Search for node already in tree with same key. */
+ for (;;) {
+ if (h == AVL_NULL)
+ /* No node in tree with same key as new node. */
+ return(AVL_NULL);
- last_cmp = cmp;
- parent = h;
- h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1);
- L_CHECK_READ_ERROR(AVL_NULL)
- }
+ cmp = AVL_COMPARE_NODE_NODE(new_node, h);
- /* Copy tree housekeeping fields from node in tree to new node. */
- AVL_SET_LESS(new_node, AVL_GET_LESS(h, 0))
- AVL_SET_GREATER(new_node, AVL_GET_GREATER(h, 0))
- AVL_SET_BALANCE_FACTOR(new_node, AVL_GET_BALANCE_FACTOR(h))
+ if (cmp == 0)
+ /* Found the node to substitute new one for. */
+ break;
- if (parent == AVL_NULL)
- /* New node is also new root. */
- l_tree->root = new_node;
- else
- {
- /* Make parent point to new node. */
- if (last_cmp < 0)
- AVL_SET_LESS(parent, new_node)
- else
- AVL_SET_GREATER(parent, new_node)
- }
-
- return(h);
+ last_cmp = cmp;
+ parent = h;
+ h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ }
+
+ /* Copy tree housekeeping fields from node in tree to new node. */
+ AVL_SET_LESS(new_node, AVL_GET_LESS(h, 0))
+ AVL_SET_GREATER(new_node, AVL_GET_GREATER(h, 0))
+ AVL_SET_BALANCE_FACTOR(new_node, AVL_GET_BALANCE_FACTOR(h))
+
+ if (parent == AVL_NULL)
+ /* New node is also new root. */
+ l_tree->root = new_node;
+ else {
+ /* Make parent point to new node. */
+ if (last_cmp < 0)
+ AVL_SET_LESS(parent, new_node)
+ else
+ AVL_SET_GREATER(parent, new_node)
+ }
+
+ return(h);
}
#endif
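
L_(subst) swaps a new node into the position of an equal-keyed node without any rebalancing: since the key is unchanged, the tree shape is unchanged, so only the child links and balance factor move, plus the parent's pointer. A miniature of the field copy (node layout here is illustrative, not the library's):

#include <stddef.h>
#include <stdio.h>

typedef struct node {
  struct node *less, *greater;
  int balance_factor;
  int key;
  int payload;                 /* caller data, untouched by subst */
} node;

/* Only the tree housekeeping moves to the new node. */
static void copy_housekeeping(node *dst, const node *src) {
  dst->less = src->less;
  dst->greater = src->greater;
  dst->balance_factor = src->balance_factor;
}

int main(void) {
  node old_node = { NULL, NULL, 1, 42, 7 };
  node new_node = { NULL, NULL, 0, 42, 99 };  /* same key, new payload */

  copy_housekeeping(&new_node, &old_node);
  printf("bf=%d payload=%d\n", new_node.balance_factor, new_node.payload);
  return 0;
}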
@@ -851,144 +765,136 @@ L_SC AVL_HANDLE L_(subst)(L_(avl) *l_tree, AVL_HANDLE new_node)
#if (L_IMPL_MASK & AVL_IMPL_BUILD)
L_SC int L_(build)(
- L_(avl) *l_tree, AVL_BUILD_ITER_TYPE p, L_SIZE num_nodes)
-{
- /* Gives path to subtree being built. If bit n is false, branch
- ** less from the node at depth n, if true branch greater. */
- L_BIT_ARR_DEFN(branch)
-
- /* If bit n is true, then for the current subtree at depth n, its
- ** greater subtree has one more node than its less subtree. */
- L_BIT_ARR_DEFN(rem)
-
- /* Depth of root node of current subtree. */
- unsigned depth = 0;
+ L_(avl) *l_tree, AVL_BUILD_ITER_TYPE p, L_SIZE num_nodes) {
+ /* Gives path to subtree being built. If bit n is false, branch
+ ** less from the node at depth n, if true branch greater. */
+ L_BIT_ARR_DEFN(branch)
+
+ /* If bit n is true, then for the current subtree at depth n, its
+ ** greater subtree has one more node than its less subtree. */
+ L_BIT_ARR_DEFN(rem)
+
+ /* Depth of root node of current subtree. */
+ unsigned depth = 0;
+
+ /* Number of nodes in current subtree. */
+ L_SIZE num_sub = num_nodes;
+
+ /* The algorithm relies on a stack of nodes whose less subtree has
+ ** been built, but whose greater subtree has not yet been built.
+  ** The stack is implemented as a linked list. The nodes are linked
+ ** together by having the "greater" handle of a node set to the
+ ** next node in the list. "less_parent" is the handle of the first
+ ** node in the list. */
+ AVL_HANDLE less_parent = AVL_NULL;
+
+ /* h is root of current subtree, child is one of its children. */
+ AVL_HANDLE h;
+ AVL_HANDLE child;
+
+ if (num_nodes == 0) {
+ l_tree->root = AVL_NULL;
+ return(1);
+ }
- /* Number of nodes in current subtree. */
- L_SIZE num_sub = num_nodes;
+ for (;;) {
+ while (num_sub > 2) {
+ /* Subtract one for root of subtree. */
+ num_sub--;
- /* The algorithm relies on a stack of nodes whose less subtree has
- ** been built, but whose greater subtree has not yet been built.
- ** The stack is implemented as linked list. The nodes are linked
- ** together by having the "greater" handle of a node set to the
- ** next node in the list. "less_parent" is the handle of the first
- ** node in the list. */
- AVL_HANDLE less_parent = AVL_NULL;
+ if (num_sub & 1)
+ L_BIT_ARR_1(rem, depth)
+ else
+ L_BIT_ARR_0(rem, depth)
+ L_BIT_ARR_0(branch, depth)
+ depth++;
- /* h is root of current subtree, child is one of its children. */
- AVL_HANDLE h;
- AVL_HANDLE child;
+ num_sub >>= 1;
+ }
- if (num_nodes == 0)
- {
- l_tree->root = AVL_NULL;
- return(1);
+ if (num_sub == 2) {
+ /* Build a subtree with two nodes, slanting to greater.
+ ** I arbitrarily chose to always have the extra node in the
+ ** greater subtree when there is an odd number of nodes to
+ ** split between the two subtrees. */
+
+ h = AVL_BUILD_ITER_VAL(p);
+ L_CHECK_READ_ERROR(0)
+ AVL_BUILD_ITER_INCR(p)
+ child = AVL_BUILD_ITER_VAL(p);
+ L_CHECK_READ_ERROR(0)
+ AVL_BUILD_ITER_INCR(p)
+ AVL_SET_LESS(child, AVL_NULL)
+ AVL_SET_GREATER(child, AVL_NULL)
+ AVL_SET_BALANCE_FACTOR(child, 0)
+ AVL_SET_GREATER(h, child)
+ AVL_SET_LESS(h, AVL_NULL)
+ AVL_SET_BALANCE_FACTOR(h, 1)
+ } else { /* num_sub == 1 */
+ /* Build a subtree with one node. */
+
+ h = AVL_BUILD_ITER_VAL(p);
+ L_CHECK_READ_ERROR(0)
+ AVL_BUILD_ITER_INCR(p)
+ AVL_SET_LESS(h, AVL_NULL)
+ AVL_SET_GREATER(h, AVL_NULL)
+ AVL_SET_BALANCE_FACTOR(h, 0)
}
- for (; ;)
- {
- while (num_sub > 2)
- {
- /* Subtract one for root of subtree. */
- num_sub--;
-
- if (num_sub & 1)
- L_BIT_ARR_1(rem, depth)
- else
- L_BIT_ARR_0(rem, depth)
- L_BIT_ARR_0(branch, depth)
- depth++;
-
- num_sub >>= 1;
+ while (depth) {
+ depth--;
+
+ if (!L_BIT_ARR_VAL(branch, depth))
+ /* We've completed a less subtree. */
+ break;
+
+ /* We've completed a greater subtree, so attach it to
+ ** its parent (that is less than it). We pop the parent
+ ** off the stack of less parents. */
+ child = h;
+ h = less_parent;
+ less_parent = AVL_GET_GREATER(h, 1);
+ L_CHECK_READ_ERROR(0)
+ AVL_SET_GREATER(h, child)
+ /* num_sub = 2 * (num_sub - rem[depth]) + rem[depth] + 1 */
+ num_sub <<= 1;
+ num_sub += L_BIT_ARR_VAL(rem, depth) ? 0 : 1;
+
+ if (num_sub & (num_sub - 1))
+ /* num_sub is not a power of 2. */
+ AVL_SET_BALANCE_FACTOR(h, 0)
+ else
+ /* num_sub is a power of 2. */
+ AVL_SET_BALANCE_FACTOR(h, 1)
}
- if (num_sub == 2)
- {
- /* Build a subtree with two nodes, slanting to greater.
- ** I arbitrarily chose to always have the extra node in the
- ** greater subtree when there is an odd number of nodes to
- ** split between the two subtrees. */
-
- h = AVL_BUILD_ITER_VAL(p);
- L_CHECK_READ_ERROR(0)
- AVL_BUILD_ITER_INCR(p)
- child = AVL_BUILD_ITER_VAL(p);
- L_CHECK_READ_ERROR(0)
- AVL_BUILD_ITER_INCR(p)
- AVL_SET_LESS(child, AVL_NULL)
- AVL_SET_GREATER(child, AVL_NULL)
- AVL_SET_BALANCE_FACTOR(child, 0)
- AVL_SET_GREATER(h, child)
- AVL_SET_LESS(h, AVL_NULL)
- AVL_SET_BALANCE_FACTOR(h, 1)
- }
- else /* num_sub == 1 */
- {
- /* Build a subtree with one node. */
-
- h = AVL_BUILD_ITER_VAL(p);
- L_CHECK_READ_ERROR(0)
- AVL_BUILD_ITER_INCR(p)
- AVL_SET_LESS(h, AVL_NULL)
- AVL_SET_GREATER(h, AVL_NULL)
- AVL_SET_BALANCE_FACTOR(h, 0)
- }
+ if (num_sub == num_nodes)
+ /* We've completed the full tree. */
+ break;
- while (depth)
- {
- depth--;
-
- if (!L_BIT_ARR_VAL(branch, depth))
- /* We've completed a less subtree. */
- break;
-
- /* We've completed a greater subtree, so attach it to
- ** its parent (that is less than it). We pop the parent
- ** off the stack of less parents. */
- child = h;
- h = less_parent;
- less_parent = AVL_GET_GREATER(h, 1);
- L_CHECK_READ_ERROR(0)
- AVL_SET_GREATER(h, child)
- /* num_sub = 2 * (num_sub - rem[depth]) + rem[depth] + 1 */
- num_sub <<= 1;
- num_sub += L_BIT_ARR_VAL(rem, depth) ? 0 : 1;
-
- if (num_sub & (num_sub - 1))
- /* num_sub is not a power of 2. */
- AVL_SET_BALANCE_FACTOR(h, 0)
- else
- /* num_sub is a power of 2. */
- AVL_SET_BALANCE_FACTOR(h, 1)
- }
-
- if (num_sub == num_nodes)
- /* We've completed the full tree. */
- break;
-
- /* The subtree we've completed is the less subtree of the
- ** next node in the sequence. */
-
- child = h;
- h = AVL_BUILD_ITER_VAL(p);
- L_CHECK_READ_ERROR(0)
- AVL_BUILD_ITER_INCR(p)
- AVL_SET_LESS(h, child)
+ /* The subtree we've completed is the less subtree of the
+ ** next node in the sequence. */
- /* Put h into stack of less parents. */
- AVL_SET_GREATER(h, less_parent)
- less_parent = h;
+ child = h;
+ h = AVL_BUILD_ITER_VAL(p);
+ L_CHECK_READ_ERROR(0)
+ AVL_BUILD_ITER_INCR(p)
+ AVL_SET_LESS(h, child)
- /* Proceed to creating greater than subtree of h. */
- L_BIT_ARR_1(branch, depth)
- num_sub += L_BIT_ARR_VAL(rem, depth) ? 1 : 0;
- depth++;
+ /* Put h into stack of less parents. */
+ AVL_SET_GREATER(h, less_parent)
+ less_parent = h;
- } /* end for ( ; ; ) */
+ /* Proceed to creating greater than subtree of h. */
+ L_BIT_ARR_1(branch, depth)
+ num_sub += L_BIT_ARR_VAL(rem, depth) ? 1 : 0;
+ depth++;
- l_tree->root = h;
+  } /* end for (;;) */
- return(1);
+ l_tree->root = h;
+
+ return(1);
}
#endif
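
Per the comments in the hunk above, the build loop derives a reassembled subtree's balance factor from its node count: a count that is an exact power of two gets bf = 1 (greater side one level deeper, matching the "slant to greater" convention), anything else gets bf = 0. The test num_sub & (num_sub - 1) is the standard power-of-two check:

#include <stdio.h>

static int is_power_of_two(unsigned n) {
  /* Clearing the lowest set bit leaves zero only for powers of two. */
  return n != 0 && (n & (n - 1)) == 0;
}

int main(void) {
  unsigned counts[] = { 3, 4, 5, 7, 8 };
  unsigned i;
  for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
    printf("num_sub=%u -> bf=%d\n", counts[i],
           is_power_of_two(counts[i]) ? 1 : 0);
  return 0;
}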
@@ -1001,9 +907,8 @@ L_SC int L_(build)(
** invalid. (Depth is zero-base.) It's not necessary to initialize
** iterators prior to passing them to the "start" function.
*/
-L_SC void L_(init_iter)(L_(iter) *iter)
-{
- iter->depth = ~0;
+L_SC void L_(init_iter)(L_(iter) *iter) {
+ iter->depth = ~0;
}
#endif
@@ -1011,7 +916,7 @@ L_SC void L_(init_iter)(L_(iter) *iter)
#ifdef AVL_READ_ERRORS_HAPPEN
#define L_CHECK_READ_ERROR_INV_DEPTH \
- { if (AVL_READ_ERROR) { iter->depth = ~0; return; } }
+ { if (AVL_READ_ERROR) { iter->depth = ~0; return; } }
#else
@@ -1022,174 +927,157 @@ L_SC void L_(init_iter)(L_(iter) *iter)
#if (L_IMPL_MASK & AVL_IMPL_START_ITER)
L_SC void L_(start_iter)(
- L_(avl) *l_tree, L_(iter) *iter, AVL_KEY k, avl_search_type st)
-{
- AVL_HANDLE h = l_tree->root;
- unsigned d = 0;
- int cmp, target_cmp;
-
- /* Save the tree that we're going to iterate through in a
- ** member variable. */
- iter->tree_ = l_tree;
-
- iter->depth = ~0;
+ L_(avl) *l_tree, L_(iter) *iter, AVL_KEY k, avl_search_type st) {
+ AVL_HANDLE h = l_tree->root;
+ unsigned d = 0;
+ int cmp, target_cmp;
+
+ /* Save the tree that we're going to iterate through in a
+ ** member variable. */
+ iter->tree_ = l_tree;
+
+ iter->depth = ~0;
+
+ if (h == AVL_NULL)
+ /* Tree is empty. */
+ return;
+
+ if (st & AVL_LESS)
+ /* Key can be greater than key of starting node. */
+ target_cmp = 1;
+ else if (st & AVL_GREATER)
+ /* Key can be less than key of starting node. */
+ target_cmp = -1;
+ else
+ /* Key must be same as key of starting node. */
+ target_cmp = 0;
+
+ for (;;) {
+ cmp = AVL_COMPARE_KEY_NODE(k, h);
+
+ if (cmp == 0) {
+ if (st & AVL_EQUAL) {
+ /* Equal node was sought and found as starting node. */
+ iter->depth = d;
+ break;
+ }
+
+ cmp = -target_cmp;
+ } else if (target_cmp != 0)
+ if (!((cmp ^ target_cmp) & L_MASK_HIGH_BIT))
+ /* cmp and target_cmp are both negative or both positive. */
+ iter->depth = d;
+
+ h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1);
+ L_CHECK_READ_ERROR_INV_DEPTH
if (h == AVL_NULL)
- /* Tree is empty. */
- return;
-
- if (st & AVL_LESS)
- /* Key can be greater than key of starting node. */
- target_cmp = 1;
- else if (st & AVL_GREATER)
- /* Key can be less than key of starting node. */
- target_cmp = -1;
- else
- /* Key must be same as key of starting node. */
- target_cmp = 0;
-
- for (; ;)
- {
- cmp = AVL_COMPARE_KEY_NODE(k, h);
-
- if (cmp == 0)
- {
- if (st & AVL_EQUAL)
- {
- /* Equal node was sought and found as starting node. */
- iter->depth = d;
- break;
- }
-
- cmp = -target_cmp;
- }
- else if (target_cmp != 0)
- if (!((cmp ^ target_cmp) & L_MASK_HIGH_BIT))
- /* cmp and target_cmp are both negative or both positive. */
- iter->depth = d;
-
- h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1);
- L_CHECK_READ_ERROR_INV_DEPTH
-
- if (h == AVL_NULL)
- break;
-
- if (cmp > 0)
- L_BIT_ARR_1(iter->branch, d)
- else
- L_BIT_ARR_0(iter->branch, d)
- iter->path_h[d++] = h;
- }
+ break;
+
+ if (cmp > 0)
+ L_BIT_ARR_1(iter->branch, d)
+ else
+ L_BIT_ARR_0(iter->branch, d)
+ iter->path_h[d++] = h;
+ }
}
#endif
#if (L_IMPL_MASK & AVL_IMPL_START_ITER_LEAST)
-L_SC void L_(start_iter_least)(L_(avl) *l_tree, L_(iter) *iter)
-{
- AVL_HANDLE h = l_tree->root;
+L_SC void L_(start_iter_least)(L_(avl) *l_tree, L_(iter) *iter) {
+ AVL_HANDLE h = l_tree->root;
- iter->tree_ = l_tree;
+ iter->tree_ = l_tree;
- iter->depth = ~0;
+ iter->depth = ~0;
- L_BIT_ARR_ALL(iter->branch, 0)
+ L_BIT_ARR_ALL(iter->branch, 0)
- while (h != AVL_NULL)
- {
- if (iter->depth != ~0)
- iter->path_h[iter->depth] = h;
+ while (h != AVL_NULL) {
+ if (iter->depth != ~0)
+ iter->path_h[iter->depth] = h;
- iter->depth++;
- h = AVL_GET_LESS(h, 1);
- L_CHECK_READ_ERROR_INV_DEPTH
- }
+ iter->depth++;
+ h = AVL_GET_LESS(h, 1);
+ L_CHECK_READ_ERROR_INV_DEPTH
+ }
}
#endif
#if (L_IMPL_MASK & AVL_IMPL_START_ITER_GREATEST)
-L_SC void L_(start_iter_greatest)(L_(avl) *l_tree, L_(iter) *iter)
-{
- AVL_HANDLE h = l_tree->root;
+L_SC void L_(start_iter_greatest)(L_(avl) *l_tree, L_(iter) *iter) {
+ AVL_HANDLE h = l_tree->root;
- iter->tree_ = l_tree;
+ iter->tree_ = l_tree;
- iter->depth = ~0;
+ iter->depth = ~0;
- L_BIT_ARR_ALL(iter->branch, 1)
+ L_BIT_ARR_ALL(iter->branch, 1)
- while (h != AVL_NULL)
- {
- if (iter->depth != ~0)
- iter->path_h[iter->depth] = h;
+ while (h != AVL_NULL) {
+ if (iter->depth != ~0)
+ iter->path_h[iter->depth] = h;
- iter->depth++;
- h = AVL_GET_GREATER(h, 1);
- L_CHECK_READ_ERROR_INV_DEPTH
- }
+ iter->depth++;
+ h = AVL_GET_GREATER(h, 1);
+ L_CHECK_READ_ERROR_INV_DEPTH
+ }
}
#endif
#if (L_IMPL_MASK & AVL_IMPL_GET_ITER)
-L_SC AVL_HANDLE L_(get_iter)(L_(iter) *iter)
-{
- if (iter->depth == ~0)
- return(AVL_NULL);
+L_SC AVL_HANDLE L_(get_iter)(L_(iter) *iter) {
+ if (iter->depth == ~0)
+ return(AVL_NULL);
- return(iter->depth == 0 ?
- iter->tree_->root : iter->path_h[iter->depth - 1]);
+ return(iter->depth == 0 ?
+ iter->tree_->root : iter->path_h[iter->depth - 1]);
}
#endif
#if (L_IMPL_MASK & AVL_IMPL_INCR_ITER)
-L_SC void L_(incr_iter)(L_(iter) *iter)
-{
+L_SC void L_(incr_iter)(L_(iter) *iter) {
#define l_tree (iter->tree_)
- if (iter->depth != ~0)
- {
- AVL_HANDLE h =
- AVL_GET_GREATER((iter->depth == 0 ?
- iter->tree_->root : iter->path_h[iter->depth - 1]), 1);
- L_CHECK_READ_ERROR_INV_DEPTH
+ if (iter->depth != ~0) {
+ AVL_HANDLE h =
+ AVL_GET_GREATER((iter->depth == 0 ?
+ iter->tree_->root : iter->path_h[iter->depth - 1]), 1);
+ L_CHECK_READ_ERROR_INV_DEPTH
- if (h == AVL_NULL)
- do
- {
- if (iter->depth == 0)
- {
- iter->depth = ~0;
- break;
- }
-
- iter->depth--;
- }
- while (L_BIT_ARR_VAL(iter->branch, iter->depth));
- else
- {
- L_BIT_ARR_1(iter->branch, iter->depth)
- iter->path_h[iter->depth++] = h;
+ if (h == AVL_NULL)
+ do {
+ if (iter->depth == 0) {
+ iter->depth = ~0;
+ break;
+ }
- for (; ;)
- {
- h = AVL_GET_LESS(h, 1);
- L_CHECK_READ_ERROR_INV_DEPTH
+ iter->depth--;
+ } while (L_BIT_ARR_VAL(iter->branch, iter->depth));
+ else {
+ L_BIT_ARR_1(iter->branch, iter->depth)
+ iter->path_h[iter->depth++] = h;
- if (h == AVL_NULL)
- break;
+ for (;;) {
+ h = AVL_GET_LESS(h, 1);
+ L_CHECK_READ_ERROR_INV_DEPTH
- L_BIT_ARR_0(iter->branch, iter->depth)
- iter->path_h[iter->depth++] = h;
- }
- }
+ if (h == AVL_NULL)
+ break;
+
+ L_BIT_ARR_0(iter->branch, iter->depth)
+ iter->path_h[iter->depth++] = h;
+ }
}
+ }
#undef l_tree
}
@@ -1198,47 +1086,40 @@ L_SC void L_(incr_iter)(L_(iter) *iter)
#if (L_IMPL_MASK & AVL_IMPL_DECR_ITER)
-L_SC void L_(decr_iter)(L_(iter) *iter)
-{
+L_SC void L_(decr_iter)(L_(iter) *iter) {
#define l_tree (iter->tree_)
- if (iter->depth != ~0)
- {
- AVL_HANDLE h =
- AVL_GET_LESS((iter->depth == 0 ?
- iter->tree_->root : iter->path_h[iter->depth - 1]), 1);
- L_CHECK_READ_ERROR_INV_DEPTH
+ if (iter->depth != ~0) {
+ AVL_HANDLE h =
+ AVL_GET_LESS((iter->depth == 0 ?
+ iter->tree_->root : iter->path_h[iter->depth - 1]), 1);
+ L_CHECK_READ_ERROR_INV_DEPTH
- if (h == AVL_NULL)
- do
- {
- if (iter->depth == 0)
- {
- iter->depth = ~0;
- break;
- }
-
- iter->depth--;
- }
- while (!L_BIT_ARR_VAL(iter->branch, iter->depth));
- else
- {
- L_BIT_ARR_0(iter->branch, iter->depth)
- iter->path_h[iter->depth++] = h;
+ if (h == AVL_NULL)
+ do {
+ if (iter->depth == 0) {
+ iter->depth = ~0;
+ break;
+ }
- for (; ;)
- {
- h = AVL_GET_GREATER(h, 1);
- L_CHECK_READ_ERROR_INV_DEPTH
+ iter->depth--;
+ } while (!L_BIT_ARR_VAL(iter->branch, iter->depth));
+ else {
+ L_BIT_ARR_0(iter->branch, iter->depth)
+ iter->path_h[iter->depth++] = h;
- if (h == AVL_NULL)
- break;
+ for (;;) {
+ h = AVL_GET_GREATER(h, 1);
+ L_CHECK_READ_ERROR_INV_DEPTH
- L_BIT_ARR_1(iter->branch, iter->depth)
- iter->path_h[iter->depth++] = h;
- }
- }
+ if (h == AVL_NULL)
+ break;
+
+ L_BIT_ARR_1(iter->branch, iter->depth)
+ iter->path_h[iter->depth++] = h;
+ }
}
+ }
#undef l_tree
}
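
The iterator API above (start_iter_least, get_iter, incr_iter) keeps its position as a path of node handles plus one branch bit per level, rather than storing parent pointers in the nodes. A self-contained miniature of the same scheme over plain pointers (all names are illustrative, not the generated L_() names):

#include <stdio.h>

typedef struct node { struct node *less, *greater; int key; } node;

#define MAX_DEPTH 32

typedef struct {
  node *path[MAX_DEPTH];            /* handles from the root downward */
  unsigned char branch[MAX_DEPTH];  /* branch[i]: 1 = went greater from path[i] */
  unsigned depth;                   /* number of nodes on the path */
} demo_iter;

static void start_least(demo_iter *it, node *root) {
  node *h;
  it->depth = 0;
  for (h = root; h; h = h->less) {
    it->path[it->depth] = h;
    it->branch[it->depth++] = 0;
  }
}

static node *get(demo_iter *it) {
  return it->depth ? it->path[it->depth - 1] : NULL;
}

static void incr(demo_iter *it) {
  node *h = get(it)->greater;

  if (h) {
    /* Descend to the least node of the greater subtree. */
    it->branch[it->depth - 1] = 1;
    for (; h; h = h->less) {
      it->path[it->depth] = h;
      it->branch[it->depth++] = 0;
    }
  } else {
    /* Climb until some node was left by its less branch; that node is
    ** the in-order successor.  Running off the root ends the walk. */
    while (it->depth > 1 && it->branch[it->depth - 2])
      it->depth--;
    it->depth--;
  }
}

int main(void) {
  node n1 = { NULL, NULL, 1 }, n3 = { NULL, NULL, 3 };
  node n2 = { &n1, &n3, 2 };
  demo_iter it;
  node *h;

  start_least(&it, &n2);
  for (h = get(&it); h; incr(&it), h = get(&it))
    printf("%d ", h->key);          /* prints: 1 2 3 */
  printf("\n");
  return 0;
}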
diff --git a/libvpx/vpx_mem/memory_manager/include/heapmm.h b/libvpx/vpx_mem/memory_manager/include/heapmm.h
index 33004ca..4934c2d 100644
--- a/libvpx/vpx_mem/memory_manager/include/heapmm.h
+++ b/libvpx/vpx_mem/memory_manager/include/heapmm.h
@@ -81,30 +81,29 @@
#include "hmm_cnfg.h"
/* Heap descriptor. */
-typedef struct HMM_UNIQUE(structure)
-{
- /* private: */
-
- /* Pointer to (payload of) root node in AVL tree. This field should
- ** really be the AVL tree descriptor (type avl_avl). But (in the
- ** instantiation of the AVL tree generic package used in package) the
- ** AVL tree descriptor simply contains a pointer to the root. So,
- ** whenever a pointer to the AVL tree descriptor is needed, I use the
- ** cast:
- **
- ** (avl_avl *) &(heap_desc->avl_tree_root)
- **
- ** (where heap_desc is a pointer to a heap descriptor). This trick
- ** allows me to avoid including cavl_if.h in this external header. */
- void *avl_tree_root;
-
- /* Pointer to first byte of last block freed, after any coalescing. */
- void *last_freed;
-
- /* public: */
-
- HMM_UNIQUE(size_bau) num_baus_can_shrink;
- void *end_of_shrinkable_chunk;
+typedef struct HMM_UNIQUE(structure) {
+ /* private: */
+
+ /* Pointer to (payload of) root node in AVL tree. This field should
+ ** really be the AVL tree descriptor (type avl_avl). But (in the
+ ** instantiation of the AVL tree generic package used in package) the
+ ** AVL tree descriptor simply contains a pointer to the root. So,
+ ** whenever a pointer to the AVL tree descriptor is needed, I use the
+ ** cast:
+ **
+ ** (avl_avl *) &(heap_desc->avl_tree_root)
+ **
+ ** (where heap_desc is a pointer to a heap descriptor). This trick
+ ** allows me to avoid including cavl_if.h in this external header. */
+ void *avl_tree_root;
+
+ /* Pointer to first byte of last block freed, after any coalescing. */
+ void *last_freed;
+
+ /* public: */
+
+ HMM_UNIQUE(size_bau) num_baus_can_shrink;
+ void *end_of_shrinkable_chunk;
}
HMM_UNIQUE(descriptor);
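
The comment in this struct describes casting &(heap_desc->avl_tree_root) to the AVL descriptor type. That is only sound because the tree descriptor contains exactly one pointer member, so the two views share storage. A miniature of the same trick (names are illustrative; the original comment itself acknowledges this relies on the descriptor's layout):

#include <assert.h>
#include <stddef.h>

typedef struct { void *root; } demo_avl;

typedef struct {
  void *avl_tree_root;   /* first member: stands in for a whole demo_avl */
  void *last_freed;
} demo_descriptor;

int main(void) {
  demo_descriptor d;
  demo_avl *tree;

  d.avl_tree_root = &d;  /* arbitrary non-null value */
  d.last_freed = NULL;

  /* Same storage viewed two ways. */
  tree = (demo_avl *)&d.avl_tree_root;
  assert(tree->root == d.avl_tree_root);
  assert(offsetof(demo_avl, root) == 0);
  return 0;
}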
@@ -113,41 +112,41 @@ HMM_UNIQUE(descriptor);
void HMM_UNIQUE(init)(HMM_UNIQUE(descriptor) *desc);
void *HMM_UNIQUE(alloc)(
- HMM_UNIQUE(descriptor) *desc, HMM_UNIQUE(size_aau) num_addr_align_units);
+ HMM_UNIQUE(descriptor) *desc, HMM_UNIQUE(size_aau) num_addr_align_units);
/* NOT YET IMPLEMENTED */
void *HMM_UNIQUE(greedy_alloc)(
- HMM_UNIQUE(descriptor) *desc, HMM_UNIQUE(size_aau) needed_addr_align_units,
- HMM_UNIQUE(size_aau) coveted_addr_align_units);
+ HMM_UNIQUE(descriptor) *desc, HMM_UNIQUE(size_aau) needed_addr_align_units,
+ HMM_UNIQUE(size_aau) coveted_addr_align_units);
int HMM_UNIQUE(resize)(
- HMM_UNIQUE(descriptor) *desc, void *mem,
- HMM_UNIQUE(size_aau) num_addr_align_units);
+ HMM_UNIQUE(descriptor) *desc, void *mem,
+ HMM_UNIQUE(size_aau) num_addr_align_units);
/* NOT YET IMPLEMENTED */
int HMM_UNIQUE(greedy_resize)(
- HMM_UNIQUE(descriptor) *desc, void *mem,
- HMM_UNIQUE(size_aau) needed_addr_align_units,
- HMM_UNIQUE(size_aau) coveted_addr_align_units);
+ HMM_UNIQUE(descriptor) *desc, void *mem,
+ HMM_UNIQUE(size_aau) needed_addr_align_units,
+ HMM_UNIQUE(size_aau) coveted_addr_align_units);
void HMM_UNIQUE(free)(HMM_UNIQUE(descriptor) *desc, void *mem);
HMM_UNIQUE(size_aau) HMM_UNIQUE(true_size)(void *mem);
HMM_UNIQUE(size_aau) HMM_UNIQUE(largest_available)(
- HMM_UNIQUE(descriptor) *desc);
+ HMM_UNIQUE(descriptor) *desc);
void HMM_UNIQUE(new_chunk)(
- HMM_UNIQUE(descriptor) *desc, void *start_of_chunk,
- HMM_UNIQUE(size_bau) num_block_align_units);
+ HMM_UNIQUE(descriptor) *desc, void *start_of_chunk,
+ HMM_UNIQUE(size_bau) num_block_align_units);
void HMM_UNIQUE(grow_chunk)(
- HMM_UNIQUE(descriptor) *desc, void *end_of_chunk,
- HMM_UNIQUE(size_bau) num_block_align_units);
+ HMM_UNIQUE(descriptor) *desc, void *end_of_chunk,
+ HMM_UNIQUE(size_bau) num_block_align_units);
/* NOT YET IMPLEMENTED */
void HMM_UNIQUE(shrink_chunk)(
- HMM_UNIQUE(descriptor) *desc,
- HMM_UNIQUE(size_bau) num_block_align_units);
+ HMM_UNIQUE(descriptor) *desc,
+ HMM_UNIQUE(size_bau) num_block_align_units);
#endif /* defined HMM_PROCESS */
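
A hedged usage sketch of the public API declared above, assuming the default hmm_ prefix from HMM_UNIQUE and a backing chunk that is adequately aligned; the sizes are illustrative. Note the unit conventions: chunks are handed over in BAUs, while allocation requests are in AAUs, not bytes.

#include "heapmm.h"   /* built with HMM_PROCESS defined; default hmm_ prefix */

/* Backing storage; assumed suitably aligned for the configured BAU size. */
static char chunk[64 * 1024];

void demo(void) {
  hmm_descriptor d;
  void *p;

  hmm_init(&d);

  /* One BAU is HMM_BLOCK_ALIGN_UNIT AAUs of HMM_ADDR_ALIGN_UNIT bytes. */
  hmm_new_chunk(&d, chunk,
                sizeof(chunk) / (HMM_ADDR_ALIGN_UNIT * HMM_BLOCK_ALIGN_UNIT));

  /* Ask for enough AAUs to cover at least 100 bytes. */
  p = hmm_alloc(&d, 100 / HMM_ADDR_ALIGN_UNIT + 1);
  if (p)
    hmm_free(&d, p);
}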
diff --git a/libvpx/vpx_mem/memory_manager/include/hmm_cnfg.h b/libvpx/vpx_mem/memory_manager/include/hmm_cnfg.h
index 30b9f50..2c3391d 100644
--- a/libvpx/vpx_mem/memory_manager/include/hmm_cnfg.h
+++ b/libvpx/vpx_mem/memory_manager/include/hmm_cnfg.h
@@ -45,8 +45,8 @@
#define HMM_UNIQUE(BASE) hmm_ ## BASE
/* Number of bytes in an Address Alignment Unit (AAU). */
-//fwg
-//#define HMM_ADDR_ALIGN_UNIT sizeof(int)
+// fwg
+// #define HMM_ADDR_ALIGN_UNIT sizeof(int)
#define HMM_ADDR_ALIGN_UNIT 32
/* Number of AAUs in a Block Alignment Unit (BAU). */
@@ -65,7 +65,7 @@ void hmm_dflt_abort(const char *, const char *);
** statement. If you remove the definition of this macro, no self-auditing
** will be performed. */
#define HMM_AUDIT_FAIL \
- hmm_dflt_abort(__FILE__, HMM_SYM_TO_STRING(__LINE__));
+ hmm_dflt_abort(__FILE__, HMM_SYM_TO_STRING(__LINE__));
#elif HMM_CNFG_NUM == 0
@@ -90,8 +90,8 @@ extern const char *HMM_UNIQUE(fail_file);
extern unsigned HMM_UNIQUE(fail_line);
#define HMM_AUDIT_FAIL \
- { HMM_UNIQUE(fail_file) = __FILE__; HMM_UNIQUE(fail_line) = __LINE__; \
- longjmp(HMM_UNIQUE(jmp_buf), 1); }
+ { HMM_UNIQUE(fail_file) = __FILE__; HMM_UNIQUE(fail_line) = __LINE__; \
+ longjmp(HMM_UNIQUE(jmp_buf), 1); }
#elif HMM_CNFG_NUM == 1
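
Under HMM_CNFG_NUM == 0, a failed audit records the failing file and line and then longjmps, as the macro above shows. A sketch of how a caller might arm the jump buffer, assuming that build configuration (audited_heap_work is a hypothetical routine; hmm_jmp_buf, hmm_fail_file, and hmm_fail_line follow from the hmm_ prefix and the externs declared here):

#include <setjmp.h>
#include <stdio.h>

extern jmp_buf hmm_jmp_buf;
extern const char *hmm_fail_file;
extern unsigned hmm_fail_line;

void audited_heap_work(void);   /* hypothetical: heap calls that may audit-fail */

void run_with_audit_guard(void) {
  if (setjmp(hmm_jmp_buf) == 0) {
    audited_heap_work();
  } else {
    /* HMM_AUDIT_FAIL landed here via longjmp. */
    fprintf(stderr, "heap audit failed at %s:%u\n",
            hmm_fail_file, hmm_fail_line);
  }
}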
diff --git a/libvpx/vpx_mem/memory_manager/include/hmm_intrnl.h b/libvpx/vpx_mem/memory_manager/include/hmm_intrnl.h
index 5d62abc..27cefe4 100644
--- a/libvpx/vpx_mem/memory_manager/include/hmm_intrnl.h
+++ b/libvpx/vpx_mem/memory_manager/include/hmm_intrnl.h
@@ -26,34 +26,32 @@
/* Mask of high bit of variable of size_bau type. */
#define HIGH_BIT_BAU_SIZE \
- ((U(size_bau)) ~ (((U(size_bau)) ~ (U(size_bau)) 0) >> 1))
+ ((U(size_bau)) ~ (((U(size_bau)) ~ (U(size_bau)) 0) >> 1))
/* Add a given number of AAUs to pointer. */
#define AAUS_FORWARD(PTR, AAU_OFFSET) \
- (((char *) (PTR)) + ((AAU_OFFSET) * ((U(size_aau)) HMM_ADDR_ALIGN_UNIT)))
+ (((char *) (PTR)) + ((AAU_OFFSET) * ((U(size_aau)) HMM_ADDR_ALIGN_UNIT)))
/* Subtract a given number of AAUs from pointer. */
#define AAUS_BACKWARD(PTR, AAU_OFFSET) \
- (((char *) (PTR)) - ((AAU_OFFSET) * ((U(size_aau)) HMM_ADDR_ALIGN_UNIT)))
+ (((char *) (PTR)) - ((AAU_OFFSET) * ((U(size_aau)) HMM_ADDR_ALIGN_UNIT)))
/* Add a given number of BAUs to a pointer. */
#define BAUS_FORWARD(PTR, BAU_OFFSET) \
- AAUS_FORWARD((PTR), (BAU_OFFSET) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT))
+ AAUS_FORWARD((PTR), (BAU_OFFSET) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT))
/* Subtract a given number of BAUs to a pointer. */
#define BAUS_BACKWARD(PTR, BAU_OFFSET) \
- AAUS_BACKWARD((PTR), (BAU_OFFSET) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT))
+ AAUS_BACKWARD((PTR), (BAU_OFFSET) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT))
-typedef struct head_struct
-{
- /* Sizes in Block Alignment Units. */
- HMM_UNIQUE(size_bau) previous_block_size, block_size;
+typedef struct head_struct {
+ /* Sizes in Block Alignment Units. */
+ HMM_UNIQUE(size_bau) previous_block_size, block_size;
}
head_record;
-typedef struct ptr_struct
-{
- struct ptr_struct *self, *prev, *next;
+typedef struct ptr_struct {
+ struct ptr_struct *self, *prev, *next;
}
ptr_record;
@@ -71,50 +69,50 @@ ptr_record;
/* Minimum number of BAUs in a block (allowing room for the pointer record). */
#define MIN_BLOCK_BAUS \
- DIV_ROUND_UP(HEAD_AAUS + PTR_RECORD_AAUS, HMM_BLOCK_ALIGN_UNIT)
+ DIV_ROUND_UP(HEAD_AAUS + PTR_RECORD_AAUS, HMM_BLOCK_ALIGN_UNIT)
/* Return number of BAUs in block (masking off high bit containing block
** status). */
#define BLOCK_BAUS(HEAD_PTR) \
- (((head_record *) (HEAD_PTR))->block_size & ~HIGH_BIT_BAU_SIZE)
+ (((head_record *) (HEAD_PTR))->block_size & ~HIGH_BIT_BAU_SIZE)
/* Return number of BAUs in previous block (masking off high bit containing
** block status). */
#define PREV_BLOCK_BAUS(HEAD_PTR) \
- (((head_record *) (HEAD_PTR))->previous_block_size & ~HIGH_BIT_BAU_SIZE)
+ (((head_record *) (HEAD_PTR))->previous_block_size & ~HIGH_BIT_BAU_SIZE)
/* Set number of BAUs in previous block, preserving high bit containing
** block status. */
#define SET_PREV_BLOCK_BAUS(HEAD_PTR, N_BAUS) \
- { register head_record *h_ptr = (head_record *) (HEAD_PTR); \
- h_ptr->previous_block_size &= HIGH_BIT_BAU_SIZE; \
- h_ptr->previous_block_size |= (N_BAUS); }
+ { register head_record *h_ptr = (head_record *) (HEAD_PTR); \
+ h_ptr->previous_block_size &= HIGH_BIT_BAU_SIZE; \
+ h_ptr->previous_block_size |= (N_BAUS); }
/* Convert pointer to pointer record of block to pointer to block's head
** record. */
#define PTR_REC_TO_HEAD(PTR_REC_PTR) \
- ((head_record *) AAUS_BACKWARD(PTR_REC_PTR, HEAD_AAUS))
+ ((head_record *) AAUS_BACKWARD(PTR_REC_PTR, HEAD_AAUS))
/* Convert pointer to block head to pointer to block's pointer record. */
#define HEAD_TO_PTR_REC(HEAD_PTR) \
- ((ptr_record *) AAUS_FORWARD(HEAD_PTR, HEAD_AAUS))
+ ((ptr_record *) AAUS_FORWARD(HEAD_PTR, HEAD_AAUS))
/* Returns non-zero if block is allocated. */
#define IS_BLOCK_ALLOCATED(HEAD_PTR) \
- (((((head_record *) (HEAD_PTR))->block_size | \
- ((head_record *) (HEAD_PTR))->previous_block_size) & \
- HIGH_BIT_BAU_SIZE) == 0)
+ (((((head_record *) (HEAD_PTR))->block_size | \
+ ((head_record *) (HEAD_PTR))->previous_block_size) & \
+ HIGH_BIT_BAU_SIZE) == 0)
#define MARK_BLOCK_ALLOCATED(HEAD_PTR) \
- { register head_record *h_ptr = (head_record *) (HEAD_PTR); \
- h_ptr->block_size &= ~HIGH_BIT_BAU_SIZE; \
- h_ptr->previous_block_size &= ~HIGH_BIT_BAU_SIZE; }
+ { register head_record *h_ptr = (head_record *) (HEAD_PTR); \
+ h_ptr->block_size &= ~HIGH_BIT_BAU_SIZE; \
+ h_ptr->previous_block_size &= ~HIGH_BIT_BAU_SIZE; }
/* Mark a block as free when it is not the first block in a bin (and
** therefore not a node in the AVL tree). */
#define MARK_SUCCESSIVE_BLOCK_IN_FREE_BIN(HEAD_PTR) \
- { register head_record *h_ptr = (head_record *) (HEAD_PTR); \
- h_ptr->block_size |= HIGH_BIT_BAU_SIZE; }
+ { register head_record *h_ptr = (head_record *) (HEAD_PTR); \
+ h_ptr->block_size |= HIGH_BIT_BAU_SIZE; }
/* Prototypes for internal functions implemented in one file and called in
** another.
@@ -125,7 +123,7 @@ void U(into_free_collection)(U(descriptor) *desc, head_record *head_ptr);
void U(out_of_free_collection)(U(descriptor) *desc, head_record *head_ptr);
void *U(alloc_from_bin)(
- U(descriptor) *desc, ptr_record *bin_front_ptr, U(size_bau) n_baus);
+ U(descriptor) *desc, ptr_record *bin_front_ptr, U(size_bau) n_baus);
#ifdef HMM_AUDIT_FAIL
@@ -137,12 +135,12 @@ int U(audit_block_fail_dummy_return)(void);
/* Auditing a block consists of checking that the size in its head
** matches the previous block size in the head of the next block. */
#define AUDIT_BLOCK_AS_EXPR(HEAD_PTR) \
- ((BLOCK_BAUS(HEAD_PTR) == \
- PREV_BLOCK_BAUS(BAUS_FORWARD(HEAD_PTR, BLOCK_BAUS(HEAD_PTR)))) ? \
- 0 : U(audit_block_fail_dummy_return)())
+ ((BLOCK_BAUS(HEAD_PTR) == \
+ PREV_BLOCK_BAUS(BAUS_FORWARD(HEAD_PTR, BLOCK_BAUS(HEAD_PTR)))) ? \
+ 0 : U(audit_block_fail_dummy_return)())
#define AUDIT_BLOCK(HEAD_PTR) \
- { void *h_ptr = (HEAD_PTR); AUDIT_BLOCK_AS_EXPR(h_ptr); }
+ { void *h_ptr = (HEAD_PTR); AUDIT_BLOCK_AS_EXPR(h_ptr); }
#endif
diff --git a/libvpx/vpx_mem/vpx_mem.c b/libvpx/vpx_mem/vpx_mem.c
index eade432..059248b 100644
--- a/libvpx/vpx_mem/vpx_mem.c
+++ b/libvpx/vpx_mem/vpx_mem.c
@@ -51,15 +51,14 @@ static void *vpx_mm_realloc(void *memblk, size_t size);
#endif /*CONFIG_MEM_MANAGER*/
#if USE_GLOBAL_FUNCTION_POINTERS
-struct GLOBAL_FUNC_POINTERS
-{
- g_malloc_func g_malloc;
- g_calloc_func g_calloc;
- g_realloc_func g_realloc;
- g_free_func g_free;
- g_memcpy_func g_memcpy;
- g_memset_func g_memset;
- g_memmove_func g_memmove;
+struct GLOBAL_FUNC_POINTERS {
+ g_malloc_func g_malloc;
+ g_calloc_func g_calloc;
+ g_realloc_func g_realloc;
+ g_free_func g_free;
+ g_memcpy_func g_memcpy;
+ g_memset_func g_memset;
+ g_memmove_func g_memmove;
} *g_func = NULL;
# define VPX_MALLOC_L g_func->g_malloc
@@ -77,346 +76,314 @@ struct GLOBAL_FUNC_POINTERS
# define VPX_MEMMOVE_L memmove
#endif /* USE_GLOBAL_FUNCTION_POINTERS */
-unsigned int vpx_mem_get_version()
-{
- unsigned int ver = ((unsigned int)(unsigned char)VPX_MEM_VERSION_CHIEF << 24 |
- (unsigned int)(unsigned char)VPX_MEM_VERSION_MAJOR << 16 |
- (unsigned int)(unsigned char)VPX_MEM_VERSION_MINOR << 8 |
- (unsigned int)(unsigned char)VPX_MEM_VERSION_PATCH);
- return ver;
+unsigned int vpx_mem_get_version() {
+ unsigned int ver = ((unsigned int)(unsigned char)VPX_MEM_VERSION_CHIEF << 24 |
+ (unsigned int)(unsigned char)VPX_MEM_VERSION_MAJOR << 16 |
+ (unsigned int)(unsigned char)VPX_MEM_VERSION_MINOR << 8 |
+ (unsigned int)(unsigned char)VPX_MEM_VERSION_PATCH);
+ return ver;
}
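Gloss for callers: the four one-byte version fields are packed big-endian-style into one unsigned int, CHIEF in the top byte. Unpacking is plain shifts and masks; a sketch (the local names are illustrative only):

    unsigned int v = vpx_mem_get_version();
    unsigned chief = (v >> 24) & 0xff;   /* high-order byte */
    unsigned major = (v >> 16) & 0xff;
    unsigned minor = (v >>  8) & 0xff;
    unsigned patch =  v        & 0xff;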
-int vpx_mem_set_heap_size(size_t size)
-{
- int ret = -1;
+int vpx_mem_set_heap_size(size_t size) {
+ int ret = -1;
#if CONFIG_MEM_MANAGER
#if MM_DYNAMIC_MEMORY
- if (!g_mng_memory_allocated && size)
- {
- g_mm_memory_size = size;
- ret = 0;
- }
- else
- ret = -3;
+ if (!g_mng_memory_allocated && size) {
+ g_mm_memory_size = size;
+ ret = 0;
+ } else
+ ret = -3;
#else
- ret = -2;
+ ret = -2;
#endif
#else
- (void)size;
+ (void)size;
#endif
- return ret;
+ return ret;
}
-void *vpx_memalign(size_t align, size_t size)
-{
- void *addr,
- * x = NULL;
+void *vpx_memalign(size_t align, size_t size) {
+ void *addr,
+ * x = NULL;
#if CONFIG_MEM_MANAGER
- int number_aau;
+ int number_aau;
- if (vpx_mm_create_heap_memory() < 0)
- {
- _P(printf("[vpx][mm] ERROR vpx_memalign() Couldn't create memory for Heap.\n");)
- }
+ if (vpx_mm_create_heap_memory() < 0) {
+ _P(printf("[vpx][mm] ERROR vpx_memalign() Couldn't create memory for Heap.\n");)
+ }
- number_aau = ((size + align - 1 + ADDRESS_STORAGE_SIZE) >>
- SHIFT_HMM_ADDR_ALIGN_UNIT) + 1;
+ number_aau = ((size + align - 1 + ADDRESS_STORAGE_SIZE) >>
+ SHIFT_HMM_ADDR_ALIGN_UNIT) + 1;
- addr = hmm_alloc(&hmm_d, number_aau);
+ addr = hmm_alloc(&hmm_d, number_aau);
#else
- addr = VPX_MALLOC_L(size + align - 1 + ADDRESS_STORAGE_SIZE);
+ addr = VPX_MALLOC_L(size + align - 1 + ADDRESS_STORAGE_SIZE);
#endif /*CONFIG_MEM_MANAGER*/
- if (addr)
- {
- x = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, (int)align);
- /* save the actual malloc address */
- ((size_t *)x)[-1] = (size_t)addr;
- }
+ if (addr) {
+ x = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, (int)align);
+ /* save the actual malloc address */
+ ((size_t *)x)[-1] = (size_t)addr;
+ }
- return x;
+ return x;
}
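Note: vpx_memalign() uses the classic stash-the-raw-pointer trick: over-allocate by align - 1 plus room for one address, round the payload pointer up, and record the real malloc address in the size_t slot just below the payload so vpx_free() can recover it. A self-contained sketch of the same idea, assuming a power-of-two align and using sizeof(size_t) where the real code uses ADDRESS_STORAGE_SIZE:

    #include <stdlib.h>

    static void *stash_memalign(size_t align, size_t size) {
      unsigned char *raw = malloc(size + align - 1 + sizeof(size_t));
      size_t p;
      if (!raw) return NULL;
      /* round (raw + one pointer slot) up to a multiple of align */
      p = ((size_t)raw + sizeof(size_t) + align - 1) & ~(align - 1);
      ((size_t *)p)[-1] = (size_t)raw;   /* stash the real malloc address */
      return (void *)p;
    }

    static void stash_free(void *p) {
      if (p) free((void *)((size_t *)p)[-1]);   /* recover stashed address */
    }

The mask here, ~(align - 1), is the same value as the (size_t)-align used in vpx_realloc() when align is a power of two.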
-void *vpx_malloc(size_t size)
-{
- return vpx_memalign(DEFAULT_ALIGNMENT, size);
+void *vpx_malloc(size_t size) {
+ return vpx_memalign(DEFAULT_ALIGNMENT, size);
}
-void *vpx_calloc(size_t num, size_t size)
-{
- void *x;
+void *vpx_calloc(size_t num, size_t size) {
+ void *x;
- x = vpx_memalign(DEFAULT_ALIGNMENT, num * size);
+ x = vpx_memalign(DEFAULT_ALIGNMENT, num * size);
- if (x)
- VPX_MEMSET_L(x, 0, num * size);
+ if (x)
+ VPX_MEMSET_L(x, 0, num * size);
- return x;
+ return x;
}
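Review note: vpx_calloc() forwards num * size with no overflow check, so a huge num/size pair can wrap and return a too-small zeroed block. A defensive variant is cheap; a sketch, assuming vpx_memalign() and a DEFAULT_ALIGNMENT constant are visible at the call site (the checked name is hypothetical):

    #include <stddef.h>
    #include <string.h>

    void *vpx_calloc_checked(size_t num, size_t size) {
      void *x;
      if (size && num > (size_t)-1 / size)
        return NULL;                         /* num * size would wrap */
      x = vpx_memalign(DEFAULT_ALIGNMENT, num * size);
      if (x) memset(x, 0, num * size);
      return x;
    }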
-void *vpx_realloc(void *memblk, size_t size)
-{
- void *addr,
- * new_addr = NULL;
- int align = DEFAULT_ALIGNMENT;
-
- /*
- The realloc() function changes the size of the object pointed to by
- ptr to the size specified by size, and returns a pointer to the
- possibly moved block. The contents are unchanged up to the lesser
- of the new and old sizes. If ptr is null, realloc() behaves like
- malloc() for the specified size. If size is zero (0) and ptr is
- not a null pointer, the object pointed to is freed.
- */
- if (!memblk)
- new_addr = vpx_malloc(size);
- else if (!size)
- vpx_free(memblk);
- else
- {
- addr = (void *)(((size_t *)memblk)[-1]);
- memblk = NULL;
+void *vpx_realloc(void *memblk, size_t size) {
+ void *addr,
+ * new_addr = NULL;
+ int align = DEFAULT_ALIGNMENT;
+
+ /*
+ The realloc() function changes the size of the object pointed to by
+ ptr to the size specified by size, and returns a pointer to the
+ possibly moved block. The contents are unchanged up to the lesser
+ of the new and old sizes. If ptr is null, realloc() behaves like
+ malloc() for the specified size. If size is zero (0) and ptr is
+ not a null pointer, the object pointed to is freed.
+ */
+ if (!memblk)
+ new_addr = vpx_malloc(size);
+ else if (!size)
+ vpx_free(memblk);
+ else {
+ addr = (void *)(((size_t *)memblk)[-1]);
+ memblk = NULL;
#if CONFIG_MEM_MANAGER
- new_addr = vpx_mm_realloc(addr, size + align + ADDRESS_STORAGE_SIZE);
+ new_addr = vpx_mm_realloc(addr, size + align + ADDRESS_STORAGE_SIZE);
#else
- new_addr = VPX_REALLOC_L(addr, size + align + ADDRESS_STORAGE_SIZE);
+ new_addr = VPX_REALLOC_L(addr, size + align + ADDRESS_STORAGE_SIZE);
#endif
- if (new_addr)
- {
- addr = new_addr;
- new_addr = (void *)(((size_t)
- ((unsigned char *)new_addr + ADDRESS_STORAGE_SIZE) + (align - 1)) &
- (size_t) - align);
- /* save the actual malloc address */
- ((size_t *)new_addr)[-1] = (size_t)addr;
- }
+ if (new_addr) {
+ addr = new_addr;
+ new_addr = (void *)(((size_t)
+ ((unsigned char *)new_addr + ADDRESS_STORAGE_SIZE) + (align - 1)) &
+ (size_t) - align);
+ /* save the actual malloc address */
+ ((size_t *)new_addr)[-1] = (size_t)addr;
}
+ }
- return new_addr;
+ return new_addr;
}
-void vpx_free(void *memblk)
-{
- if (memblk)
- {
- void *addr = (void *)(((size_t *)memblk)[-1]);
+void vpx_free(void *memblk) {
+ if (memblk) {
+ void *addr = (void *)(((size_t *)memblk)[-1]);
#if CONFIG_MEM_MANAGER
- hmm_free(&hmm_d, addr);
+ hmm_free(&hmm_d, addr);
#else
- VPX_FREE_L(addr);
+ VPX_FREE_L(addr);
#endif
- }
+ }
}
#if CONFIG_MEM_TRACKER
-void *xvpx_memalign(size_t align, size_t size, char *file, int line)
-{
+void *xvpx_memalign(size_t align, size_t size, char *file, int line) {
#if TRY_BOUNDS_CHECK
- unsigned char *x_bounds;
+ unsigned char *x_bounds;
#endif
- void *x;
+ void *x;
- if (g_alloc_count == 0)
- {
+ if (g_alloc_count == 0) {
#if TRY_BOUNDS_CHECK
- int i_rv = vpx_memory_tracker_init(BOUNDS_CHECK_PAD_SIZE, BOUNDS_CHECK_VALUE);
+ int i_rv = vpx_memory_tracker_init(BOUNDS_CHECK_PAD_SIZE, BOUNDS_CHECK_VALUE);
#else
- int i_rv = vpx_memory_tracker_init(0, 0);
+ int i_rv = vpx_memory_tracker_init(0, 0);
#endif
- if (i_rv < 0)
- {
- _P(printf("ERROR xvpx_malloc MEM_TRACK_USAGE error vpx_memory_tracker_init().\n");)
- }
+ if (i_rv < 0) {
+ _P(printf("ERROR xvpx_malloc MEM_TRACK_USAGE error vpx_memory_tracker_init().\n");)
}
+ }
#if TRY_BOUNDS_CHECK
- {
- int i;
- unsigned int tempme = BOUNDS_CHECK_VALUE;
-
- x_bounds = vpx_memalign(align, size + (BOUNDS_CHECK_PAD_SIZE * 2));
-
- if (x_bounds)
- {
- /*we're aligning the address twice here but to keep things
- consistent we want to have the padding come before the stored
- address so no matter what free function gets called we will
- attempt to free the correct address*/
- x_bounds = (unsigned char *)(((size_t *)x_bounds)[-1]);
- x = align_addr(x_bounds + BOUNDS_CHECK_PAD_SIZE + ADDRESS_STORAGE_SIZE,
- (int)align);
- /* save the actual malloc address */
- ((size_t *)x)[-1] = (size_t)x_bounds;
-
- for (i = 0; i < BOUNDS_CHECK_PAD_SIZE; i += sizeof(unsigned int))
- {
- VPX_MEMCPY_L(x_bounds + i, &tempme, sizeof(unsigned int));
- VPX_MEMCPY_L((unsigned char *)x + size + i,
- &tempme, sizeof(unsigned int));
- }
- }
- else
- x = NULL;
- }
+ {
+ int i;
+ unsigned int tempme = BOUNDS_CHECK_VALUE;
+
+ x_bounds = vpx_memalign(align, size + (BOUNDS_CHECK_PAD_SIZE * 2));
+
+ if (x_bounds) {
+ /*we're aligning the address twice here but to keep things
+ consistent we want to have the padding come before the stored
+ address so no matter what free function gets called we will
+ attempt to free the correct address*/
+ x_bounds = (unsigned char *)(((size_t *)x_bounds)[-1]);
+ x = align_addr(x_bounds + BOUNDS_CHECK_PAD_SIZE + ADDRESS_STORAGE_SIZE,
+ (int)align);
+ /* save the actual malloc address */
+ ((size_t *)x)[-1] = (size_t)x_bounds;
+
+ for (i = 0; i < BOUNDS_CHECK_PAD_SIZE; i += sizeof(unsigned int)) {
+ VPX_MEMCPY_L(x_bounds + i, &tempme, sizeof(unsigned int));
+ VPX_MEMCPY_L((unsigned char *)x + size + i,
+ &tempme, sizeof(unsigned int));
+ }
+ } else
+ x = NULL;
+ }
#else
- x = vpx_memalign(align, size);
+ x = vpx_memalign(align, size);
#endif /*TRY_BOUNDS_CHECK*/
- g_alloc_count++;
+ g_alloc_count++;
- vpx_memory_tracker_add((size_t)x, (unsigned int)size, file, line, 1);
+ vpx_memory_tracker_add((size_t)x, (unsigned int)size, file, line, 1);
- return x;
+ return x;
}
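Note on the TRY_BOUNDS_CHECK layout above, since it is easy to misread: the inner vpx_memalign() result is immediately unwound back to the raw address (x_bounds), and the block is then laid out as

    x_bounds                                          x (returned to caller)
    |                                                 |
    [front pad][alignment slack][stashed x_bounds][user data: size bytes][rear pad]

with both pads BOUNDS_CHECK_PAD_SIZE bytes long and filled word-by-word with BOUNDS_CHECK_VALUE. Keeping the front pad ahead of the stashed address is what lets either vpx_free() or xvpx_free() free the correct pointer, and memory_tracker_check_integrity() later re-reads both pads against BOUNDS_CHECK_VALUE, so underwrites and overwrites are both caught.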
-void *xvpx_malloc(size_t size, char *file, int line)
-{
- return xvpx_memalign(DEFAULT_ALIGNMENT, size, file, line);
+void *xvpx_malloc(size_t size, char *file, int line) {
+ return xvpx_memalign(DEFAULT_ALIGNMENT, size, file, line);
}
-void *xvpx_calloc(size_t num, size_t size, char *file, int line)
-{
- void *x = xvpx_memalign(DEFAULT_ALIGNMENT, num * size, file, line);
+void *xvpx_calloc(size_t num, size_t size, char *file, int line) {
+ void *x = xvpx_memalign(DEFAULT_ALIGNMENT, num * size, file, line);
- if (x)
- VPX_MEMSET_L(x, 0, num * size);
+ if (x)
+ VPX_MEMSET_L(x, 0, num * size);
- return x;
+ return x;
}
-void *xvpx_realloc(void *memblk, size_t size, char *file, int line)
-{
- struct mem_block *p = NULL;
- int orig_size = 0,
- orig_line = 0;
- char *orig_file = NULL;
+void *xvpx_realloc(void *memblk, size_t size, char *file, int line) {
+ struct mem_block *p = NULL;
+ int orig_size = 0,
+ orig_line = 0;
+ char *orig_file = NULL;
#if TRY_BOUNDS_CHECK
- unsigned char *x_bounds = memblk ?
- (unsigned char *)(((size_t *)memblk)[-1]) :
- NULL;
+ unsigned char *x_bounds = memblk ?
+ (unsigned char *)(((size_t *)memblk)[-1]) :
+ NULL;
#endif
- void *x;
+ void *x;
- if (g_alloc_count == 0)
- {
+ if (g_alloc_count == 0) {
#if TRY_BOUNDS_CHECK
- if (!vpx_memory_tracker_init(BOUNDS_CHECK_PAD_SIZE, BOUNDS_CHECK_VALUE))
+ if (!vpx_memory_tracker_init(BOUNDS_CHECK_PAD_SIZE, BOUNDS_CHECK_VALUE))
#else
- if (!vpx_memory_tracker_init(0, 0))
+ if (!vpx_memory_tracker_init(0, 0))
#endif
- {
- _P(printf("ERROR xvpx_malloc MEM_TRACK_USAGE error vpx_memory_tracker_init().\n");)
- }
- }
-
- if ((p = vpx_memory_tracker_find((size_t)memblk)))
{
- orig_size = p->size;
- orig_file = p->file;
- orig_line = p->line;
+ _P(printf("ERROR xvpx_malloc MEM_TRACK_USAGE error vpx_memory_tracker_init().\n");)
}
+ }
+
+ if ((p = vpx_memory_tracker_find((size_t)memblk))) {
+ orig_size = p->size;
+ orig_file = p->file;
+ orig_line = p->line;
+ }
#if TRY_BOUNDS_CHECK_ON_FREE
- vpx_memory_tracker_check_integrity(file, line);
+ vpx_memory_tracker_check_integrity(file, line);
#endif
- /* have to do this regardless of success, because
- * the memory that does get realloc'd may change
- * the bounds values of this block
- */
- vpx_memory_tracker_remove((size_t)memblk);
+ /* have to do this regardless of success, because
+ * the memory that does get realloc'd may change
+ * the bounds values of this block
+ */
+ vpx_memory_tracker_remove((size_t)memblk);
#if TRY_BOUNDS_CHECK
- {
- int i;
- unsigned int tempme = BOUNDS_CHECK_VALUE;
-
- x_bounds = vpx_realloc(memblk, size + (BOUNDS_CHECK_PAD_SIZE * 2));
-
- if (x_bounds)
- {
- x_bounds = (unsigned char *)(((size_t *)x_bounds)[-1]);
- x = align_addr(x_bounds + BOUNDS_CHECK_PAD_SIZE + ADDRESS_STORAGE_SIZE,
- (int)DEFAULT_ALIGNMENT);
- /* save the actual malloc address */
- ((size_t *)x)[-1] = (size_t)x_bounds;
-
- for (i = 0; i < BOUNDS_CHECK_PAD_SIZE; i += sizeof(unsigned int))
- {
- VPX_MEMCPY_L(x_bounds + i, &tempme, sizeof(unsigned int));
- VPX_MEMCPY_L((unsigned char *)x + size + i,
- &tempme, sizeof(unsigned int));
- }
- }
- else
- x = NULL;
- }
+ {
+ int i;
+ unsigned int tempme = BOUNDS_CHECK_VALUE;
+
+ x_bounds = vpx_realloc(memblk, size + (BOUNDS_CHECK_PAD_SIZE * 2));
+
+ if (x_bounds) {
+ x_bounds = (unsigned char *)(((size_t *)x_bounds)[-1]);
+ x = align_addr(x_bounds + BOUNDS_CHECK_PAD_SIZE + ADDRESS_STORAGE_SIZE,
+ (int)DEFAULT_ALIGNMENT);
+ /* save the actual malloc address */
+ ((size_t *)x)[-1] = (size_t)x_bounds;
+
+ for (i = 0; i < BOUNDS_CHECK_PAD_SIZE; i += sizeof(unsigned int)) {
+ VPX_MEMCPY_L(x_bounds + i, &tempme, sizeof(unsigned int));
+ VPX_MEMCPY_L((unsigned char *)x + size + i,
+ &tempme, sizeof(unsigned int));
+ }
+ } else
+ x = NULL;
+ }
#else
- x = vpx_realloc(memblk, size);
+ x = vpx_realloc(memblk, size);
#endif /*TRY_BOUNDS_CHECK*/
- if (!memblk) ++g_alloc_count;
+ if (!memblk) ++g_alloc_count;
- if (x)
- vpx_memory_tracker_add((size_t)x, (unsigned int)size, file, line, 1);
- else
- vpx_memory_tracker_add((size_t)memblk, orig_size, orig_file, orig_line, 1);
+ if (x)
+ vpx_memory_tracker_add((size_t)x, (unsigned int)size, file, line, 1);
+ else
+ vpx_memory_tracker_add((size_t)memblk, orig_size, orig_file, orig_line, 1);
- return x;
+ return x;
}
-void xvpx_free(void *p_address, char *file, int line)
-{
+void xvpx_free(void *p_address, char *file, int line) {
#if TRY_BOUNDS_CHECK
- unsigned char *p_bounds_address = (unsigned char *)p_address;
- /*p_bounds_address -= BOUNDS_CHECK_PAD_SIZE;*/
+ unsigned char *p_bounds_address = (unsigned char *)p_address;
+ /*p_bounds_address -= BOUNDS_CHECK_PAD_SIZE;*/
#endif
#if !TRY_BOUNDS_CHECK_ON_FREE
- (void)file;
- (void)line;
+ (void)file;
+ (void)line;
#endif
- if (p_address)
- {
+ if (p_address) {
#if TRY_BOUNDS_CHECK_ON_FREE
- vpx_memory_tracker_check_integrity(file, line);
+ vpx_memory_tracker_check_integrity(file, line);
#endif
- /* if the addr isn't found in the list, assume it was allocated via
- * vpx_ calls not xvpx_, therefore it does not contain any padding
- */
- if (vpx_memory_tracker_remove((size_t)p_address) == -2)
- {
- p_bounds_address = p_address;
- _P(fprintf(stderr, "[vpx_mem][xvpx_free] addr: %p not found in"
- " list; freed from file:%s"
- " line:%d\n", p_address, file, line));
- }
- else
- --g_alloc_count;
+ /* if the addr isn't found in the list, assume it was allocated via
+ * vpx_ calls not xvpx_, therefore it does not contain any padding
+ */
+ if (vpx_memory_tracker_remove((size_t)p_address) == -2) {
+ p_bounds_address = p_address;
+ _P(fprintf(stderr, "[vpx_mem][xvpx_free] addr: %p not found in"
+ " list; freed from file:%s"
+ " line:%d\n", p_address, file, line));
+ } else
+ --g_alloc_count;
#if TRY_BOUNDS_CHECK
- vpx_free(p_bounds_address);
+ vpx_free(p_bounds_address);
#else
- vpx_free(p_address);
+ vpx_free(p_address);
#endif
- if (!g_alloc_count)
- vpx_memory_tracker_destroy();
- }
+ if (!g_alloc_count)
+ vpx_memory_tracker_destroy();
+ }
}
#endif /*CONFIG_MEM_TRACKER*/
@@ -426,297 +393,265 @@ void xvpx_free(void *p_address, char *file, int line)
#include <task_lib.h> /*for task_delay()*/
/* This function is only used to get a stack trace of the player
object so we can see where we are having a problem. */
-static int get_my_tt(int task)
-{
- tt(task);
+static int get_my_tt(int task) {
+ tt(task);
- return 0;
+ return 0;
}
-static void vx_sleep(int msec)
-{
- int ticks_to_sleep = 0;
+static void vx_sleep(int msec) {
+ int ticks_to_sleep = 0;
- if (msec)
- {
- int msec_per_tick = 1000 / sys_clk_rate_get();
+ if (msec) {
+ int msec_per_tick = 1000 / sys_clk_rate_get();
- if (msec < msec_per_tick)
- ticks_to_sleep++;
- else
- ticks_to_sleep = msec / msec_per_tick;
- }
+ if (msec < msec_per_tick)
+ ticks_to_sleep++;
+ else
+ ticks_to_sleep = msec / msec_per_tick;
+ }
- task_delay(ticks_to_sleep);
+ task_delay(ticks_to_sleep);
}
#endif
#endif
-void *vpx_memcpy(void *dest, const void *source, size_t length)
-{
+void *vpx_memcpy(void *dest, const void *source, size_t length) {
#if CONFIG_MEM_CHECKS
- if (((int)dest < 0x4000) || ((int)source < 0x4000))
- {
- _P(printf("WARNING: vpx_memcpy dest:0x%x source:0x%x len:%d\n", (int)dest, (int)source, length);)
+ if (((int)dest < 0x4000) || ((int)source < 0x4000)) {
+ _P(printf("WARNING: vpx_memcpy dest:0x%x source:0x%x len:%d\n", (int)dest, (int)source, length);)
#if defined(VXWORKS)
- sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0);
+ sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0);
- vx_sleep(10000);
+ vx_sleep(10000);
#endif
- }
+ }
#endif
- return VPX_MEMCPY_L(dest, source, length);
+ return VPX_MEMCPY_L(dest, source, length);
}
-void *vpx_memset(void *dest, int val, size_t length)
-{
+void *vpx_memset(void *dest, int val, size_t length) {
#if CONFIG_MEM_CHECKS
- if ((int)dest < 0x4000)
- {
- _P(printf("WARNING: vpx_memset dest:0x%x val:%d len:%d\n", (int)dest, val, length);)
+ if ((int)dest < 0x4000) {
+ _P(printf("WARNING: vpx_memset dest:0x%x val:%d len:%d\n", (int)dest, val, length);)
#if defined(VXWORKS)
- sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0);
+ sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0);
- vx_sleep(10000);
+ vx_sleep(10000);
#endif
- }
+ }
#endif
- return VPX_MEMSET_L(dest, val, length);
+ return VPX_MEMSET_L(dest, val, length);
}
-void *vpx_memmove(void *dest, const void *src, size_t count)
-{
+void *vpx_memmove(void *dest, const void *src, size_t count) {
#if CONFIG_MEM_CHECKS
- if (((int)dest < 0x4000) || ((int)src < 0x4000))
- {
- _P(printf("WARNING: vpx_memmove dest:0x%x src:0x%x count:%d\n", (int)dest, (int)src, count);)
+ if (((int)dest < 0x4000) || ((int)src < 0x4000)) {
+ _P(printf("WARNING: vpx_memmove dest:0x%x src:0x%x count:%d\n", (int)dest, (int)src, count);)
#if defined(VXWORKS)
- sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0);
+ sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0);
- vx_sleep(10000);
+ vx_sleep(10000);
#endif
- }
+ }
#endif
- return VPX_MEMMOVE_L(dest, src, count);
+ return VPX_MEMMOVE_L(dest, src, count);
}
#if CONFIG_MEM_MANAGER
-static int vpx_mm_create_heap_memory()
-{
- int i_rv = 0;
+static int vpx_mm_create_heap_memory() {
+ int i_rv = 0;
- if (!g_mng_memory_allocated)
- {
+ if (!g_mng_memory_allocated) {
#if MM_DYNAMIC_MEMORY
- g_p_mng_memory_raw =
- (unsigned char *)malloc(g_mm_memory_size + HMM_ADDR_ALIGN_UNIT);
-
- if (g_p_mng_memory_raw)
- {
- g_p_mng_memory = (unsigned char *)((((unsigned int)g_p_mng_memory_raw) +
- HMM_ADDR_ALIGN_UNIT - 1) &
- -(int)HMM_ADDR_ALIGN_UNIT);
-
- _P(printf("[vpx][mm] total memory size:%d g_p_mng_memory_raw:0x%x g_p_mng_memory:0x%x\n"
- , g_mm_memory_size + HMM_ADDR_ALIGN_UNIT
- , (unsigned int)g_p_mng_memory_raw
- , (unsigned int)g_p_mng_memory);)
- }
- else
- {
- _P(printf("[vpx][mm] Couldn't allocate memory:%d for vpx memory manager.\n"
- , g_mm_memory_size);)
-
- i_rv = -1;
- }
+ g_p_mng_memory_raw =
+ (unsigned char *)malloc(g_mm_memory_size + HMM_ADDR_ALIGN_UNIT);
+
+ if (g_p_mng_memory_raw) {
+ g_p_mng_memory = (unsigned char *)((((unsigned int)g_p_mng_memory_raw) +
+ HMM_ADDR_ALIGN_UNIT - 1) &
+ -(int)HMM_ADDR_ALIGN_UNIT);
+
+ _P(printf("[vpx][mm] total memory size:%d g_p_mng_memory_raw:0x%x g_p_mng_memory:0x%x\n"
+, g_mm_memory_size + HMM_ADDR_ALIGN_UNIT
+, (unsigned int)g_p_mng_memory_raw
+, (unsigned int)g_p_mng_memory);)
+ } else {
+ _P(printf("[vpx][mm] Couldn't allocate memory:%d for vpx memory manager.\n"
+, g_mm_memory_size);)
+
+ i_rv = -1;
+ }
- if (g_p_mng_memory)
+ if (g_p_mng_memory)
#endif
- {
- int chunk_size = 0;
+ {
+ int chunk_size = 0;
- g_mng_memory_allocated = 1;
+ g_mng_memory_allocated = 1;
- hmm_init(&hmm_d);
+ hmm_init(&hmm_d);
- chunk_size = g_mm_memory_size >> SHIFT_HMM_ADDR_ALIGN_UNIT;
+ chunk_size = g_mm_memory_size >> SHIFT_HMM_ADDR_ALIGN_UNIT;
- chunk_size -= DUMMY_END_BLOCK_BAUS;
+ chunk_size -= DUMMY_END_BLOCK_BAUS;
- _P(printf("[vpx][mm] memory size:%d for vpx memory manager. g_p_mng_memory:0x%x chunk_size:%d\n"
- , g_mm_memory_size
- , (unsigned int)g_p_mng_memory
- , chunk_size);)
+ _P(printf("[vpx][mm] memory size:%d for vpx memory manager. g_p_mng_memory:0x%x chunk_size:%d\n"
+, g_mm_memory_size
+, (unsigned int)g_p_mng_memory
+, chunk_size);)
- hmm_new_chunk(&hmm_d, (void *)g_p_mng_memory, chunk_size);
- }
+ hmm_new_chunk(&hmm_d, (void *)g_p_mng_memory, chunk_size);
+ }
#if MM_DYNAMIC_MEMORY
- else
- {
- _P(printf("[vpx][mm] Couldn't allocate memory:%d for vpx memory manager.\n"
- , g_mm_memory_size);)
+ else {
+ _P(printf("[vpx][mm] Couldn't allocate memory:%d for vpx memory manager.\n"
+, g_mm_memory_size);)
- i_rv = -1;
- }
+ i_rv = -1;
+ }
#endif
- }
+ }
- return i_rv;
+ return i_rv;
}
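Gloss on the manager bring-up: hmm works in address-alignment units (AAUs), so the code aligns the raw malloc to HMM_ADDR_ALIGN_UNIT, converts bytes to AAUs with the shift, and reserves DUMMY_END_BLOCK_BAUS for the terminator block before donating the region. A condensed sketch of the same lifecycle against a static arena (illustrative size; assumes the arena is HMM_ADDR_ALIGN_UNIT-aligned, which the real code guarantees by rounding up a malloc'd pointer):

    static unsigned char arena[1 << 20];        /* 1 MiB, illustrative */

    hmm_init(&hmm_d);
    hmm_new_chunk(&hmm_d, arena,
                  (sizeof(arena) >> SHIFT_HMM_ADDR_ALIGN_UNIT)
                      - DUMMY_END_BLOCK_BAUS);
    {
      void *p = hmm_alloc(&hmm_d, 16);          /* sizes are AAUs, not bytes */
      hmm_free(&hmm_d, p);
    }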
-static void *vpx_mm_realloc(void *memblk, size_t size)
-{
- void *p_ret = NULL;
+static void *vpx_mm_realloc(void *memblk, size_t size) {
+ void *p_ret = NULL;
- if (vpx_mm_create_heap_memory() < 0)
- {
- _P(printf("[vpx][mm] ERROR vpx_mm_realloc() Couldn't create memory for Heap.\n");)
- }
- else
- {
- int i_rv = 0;
- int old_num_aaus;
- int new_num_aaus;
+ if (vpx_mm_create_heap_memory() < 0) {
+ _P(printf("[vpx][mm] ERROR vpx_mm_realloc() Couldn't create memory for Heap.\n");)
+ } else {
+ int i_rv = 0;
+ int old_num_aaus;
+ int new_num_aaus;
+
+ old_num_aaus = hmm_true_size(memblk);
+ new_num_aaus = (size >> SHIFT_HMM_ADDR_ALIGN_UNIT) + 1;
+
+ if (old_num_aaus == new_num_aaus) {
+ p_ret = memblk;
+ } else {
+ i_rv = hmm_resize(&hmm_d, memblk, new_num_aaus);
+
+ if (i_rv == 0) {
+ p_ret = memblk;
+ } else {
+ /* Error. Try to malloc and then copy data. */
+ void *p_from_malloc;
- old_num_aaus = hmm_true_size(memblk);
new_num_aaus = (size >> SHIFT_HMM_ADDR_ALIGN_UNIT) + 1;
+ p_from_malloc = hmm_alloc(&hmm_d, new_num_aaus);
- if (old_num_aaus == new_num_aaus)
- {
- p_ret = memblk;
- }
- else
- {
- i_rv = hmm_resize(&hmm_d, memblk, new_num_aaus);
-
- if (i_rv == 0)
- {
- p_ret = memblk;
- }
- else
- {
- /* Error. Try to malloc and then copy data. */
- void *p_from_malloc;
-
- new_num_aaus = (size >> SHIFT_HMM_ADDR_ALIGN_UNIT) + 1;
- p_from_malloc = hmm_alloc(&hmm_d, new_num_aaus);
-
- if (p_from_malloc)
- {
- vpx_memcpy(p_from_malloc, memblk, size);
- hmm_free(&hmm_d, memblk);
-
- p_ret = p_from_malloc;
- }
- }
+ if (p_from_malloc) {
+ vpx_memcpy(p_from_malloc, memblk, size);
+ hmm_free(&hmm_d, memblk);
+
+ p_ret = p_from_malloc;
}
+ }
}
+ }
- return p_ret;
+ return p_ret;
}
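Review note on the fallback branch: when hmm_resize() fails on a grow, vpx_memcpy(p_from_malloc, memblk, size) copies the new size out of the old block, which reads past the old allocation's end. Clamping to the old length looks like the intended behaviour; a sketch reusing the old_num_aaus already computed above:

    size_t old_bytes = (size_t)old_num_aaus << SHIFT_HMM_ADDR_ALIGN_UNIT;
    vpx_memcpy(p_from_malloc, memblk, size < old_bytes ? size : old_bytes);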
#endif /*CONFIG_MEM_MANAGER*/
#if USE_GLOBAL_FUNCTION_POINTERS
# if CONFIG_MEM_TRACKER
extern int vpx_memory_tracker_set_functions(g_malloc_func g_malloc_l
- , g_calloc_func g_calloc_l
- , g_realloc_func g_realloc_l
- , g_free_func g_free_l
- , g_memcpy_func g_memcpy_l
- , g_memset_func g_memset_l
- , g_memmove_func g_memmove_l);
+, g_calloc_func g_calloc_l
+, g_realloc_func g_realloc_l
+, g_free_func g_free_l
+, g_memcpy_func g_memcpy_l
+, g_memset_func g_memset_l
+, g_memmove_func g_memmove_l);
# endif
#endif /*USE_GLOBAL_FUNCTION_POINTERS*/
int vpx_mem_set_functions(g_malloc_func g_malloc_l
- , g_calloc_func g_calloc_l
- , g_realloc_func g_realloc_l
- , g_free_func g_free_l
- , g_memcpy_func g_memcpy_l
- , g_memset_func g_memset_l
- , g_memmove_func g_memmove_l)
-{
+, g_calloc_func g_calloc_l
+, g_realloc_func g_realloc_l
+, g_free_func g_free_l
+, g_memcpy_func g_memcpy_l
+, g_memset_func g_memset_l
+, g_memmove_func g_memmove_l) {
#if USE_GLOBAL_FUNCTION_POINTERS
- /* If use global functions is turned on then the
- application must set the global functions before
- it does anything else or vpx_mem will have
- unpredictable results. */
- if (!g_func)
- {
- g_func = (struct GLOBAL_FUNC_POINTERS *)
- g_malloc_l(sizeof(struct GLOBAL_FUNC_POINTERS));
+ /* If use global functions is turned on then the
+ application must set the global functions before
+ it does anything else or vpx_mem will have
+ unpredictable results. */
+ if (!g_func) {
+ g_func = (struct GLOBAL_FUNC_POINTERS *)
+ g_malloc_l(sizeof(struct GLOBAL_FUNC_POINTERS));
- if (!g_func)
- {
- return -1;
- }
+ if (!g_func) {
+ return -1;
}
+ }
#if CONFIG_MEM_TRACKER
- {
- int rv = 0;
- rv = vpx_memory_tracker_set_functions(g_malloc_l
- , g_calloc_l
- , g_realloc_l
- , g_free_l
- , g_memcpy_l
- , g_memset_l
- , g_memmove_l);
-
- if (rv < 0)
- {
- return rv;
- }
+ {
+ int rv = 0;
+ rv = vpx_memory_tracker_set_functions(g_malloc_l
+, g_calloc_l
+, g_realloc_l
+, g_free_l
+, g_memcpy_l
+, g_memset_l
+, g_memmove_l);
+
+ if (rv < 0) {
+ return rv;
}
+ }
#endif
- g_func->g_malloc = g_malloc_l;
- g_func->g_calloc = g_calloc_l;
- g_func->g_realloc = g_realloc_l;
- g_func->g_free = g_free_l;
- g_func->g_memcpy = g_memcpy_l;
- g_func->g_memset = g_memset_l;
- g_func->g_memmove = g_memmove_l;
+ g_func->g_malloc = g_malloc_l;
+ g_func->g_calloc = g_calloc_l;
+ g_func->g_realloc = g_realloc_l;
+ g_func->g_free = g_free_l;
+ g_func->g_memcpy = g_memcpy_l;
+ g_func->g_memset = g_memset_l;
+ g_func->g_memmove = g_memmove_l;
- return 0;
+ return 0;
#else
- (void)g_malloc_l;
- (void)g_calloc_l;
- (void)g_realloc_l;
- (void)g_free_l;
- (void)g_memcpy_l;
- (void)g_memset_l;
- (void)g_memmove_l;
- return -1;
+ (void)g_malloc_l;
+ (void)g_calloc_l;
+ (void)g_realloc_l;
+ (void)g_free_l;
+ (void)g_memcpy_l;
+ (void)g_memset_l;
+ (void)g_memmove_l;
+ return -1;
#endif
}
-int vpx_mem_unset_functions()
-{
+int vpx_mem_unset_functions() {
#if USE_GLOBAL_FUNCTION_POINTERS
- if (g_func)
- {
- g_free_func temp_free = g_func->g_free;
- temp_free(g_func);
- g_func = NULL;
- }
+ if (g_func) {
+ g_free_func temp_free = g_func->g_free;
+ temp_free(g_func);
+ g_func = NULL;
+ }
#endif
- return 0;
+ return 0;
}
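Usage note: as the comment in vpx_mem_set_functions() says, when USE_GLOBAL_FUNCTION_POINTERS is on this call must be the first thing the application does, because g_func itself is allocated with the malloc being installed, and vpx_mem_unset_functions() frees it with the installed free. Wiring in the C library is the trivial case; a sketch:

    #include <stdlib.h>
    #include <string.h>

    /* before any vpx_ allocation; returns -1 when the build has
       USE_GLOBAL_FUNCTION_POINTERS disabled */
    if (vpx_mem_set_functions(malloc, calloc, realloc, free,
                              memcpy, memset, memmove) == 0) {
      /* ... use vpx_malloc()/vpx_free() as usual ... */
      vpx_mem_unset_functions();
    }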
diff --git a/libvpx/vpx_mem/vpx_mem.h b/libvpx/vpx_mem/vpx_mem.h
index 749eaa4..c7321a9 100644
--- a/libvpx/vpx_mem/vpx_mem.h
+++ b/libvpx/vpx_mem/vpx_mem.h
@@ -12,6 +12,7 @@
#ifndef __VPX_MEM_H__
#define __VPX_MEM_H__
+#include "vpx_config.h"
#if defined(__uClinux__)
# include <lddk.h>
#endif
@@ -30,11 +31,11 @@
#endif
#ifndef VPX_CHECK_MEM_FUNCTIONS
# define VPX_CHECK_MEM_FUNCTIONS 0 /* enable basic safety checks in _memcpy,
- _memset, and _memmove */
+_memset, and _memmove */
#endif
#ifndef REPLACE_BUILTIN_FUNCTIONS
# define REPLACE_BUILTIN_FUNCTIONS 0 /* replace builtin functions with their
- vpx_ equivalents */
+vpx_ equivalents */
#endif
#include <stdlib.h>
@@ -44,70 +45,63 @@
extern "C" {
#endif
- /*
- vpx_mem_get_version()
- provided for runtime version checking. Returns an unsigned int of the form
- CHIEF | MAJOR | MINOR | PATCH, where the chief version number is the high
- order byte.
- */
- unsigned int vpx_mem_get_version(void);
-
- /*
- vpx_mem_set_heap_size(size_t size)
- size - size in bytes for the memory manager to allocate for its heap
- Sets the memory manager's initial heap size
- Return:
- 0: on success
- -1: if memory manager calls have not been included in the vpx_mem lib
- -2: if the memory manager has been compiled to use static memory
- -3: if the memory manager has already allocated its heap
- */
- int vpx_mem_set_heap_size(size_t size);
-
- void *vpx_memalign(size_t align, size_t size);
- void *vpx_malloc(size_t size);
- void *vpx_calloc(size_t num, size_t size);
- void *vpx_realloc(void *memblk, size_t size);
- void vpx_free(void *memblk);
-
- void *vpx_memcpy(void *dest, const void *src, size_t length);
- void *vpx_memset(void *dest, int val, size_t length);
- void *vpx_memmove(void *dest, const void *src, size_t count);
-
- /* special memory functions */
- void *vpx_mem_alloc(int id, size_t size, size_t align);
- void vpx_mem_free(int id, void *mem, size_t size);
-
- /* Wrappers to standard library functions. */
- typedef void*(* g_malloc_func)(size_t);
- typedef void*(* g_calloc_func)(size_t, size_t);
- typedef void*(* g_realloc_func)(void *, size_t);
- typedef void (* g_free_func)(void *);
- typedef void*(* g_memcpy_func)(void *, const void *, size_t);
- typedef void*(* g_memset_func)(void *, int, size_t);
- typedef void*(* g_memmove_func)(void *, const void *, size_t);
-
- int vpx_mem_set_functions(g_malloc_func g_malloc_l
- , g_calloc_func g_calloc_l
- , g_realloc_func g_realloc_l
- , g_free_func g_free_l
- , g_memcpy_func g_memcpy_l
- , g_memset_func g_memset_l
- , g_memmove_func g_memmove_l);
- int vpx_mem_unset_functions(void);
-
-
- /* some defines for backward compatibility */
+ /*
+ vpx_mem_get_version()
+ provided for runtime version checking. Returns an unsigned int of the form
+ CHIEF | MAJOR | MINOR | PATCH, where the chief version number is the high
+ order byte.
+ */
+ unsigned int vpx_mem_get_version(void);
+
+ /*
+ vpx_mem_set_heap_size(size_t size)
+ size - size in bytes for the memory manager to allocate for its heap
+ Sets the memory manager's initial heap size
+ Return:
+ 0: on success
+ -1: if memory manager calls have not been included in the vpx_mem lib
+ -2: if the memory manager has been compiled to use static memory
+ -3: if the memory manager has already allocated its heap
+ */
+ int vpx_mem_set_heap_size(size_t size);
+
+ void *vpx_memalign(size_t align, size_t size);
+ void *vpx_malloc(size_t size);
+ void *vpx_calloc(size_t num, size_t size);
+ void *vpx_realloc(void *memblk, size_t size);
+ void vpx_free(void *memblk);
+
+ void *vpx_memcpy(void *dest, const void *src, size_t length);
+ void *vpx_memset(void *dest, int val, size_t length);
+ void *vpx_memmove(void *dest, const void *src, size_t count);
+
+ /* special memory functions */
+ void *vpx_mem_alloc(int id, size_t size, size_t align);
+ void vpx_mem_free(int id, void *mem, size_t size);
+
+ /* Wrappers to standard library functions. */
+ typedef void *(* g_malloc_func)(size_t);
+ typedef void *(* g_calloc_func)(size_t, size_t);
+ typedef void *(* g_realloc_func)(void *, size_t);
+ typedef void (* g_free_func)(void *);
+ typedef void *(* g_memcpy_func)(void *, const void *, size_t);
+ typedef void *(* g_memset_func)(void *, int, size_t);
+ typedef void *(* g_memmove_func)(void *, const void *, size_t);
+
+ int vpx_mem_set_functions(g_malloc_func g_malloc_l
+, g_calloc_func g_calloc_l
+, g_realloc_func g_realloc_l
+, g_free_func g_free_l
+, g_memcpy_func g_memcpy_l
+, g_memset_func g_memset_l
+, g_memmove_func g_memmove_l);
+ int vpx_mem_unset_functions(void);
+
+
+ /* some defines for backward compatibility */
#define DMEM_GENERAL 0
-#define duck_memalign(X,Y,Z) vpx_memalign(X,Y)
-#define duck_malloc(X,Y) vpx_malloc(X)
-#define duck_calloc(X,Y,Z) vpx_calloc(X,Y)
-#define duck_realloc vpx_realloc
-#define duck_free vpx_free
-#define duck_memcpy vpx_memcpy
-#define duck_memmove vpx_memmove
-#define duck_memset vpx_memset
+// (*)<
#if REPLACE_BUILTIN_FUNCTIONS
# ifndef __VPX_MEM_C__
@@ -124,13 +118,13 @@ extern "C" {
#if CONFIG_MEM_TRACKER
#include <stdarg.h>
- /*from vpx_mem/vpx_mem_tracker.c*/
- extern void vpx_memory_tracker_dump();
- extern void vpx_memory_tracker_check_integrity(char *file, unsigned int line);
- extern int vpx_memory_tracker_set_log_type(int type, char *option);
- extern int vpx_memory_tracker_set_log_func(void *userdata,
- void(*logfunc)(void *userdata,
- const char *fmt, va_list args));
+ /*from vpx_mem/vpx_mem_tracker.c*/
+ extern void vpx_memory_tracker_dump();
+ extern void vpx_memory_tracker_check_integrity(char *file, unsigned int line);
+ extern int vpx_memory_tracker_set_log_type(int type, char *option);
+ extern int vpx_memory_tracker_set_log_func(void *userdata,
+ void(*logfunc)(void *userdata,
+ const char *fmt, va_list args));
# ifndef __VPX_MEM_C__
# define vpx_memalign(align, size) xvpx_memalign((align), (size), __FILE__, __LINE__)
# define vpx_malloc(size) xvpx_malloc((size), __FILE__, __LINE__)
@@ -142,13 +136,13 @@ extern "C" {
# define vpx_mem_free(id,mem,size) xvpx_mem_free(id, mem, size, __FILE__, __LINE__)
# endif
- void *xvpx_memalign(size_t align, size_t size, char *file, int line);
- void *xvpx_malloc(size_t size, char *file, int line);
- void *xvpx_calloc(size_t num, size_t size, char *file, int line);
- void *xvpx_realloc(void *memblk, size_t size, char *file, int line);
- void xvpx_free(void *memblk, char *file, int line);
- void *xvpx_mem_alloc(int id, size_t size, size_t align, char *file, int line);
- void xvpx_mem_free(int id, void *mem, size_t size, char *file, int line);
+ void *xvpx_memalign(size_t align, size_t size, char *file, int line);
+ void *xvpx_malloc(size_t size, char *file, int line);
+ void *xvpx_calloc(size_t num, size_t size, char *file, int line);
+ void *xvpx_realloc(void *memblk, size_t size, char *file, int line);
+ void xvpx_free(void *memblk, char *file, int line);
+ void *xvpx_mem_alloc(int id, size_t size, size_t align, char *file, int line);
+ void xvpx_mem_free(int id, void *mem, size_t size, char *file, int line);
#else
# ifndef __VPX_MEM_C__
diff --git a/libvpx/vpx_mem/vpx_mem_tracker.c b/libvpx/vpx_mem/vpx_mem_tracker.c
index b37076e..613e8a1 100644
--- a/libvpx/vpx_mem/vpx_mem_tracker.c
+++ b/libvpx/vpx_mem/vpx_mem_tracker.c
@@ -22,7 +22,7 @@
in the memory_tracker struct as well as calls to create/destroy/lock/unlock
the mutex in vpx_memory_tracker_init/Destroy and memory_tracker_lock_mutex/unlock_mutex
*/
-#include "vpx_config.h"
+#include "./vpx_config.h"
#if defined(__uClinux__)
# include <lddk.h>
@@ -40,20 +40,20 @@
#include <stdio.h>
#include <stdlib.h>
-#include <string.h> //VXWORKS doesn't have a malloc/memory.h file,
-//this should pull in malloc,free,etc.
+#include <string.h> // VXWORKS doesn't have a malloc/memory.h file,
+// this should pull in malloc,free,etc.
#include <stdarg.h>
#include "include/vpx_mem_tracker.h"
-#undef vpx_malloc //undefine any vpx_mem macros that may affect calls to
-#undef vpx_free //memory functions in this file
+#undef vpx_malloc // undefine any vpx_mem macros that may affect calls to
+#undef vpx_free // memory functions in this file
#undef vpx_memcpy
#undef vpx_memset
#ifndef USE_GLOBAL_FUNCTION_POINTERS
-# define USE_GLOBAL_FUNCTION_POINTERS 0 //use function pointers instead of compiled functions.
+# define USE_GLOBAL_FUNCTION_POINTERS 0 // use function pointers instead of compiled functions.
#endif
#if USE_GLOBAL_FUNCTION_POINTERS
@@ -94,39 +94,37 @@ static int memory_tracker_unlock_mutex();
#endif
#ifndef VPX_NO_GLOBALS
-struct memory_tracker
-{
- struct mem_block *head,
- * tail;
- int len,
- totalsize;
- unsigned int current_allocated,
- max_allocated;
+struct memory_tracker {
+ struct mem_block *head,
+ * tail;
+ int len,
+ totalsize;
+ unsigned int current_allocated,
+ max_allocated;
#if HAVE_PTHREAD_H
- pthread_mutex_t mutex;
+ pthread_mutex_t mutex;
#elif defined(WIN32) || defined(_WIN32_WCE)
- HANDLE mutex;
+ HANDLE mutex;
#elif defined(VXWORKS)
- SEM_ID mutex;
+ SEM_ID mutex;
#elif defined(NO_MUTEX)
#else
#error "No mutex type defined for this platform!"
#endif
- int padding_size,
- pad_value;
+ int padding_size,
+ pad_value;
};
-static struct memory_tracker memtrack; //our global memory allocation list
-static int g_b_mem_tracker_inited = 0; //indicates whether the global list has
-//been initialized (1:yes/0:no)
-static struct
-{
- FILE *file;
- int type;
- void (*func)(void *userdata, const char *fmt, va_list args);
- void *userdata;
+static struct memory_tracker memtrack; // our global memory allocation list
+static int g_b_mem_tracker_inited = 0; // indicates whether the global list has
+// been initialized (1:yes/0:no)
+static struct {
+ FILE *file;
+ int type;
+ void (*func)(void *userdata, const char *fmt, va_list args);
+ void *userdata;
} g_logging = {NULL, 0, NULL, NULL};
#else
# include "vpx_global_handling.h"
@@ -157,60 +155,54 @@ extern void *vpx_memset(void *dest, int val, size_t length);
Initializes global memory tracker structure
Allocates the head of the list
*/
-int vpx_memory_tracker_init(int padding_size, int pad_value)
-{
- if (!g_b_mem_tracker_inited)
- {
- if ((memtrack.head = (struct mem_block *)
- MEM_TRACK_MALLOC(sizeof(struct mem_block))))
- {
- int ret;
+int vpx_memory_tracker_init(int padding_size, int pad_value) {
+ if (!g_b_mem_tracker_inited) {
+ if ((memtrack.head = (struct mem_block *)
+ MEM_TRACK_MALLOC(sizeof(struct mem_block)))) {
+ int ret;
- MEM_TRACK_MEMSET(memtrack.head, 0, sizeof(struct mem_block));
+ MEM_TRACK_MEMSET(memtrack.head, 0, sizeof(struct mem_block));
- memtrack.tail = memtrack.head;
+ memtrack.tail = memtrack.head;
- memtrack.current_allocated = 0;
- memtrack.max_allocated = 0;
+ memtrack.current_allocated = 0;
+ memtrack.max_allocated = 0;
- memtrack.padding_size = padding_size;
- memtrack.pad_value = pad_value;
+ memtrack.padding_size = padding_size;
+ memtrack.pad_value = pad_value;
#if HAVE_PTHREAD_H
- ret = pthread_mutex_init(&memtrack.mutex,
- NULL); /*mutex attributes (NULL=default)*/
+ ret = pthread_mutex_init(&memtrack.mutex,
+ NULL); /*mutex attributes (NULL=default)*/
#elif defined(WIN32) || defined(_WIN32_WCE)
- memtrack.mutex = CreateMutex(NULL, /*security attributes*/
- FALSE, /*we don't want initial ownership*/
- NULL); /*mutex name*/
- ret = !memtrack.mutex;
+ memtrack.mutex = CreateMutex(NULL, /*security attributes*/
+ FALSE, /*we don't want initial ownership*/
+ NULL); /*mutex name*/
+ ret = !memtrack.mutex;
#elif defined(VXWORKS)
- memtrack.mutex = sem_bcreate(SEM_Q_FIFO, /*SEM_Q_FIFO non-priority based mutex*/
- SEM_FULL); /*SEM_FULL initial state is unlocked*/
- ret = !memtrack.mutex;
+ memtrack.mutex = sem_bcreate(SEM_Q_FIFO, /*SEM_Q_FIFO non-priority based mutex*/
+ SEM_FULL); /*SEM_FULL initial state is unlocked*/
+ ret = !memtrack.mutex;
#elif defined(NO_MUTEX)
- ret = 0;
+ ret = 0;
#endif
- if (ret)
- {
- memtrack_log("vpx_memory_tracker_init: Error creating mutex!\n");
-
- MEM_TRACK_FREE(memtrack.head);
- memtrack.head = NULL;
- }
- else
- {
- memtrack_log("Memory Tracker init'd, v."vpx_mem_tracker_version" pad_size:%d pad_val:0x%x %d\n"
- , padding_size
- , pad_value
- , pad_value);
- g_b_mem_tracker_inited = 1;
- }
- }
+ if (ret) {
+ memtrack_log("vpx_memory_tracker_init: Error creating mutex!\n");
+
+ MEM_TRACK_FREE(memtrack.head);
+ memtrack.head = NULL;
+ } else {
+ memtrack_log("Memory Tracker init'd, v."vpx_mem_tracker_version" pad_size:%d pad_val:0x%x %d\n"
+, padding_size
+, pad_value
+, pad_value);
+ g_b_mem_tracker_inited = 1;
+ }
}
+ }
- return g_b_mem_tracker_inited;
+ return g_b_mem_tracker_inited;
}
/*
@@ -218,39 +210,35 @@ int vpx_memory_tracker_init(int padding_size, int pad_value)
If our global struct was initialized, zeros out all its members,
frees memory and destroys its mutex
*/
-void vpx_memory_tracker_destroy()
-{
- if (!memory_tracker_lock_mutex())
- {
- struct mem_block *p = memtrack.head,
- * p2 = memtrack.head;
+void vpx_memory_tracker_destroy() {
+ if (!memory_tracker_lock_mutex()) {
+ struct mem_block *p = memtrack.head,
+ * p2 = memtrack.head;
- memory_tracker_dump();
+ memory_tracker_dump();
- while (p)
- {
- p2 = p;
- p = p->next;
+ while (p) {
+ p2 = p;
+ p = p->next;
- MEM_TRACK_FREE(p2);
- }
+ MEM_TRACK_FREE(p2);
+ }
- memtrack.head = NULL;
- memtrack.tail = NULL;
- memtrack.len = 0;
- memtrack.current_allocated = 0;
- memtrack.max_allocated = 0;
+ memtrack.head = NULL;
+ memtrack.tail = NULL;
+ memtrack.len = 0;
+ memtrack.current_allocated = 0;
+ memtrack.max_allocated = 0;
- if (!g_logging.type && g_logging.file && g_logging.file != stderr)
- {
- fclose(g_logging.file);
- g_logging.file = NULL;
- }
+ if (!g_logging.type && g_logging.file && g_logging.file != stderr) {
+ fclose(g_logging.file);
+ g_logging.file = NULL;
+ }
- memory_tracker_unlock_mutex();
+ memory_tracker_unlock_mutex();
- g_b_mem_tracker_inited = 0;
- }
+ g_b_mem_tracker_inited = 0;
+ }
}
/*
@@ -265,9 +253,8 @@ void vpx_memory_tracker_destroy()
*/
void vpx_memory_tracker_add(size_t addr, unsigned int size,
char *file, unsigned int line,
- int padded)
-{
- memory_tracker_add(addr, size, file, line, padded);
+ int padded) {
+ memory_tracker_add(addr, size, file, line, padded);
}
/*
@@ -278,9 +265,8 @@ void vpx_memory_tracker_add(size_t addr, unsigned int size,
Return:
Same as described for memory_tracker_remove
*/
-int vpx_memory_tracker_remove(size_t addr)
-{
- return memory_tracker_remove(addr);
+int vpx_memory_tracker_remove(size_t addr) {
+ return memory_tracker_remove(addr);
}
/*
@@ -290,17 +276,15 @@ int vpx_memory_tracker_remove(size_t addr)
If found, pointer to the memory block that matches addr
NULL otherwise
*/
-struct mem_block *vpx_memory_tracker_find(size_t addr)
-{
- struct mem_block *p = NULL;
-
- if (!memory_tracker_lock_mutex())
- {
- p = memory_tracker_find(addr);
- memory_tracker_unlock_mutex();
- }
+struct mem_block *vpx_memory_tracker_find(size_t addr) {
+ struct mem_block *p = NULL;
- return p;
+ if (!memory_tracker_lock_mutex()) {
+ p = memory_tracker_find(addr);
+ memory_tracker_unlock_mutex();
+ }
+
+ return p;
}
/*
@@ -309,13 +293,11 @@ struct mem_block *vpx_memory_tracker_find(size_t addr)
library function to dump the current contents of the
global memory allocation list
*/
-void vpx_memory_tracker_dump()
-{
- if (!memory_tracker_lock_mutex())
- {
- memory_tracker_dump();
- memory_tracker_unlock_mutex();
- }
+void vpx_memory_tracker_dump() {
+ if (!memory_tracker_lock_mutex()) {
+ memory_tracker_dump();
+ memory_tracker_unlock_mutex();
+ }
}
/*
@@ -326,13 +308,11 @@ void vpx_memory_tracker_dump()
integrity check function to inspect every address in the global
memory allocation list
*/
-void vpx_memory_tracker_check_integrity(char *file, unsigned int line)
-{
- if (!memory_tracker_lock_mutex())
- {
- memory_tracker_check_integrity(file, line);
- memory_tracker_unlock_mutex();
- }
+void vpx_memory_tracker_check_integrity(char *file, unsigned int line) {
+ if (!memory_tracker_lock_mutex()) {
+ memory_tracker_check_integrity(file, line);
+ memory_tracker_unlock_mutex();
+ }
}
/*
@@ -344,43 +324,38 @@ void vpx_memory_tracker_check_integrity(char *file, unsigned int line)
-1: if the logging type could not be set, because the value was invalid
or because a file could not be opened
*/
-int vpx_memory_tracker_set_log_type(int type, char *option)
-{
- int ret = -1;
+int vpx_memory_tracker_set_log_type(int type, char *option) {
+ int ret = -1;
- switch (type)
- {
+ switch (type) {
case 0:
- g_logging.type = 0;
+ g_logging.type = 0;
- if (!option)
- {
- g_logging.file = stderr;
- ret = 0;
- }
- else
- {
- if ((g_logging.file = fopen((char *)option, "w")))
- ret = 0;
- }
+ if (!option) {
+ g_logging.file = stderr;
+ ret = 0;
+ } else {
+ if ((g_logging.file = fopen((char *)option, "w")))
+ ret = 0;
+ }
- break;
+ break;
#if defined(WIN32) && !defined(_WIN32_WCE)
case 1:
- g_logging.type = type;
- ret = 0;
- break;
+ g_logging.type = type;
+ ret = 0;
+ break;
#endif
default:
- break;
- }
+ break;
+ }
- //output the version to the new logging destination
- if (!ret)
- memtrack_log("Memory Tracker logging initialized, "
- "Memory Tracker v."vpx_mem_tracker_version"\n");
+ // output the version to the new logging destination
+ if (!ret)
+ memtrack_log("Memory Tracker logging initialized, "
+ "Memory Tracker v."vpx_mem_tracker_version"\n");
- return ret;
+ return ret;
}
/*
@@ -392,24 +367,22 @@ int vpx_memory_tracker_set_log_type(int type, char *option)
*/
int vpx_memory_tracker_set_log_func(void *userdata,
void(*logfunc)(void *userdata,
- const char *fmt, va_list args))
-{
- int ret = -1;
-
- if (logfunc)
- {
- g_logging.type = -1;
- g_logging.userdata = userdata;
- g_logging.func = logfunc;
- ret = 0;
- }
-
- //output the version to the new logging destination
- if (!ret)
- memtrack_log("Memory Tracker logging initialized, "
- "Memory Tracker v."vpx_mem_tracker_version"\n");
-
- return ret;
+ const char *fmt, va_list args)) {
+ int ret = -1;
+
+ if (logfunc) {
+ g_logging.type = -1;
+ g_logging.userdata = userdata;
+ g_logging.func = logfunc;
+ ret = 0;
+ }
+
+ // output the version to the new logging destination
+ if (!ret)
+ memtrack_log("Memory Tracker logging initialized, "
+ "Memory Tracker v."vpx_mem_tracker_version"\n");
+
+ return ret;
}
/*
@@ -425,79 +398,73 @@ int vpx_memory_tracker_set_log_func(void *userdata,
*
*/
-static void memtrack_log(const char *fmt, ...)
-{
- va_list list;
+static void memtrack_log(const char *fmt, ...) {
+ va_list list;
- va_start(list, fmt);
+ va_start(list, fmt);
- switch (g_logging.type)
- {
+ switch (g_logging.type) {
case -1:
- if (g_logging.func)
- g_logging.func(g_logging.userdata, fmt, list);
+ if (g_logging.func)
+ g_logging.func(g_logging.userdata, fmt, list);
- break;
+ break;
case 0:
- if (g_logging.file)
- {
- vfprintf(g_logging.file, fmt, list);
- fflush(g_logging.file);
- }
+ if (g_logging.file) {
+ vfprintf(g_logging.file, fmt, list);
+ fflush(g_logging.file);
+ }
- break;
+ break;
#if defined(WIN32) && !defined(_WIN32_WCE)
- case 1:
- {
- char temp[1024];
- _vsnprintf(temp, sizeof(temp) / sizeof(char) - 1, fmt, list);
- OutputDebugString(temp);
+ case 1: {
+ char temp[1024];
+ _vsnprintf(temp, sizeof(temp) / sizeof(char) - 1, fmt, list);
+ OutputDebugString(temp);
}
break;
#endif
default:
- break;
- }
+ break;
+ }
- va_end(list);
+ va_end(list);
}
/*
memory_tracker_dump()
Dumps the current contents of the global memory allocation list
*/
-static void memory_tracker_dump()
-{
- int i = 0;
- struct mem_block *p = (memtrack.head ? memtrack.head->next : NULL);
+static void memory_tracker_dump() {
+ int i = 0;
+ struct mem_block *p = (memtrack.head ? memtrack.head->next : NULL);
- memtrack_log("\n_currently Allocated= %d; Max allocated= %d\n",
- memtrack.current_allocated, memtrack.max_allocated);
+ memtrack_log("\n_currently Allocated= %d; Max allocated= %d\n",
+ memtrack.current_allocated, memtrack.max_allocated);
- while (p)
- {
+ while (p) {
#if defined(WIN32) && !defined(_WIN32_WCE)
- /*when using outputdebugstring, output filenames so they
- can be clicked to be opened in visual studio*/
- if (g_logging.type == 1)
- memtrack_log("memblocks[%d].addr= 0x%.8x, memblocks[%d].size= %d, file:\n"
- " %s(%d):\n", i,
- p->addr, i, p->size,
- p->file, p->line);
- else
+ /*when using outputdebugstring, output filenames so they
+ can be clicked to be opened in visual studio*/
+ if (g_logging.type == 1)
+ memtrack_log("memblocks[%d].addr= 0x%.8x, memblocks[%d].size= %d, file:\n"
+ " %s(%d):\n", i,
+ p->addr, i, p->size,
+ p->file, p->line);
+ else
#endif
- memtrack_log("memblocks[%d].addr= 0x%.8x, memblocks[%d].size= %d, file: %s, line: %d\n", i,
- p->addr, i, p->size,
- p->file, p->line);
+ memtrack_log("memblocks[%d].addr= 0x%.8x, memblocks[%d].size= %d, file: %s, line: %d\n", i,
+ p->addr, i, p->size,
+ p->file, p->line);
- p = p->next;
- ++i;
- }
+ p = p->next;
+ ++i;
+ }
- memtrack_log("\n");
+ memtrack_log("\n");
}
/*
@@ -508,55 +475,49 @@ static void memory_tracker_dump()
this function will check each addr in the list, verifying that
addr-padding_size and addr+padding_size are filled with pad_value
*/
-static void memory_tracker_check_integrity(char *file, unsigned int line)
-{
- if (memtrack.padding_size)
- {
- int i,
- index = 0;
- unsigned char *p_show_me,
- * p_show_me2;
- unsigned int tempme = memtrack.pad_value,
- dead1,
- dead2;
- unsigned char *x_bounds;
- struct mem_block *p = memtrack.head->next;
-
- while (p)
- {
- //x_bounds = (unsigned char*)p->addr;
- //back up VPX_BYTE_ALIGNMENT
- //x_bounds -= memtrack.padding_size;
-
- if (p->padded) // can the bounds be checked?
- {
- /*yes, move to the address that was actually allocated
- by the vpx_* calls*/
- x_bounds = (unsigned char *)(((size_t *)p->addr)[-1]);
-
- for (i = 0; i < memtrack.padding_size; i += sizeof(unsigned int))
- {
- p_show_me = (x_bounds + i);
- p_show_me2 = (unsigned char *)(p->addr + p->size + i);
-
- MEM_TRACK_MEMCPY(&dead1, p_show_me, sizeof(unsigned int));
- MEM_TRACK_MEMCPY(&dead2, p_show_me2, sizeof(unsigned int));
-
- if ((dead1 != tempme) || (dead2 != tempme))
- {
- memtrack_log("\n[vpx_mem integrity check failed]:\n"
- " index[%d,%d] {%s:%d} addr=0x%x, size=%d,"
- " file: %s, line: %d c0:0x%x c1:0x%x\n",
- index, i, file, line, p->addr, p->size, p->file,
- p->line, dead1, dead2);
- }
- }
- }
-
- ++index;
- p = p->next;
+static void memory_tracker_check_integrity(char *file, unsigned int line) {
+ if (memtrack.padding_size) {
+ int i,
+ index = 0;
+ unsigned char *p_show_me,
+ * p_show_me2;
+ unsigned int tempme = memtrack.pad_value,
+ dead1,
+ dead2;
+ unsigned char *x_bounds;
+ struct mem_block *p = memtrack.head->next;
+
+ while (p) {
+ // x_bounds = (unsigned char*)p->addr;
+ // back up VPX_BYTE_ALIGNMENT
+ // x_bounds -= memtrack.padding_size;
+
+ if (p->padded) { // can the bounds be checked?
+ /*yes, move to the address that was actually allocated
+ by the vpx_* calls*/
+ x_bounds = (unsigned char *)(((size_t *)p->addr)[-1]);
+
+ for (i = 0; i < memtrack.padding_size; i += sizeof(unsigned int)) {
+ p_show_me = (x_bounds + i);
+ p_show_me2 = (unsigned char *)(p->addr + p->size + i);
+
+ MEM_TRACK_MEMCPY(&dead1, p_show_me, sizeof(unsigned int));
+ MEM_TRACK_MEMCPY(&dead2, p_show_me2, sizeof(unsigned int));
+
+ if ((dead1 != tempme) || (dead2 != tempme)) {
+ memtrack_log("\n[vpx_mem integrity check failed]:\n"
+ " index[%d,%d] {%s:%d} addr=0x%x, size=%d,"
+ " file: %s, line: %d c0:0x%x c1:0x%x\n",
+ index, i, file, line, p->addr, p->size, p->file,
+ p->line, dead1, dead2);
+ }
}
+ }
+
+ ++index;
+ p = p->next;
}
+ }
}
/*
@@ -568,43 +529,38 @@ static void memory_tracker_check_integrity(char *file, unsigned int line)
*/
void memory_tracker_add(size_t addr, unsigned int size,
char *file, unsigned int line,
- int padded)
-{
- if (!memory_tracker_lock_mutex())
- {
- struct mem_block *p;
+ int padded) {
+ if (!memory_tracker_lock_mutex()) {
+ struct mem_block *p;
- p = MEM_TRACK_MALLOC(sizeof(struct mem_block));
+ p = MEM_TRACK_MALLOC(sizeof(struct mem_block));
- if (p)
- {
- p->prev = memtrack.tail;
- p->prev->next = p;
- p->addr = addr;
- p->size = size;
- p->line = line;
- p->file = file;
- p->padded = padded;
- p->next = NULL;
+ if (p) {
+ p->prev = memtrack.tail;
+ p->prev->next = p;
+ p->addr = addr;
+ p->size = size;
+ p->line = line;
+ p->file = file;
+ p->padded = padded;
+ p->next = NULL;
- memtrack.tail = p;
+ memtrack.tail = p;
- memtrack.current_allocated += size;
+ memtrack.current_allocated += size;
- if (memtrack.current_allocated > memtrack.max_allocated)
- memtrack.max_allocated = memtrack.current_allocated;
+ if (memtrack.current_allocated > memtrack.max_allocated)
+ memtrack.max_allocated = memtrack.current_allocated;
- //memtrack_log("memory_tracker_add: added addr=0x%.8x\n", addr);
+ // memtrack_log("memory_tracker_add: added addr=0x%.8x\n", addr);
- memory_tracker_unlock_mutex();
- }
- else
- {
- memtrack_log("memory_tracker_add: error allocating memory!\n");
- memory_tracker_unlock_mutex();
- vpx_memory_tracker_destroy();
- }
+ memory_tracker_unlock_mutex();
+ } else {
+ memtrack_log("memory_tracker_add: error allocating memory!\n");
+ memory_tracker_unlock_mutex();
+ vpx_memory_tracker_destroy();
}
+ }
}
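Note: the add/remove pair lean on memtrack.head being a permanent dummy node (allocated in vpx_memory_tracker_init), so neither path ever special-cases an empty list; real entries start at head->next. The two pointer dances in miniature:

    /* append after tail; valid even when tail == head (empty list) */
    p->prev = memtrack.tail;
    p->prev->next = p;
    p->next = NULL;
    memtrack.tail = p;

    /* unlink p; tail must be pulled back when the last node goes */
    p->prev->next = p->next;
    if (p->next) p->next->prev = p->prev;
    else         memtrack.tail = p->prev;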
/*
@@ -617,41 +573,36 @@ void memory_tracker_add(size_t addr, unsigned int size,
-1: if the mutex could not be locked
-2: if the addr was not found in the list
*/
-int memory_tracker_remove(size_t addr)
-{
- int ret = -1;
-
- if (!memory_tracker_lock_mutex())
- {
- struct mem_block *p;
+int memory_tracker_remove(size_t addr) {
+ int ret = -1;
- if ((p = memory_tracker_find(addr)))
- {
- memtrack.current_allocated -= p->size;
+ if (!memory_tracker_lock_mutex()) {
+ struct mem_block *p;
- p->prev->next = p->next;
+ if ((p = memory_tracker_find(addr))) {
+ memtrack.current_allocated -= p->size;
- if (p->next)
- p->next->prev = p->prev;
- else
- memtrack.tail = p->prev;
+ p->prev->next = p->next;
- ret = 0;
- MEM_TRACK_FREE(p);
- }
- else
- {
- if (addr)
- memtrack_log("memory_tracker_remove(): addr not found in list,"
- " 0x%.8x\n", addr);
+ if (p->next)
+ p->next->prev = p->prev;
+ else
+ memtrack.tail = p->prev;
- ret = -2;
- }
+ ret = 0;
+ MEM_TRACK_FREE(p);
+ } else {
+ if (addr)
+ memtrack_log("memory_tracker_remove(): addr not found in list,"
+ " 0x%.8x\n", addr);
- memory_tracker_unlock_mutex();
+ ret = -2;
}
- return ret;
+ memory_tracker_unlock_mutex();
+ }
+
+ return ret;
}
/*
@@ -662,19 +613,17 @@ int memory_tracker_remove(size_t addr)
the need for repeated locking and unlocking as in Remove
Returns: pointer to the mem block if found, NULL otherwise
*/
-static struct mem_block *memory_tracker_find(size_t addr)
-{
- struct mem_block *p = NULL;
+static struct mem_block *memory_tracker_find(size_t addr) {
+ struct mem_block *p = NULL;
- if (memtrack.head)
- {
- p = memtrack.head->next;
+ if (memtrack.head) {
+ p = memtrack.head->next;
- while (p && (p->addr != addr))
- p = p->next;
- }
+ while (p && (p->addr != addr))
+ p = p->next;
+ }
- return p;
+ return p;
}
@@ -687,28 +636,25 @@ static struct mem_block *memory_tracker_find(size_t addr)
<0: Failure, either the mutex was not initialized
or the call to lock the mutex failed
*/
-static int memory_tracker_lock_mutex()
-{
- int ret = -1;
+static int memory_tracker_lock_mutex() {
+ int ret = -1;
- if (g_b_mem_tracker_inited)
- {
+ if (g_b_mem_tracker_inited) {
#if HAVE_PTHREAD_H
- ret = pthread_mutex_lock(&memtrack.mutex);
+ ret = pthread_mutex_lock(&memtrack.mutex);
#elif defined(WIN32) || defined(_WIN32_WCE)
- ret = WaitForSingleObject(memtrack.mutex, INFINITE);
+ ret = WaitForSingleObject(memtrack.mutex, INFINITE);
#elif defined(VXWORKS)
- ret = sem_take(memtrack.mutex, WAIT_FOREVER);
+ ret = sem_take(memtrack.mutex, WAIT_FOREVER);
#endif
- if (ret)
- {
- memtrack_log("memory_tracker_lock_mutex: mutex lock failed\n");
- }
+ if (ret) {
+ memtrack_log("memory_tracker_lock_mutex: mutex lock failed\n");
}
+ }
- return ret;
+ return ret;
}
/*
@@ -719,28 +665,25 @@ static int memory_tracker_lock_mutex()
<0: Failure, either the mutex was not initialized
or the call to unlock the mutex failed
*/
-static int memory_tracker_unlock_mutex()
-{
- int ret = -1;
+static int memory_tracker_unlock_mutex() {
+ int ret = -1;
- if (g_b_mem_tracker_inited)
- {
+ if (g_b_mem_tracker_inited) {
#if HAVE_PTHREAD_H
- ret = pthread_mutex_unlock(&memtrack.mutex);
+ ret = pthread_mutex_unlock(&memtrack.mutex);
#elif defined(WIN32) || defined(_WIN32_WCE)
- ret = !ReleaseMutex(memtrack.mutex);
+ ret = !ReleaseMutex(memtrack.mutex);
#elif defined(VXWORKS)
- ret = sem_give(memtrack.mutex);
+ ret = sem_give(memtrack.mutex);
#endif
- if (ret)
- {
- memtrack_log("memory_tracker_unlock_mutex: mutex unlock failed\n");
- }
+ if (ret) {
+ memtrack_log("memory_tracker_unlock_mutex: mutex unlock failed\n");
}
+ }
- return ret;
+ return ret;
}
#endif
@@ -754,45 +697,44 @@ static int memory_tracker_unlock_mutex()
-1: if USE_GLOBAL_FUNCTION_POINTERS is not set.
*/
int vpx_memory_tracker_set_functions(mem_track_malloc_func g_malloc_l
- , mem_track_calloc_func g_calloc_l
- , mem_track_realloc_func g_realloc_l
- , mem_track_free_func g_free_l
- , mem_track_memcpy_func g_memcpy_l
- , mem_track_memset_func g_memset_l
- , mem_track_memmove_func g_memmove_l)
-{
+, mem_track_calloc_func g_calloc_l
+, mem_track_realloc_func g_realloc_l
+, mem_track_free_func g_free_l
+, mem_track_memcpy_func g_memcpy_l
+, mem_track_memset_func g_memset_l
+, mem_track_memmove_func g_memmove_l) {
#if USE_GLOBAL_FUNCTION_POINTERS
- if (g_malloc_l)
- g_malloc = g_malloc_l;
+ if (g_malloc_l)
+ g_malloc = g_malloc_l;
- if (g_calloc_l)
- g_calloc = g_calloc_l;
+ if (g_calloc_l)
+ g_calloc = g_calloc_l;
- if (g_realloc_l)
- g_realloc = g_realloc_l;
+ if (g_realloc_l)
+ g_realloc = g_realloc_l;
- if (g_free_l)
- g_free = g_free_l;
+ if (g_free_l)
+ g_free = g_free_l;
- if (g_memcpy_l)
- g_memcpy = g_memcpy_l;
+ if (g_memcpy_l)
+ g_memcpy = g_memcpy_l;
- if (g_memset_l)
- g_memset = g_memset_l;
+ if (g_memset_l)
+ g_memset = g_memset_l;
- if (g_memmove_l)
- g_memmove = g_memmove_l;
+ if (g_memmove_l)
+ g_memmove = g_memmove_l;
- return 0;
+ return 0;
#else
- (void)g_malloc_l;
- (void)g_calloc_l;
- (void)g_realloc_l;
- (void)g_free_l;
- (void)g_memcpy_l;
- (void)g_memset_l;
- (void)g_memmove_l;
- return -1;
+ (void)g_malloc_l;
+ (void)g_calloc_l;
+ (void)g_realloc_l;
+ (void)g_free_l;
+ (void)g_memcpy_l;
+ (void)g_memset_l;
+ (void)g_memmove_l;
+ return -1;
#endif
}
diff --git a/libvpx/vpx_ports/arm_cpudetect.c b/libvpx/vpx_ports/arm_cpudetect.c
index 8ff95a1..542ff67 100644
--- a/libvpx/vpx_ports/arm_cpudetect.c
+++ b/libvpx/vpx_ports/arm_cpudetect.c
@@ -12,50 +12,45 @@
#include <string.h>
#include "arm.h"
-static int arm_cpu_env_flags(int *flags)
-{
- char *env;
- env = getenv("VPX_SIMD_CAPS");
- if (env && *env)
- {
- *flags = (int)strtol(env, NULL, 0);
- return 0;
- }
- *flags = 0;
- return -1;
+static int arm_cpu_env_flags(int *flags) {
+ char *env;
+ env = getenv("VPX_SIMD_CAPS");
+ if (env && *env) {
+ *flags = (int)strtol(env, NULL, 0);
+ return 0;
+ }
+ *flags = 0;
+ return -1;
}
-static int arm_cpu_env_mask(void)
-{
- char *env;
- env = getenv("VPX_SIMD_CAPS_MASK");
- return env && *env ? (int)strtol(env, NULL, 0) : ~0;
+static int arm_cpu_env_mask(void) {
+ char *env;
+ env = getenv("VPX_SIMD_CAPS_MASK");
+ return env && *env ? (int)strtol(env, NULL, 0) : ~0;
}
#if !CONFIG_RUNTIME_CPU_DETECT
-int arm_cpu_caps(void)
-{
+int arm_cpu_caps(void) {
/* This function should actually be a no-op. There is no way to adjust any of
* these because the RTCD tables do not exist: the functions are called
* statically */
- int flags;
- int mask;
- if (!arm_cpu_env_flags(&flags))
- {
- return flags;
- }
- mask = arm_cpu_env_mask();
+ int flags;
+ int mask;
+ if (!arm_cpu_env_flags(&flags)) {
+ return flags;
+ }
+ mask = arm_cpu_env_mask();
#if HAVE_EDSP
- flags |= HAS_EDSP;
+ flags |= HAS_EDSP;
#endif /* HAVE_EDSP */
#if HAVE_MEDIA
- flags |= HAS_MEDIA;
+ flags |= HAS_MEDIA;
#endif /* HAVE_MEDIA */
#if HAVE_NEON
- flags |= HAS_NEON;
+ flags |= HAS_NEON;
#endif /* HAVE_NEON */
- return flags & mask;
+ return flags & mask;
}
#elif defined(_MSC_VER) /* end !CONFIG_RUNTIME_CPU_DETECT */
@@ -64,156 +59,134 @@ int arm_cpu_caps(void)
#define WIN32_EXTRA_LEAN
#include <windows.h>
-int arm_cpu_caps(void)
-{
- int flags;
- int mask;
- if (!arm_cpu_env_flags(&flags))
- {
- return flags;
- }
- mask = arm_cpu_env_mask();
- /* MSVC has no inline __asm support for ARM, but it does let you __emit
- * instructions via their assembled hex code.
- * All of these instructions should be essentially nops.
- */
+int arm_cpu_caps(void) {
+ int flags;
+ int mask;
+ if (!arm_cpu_env_flags(&flags)) {
+ return flags;
+ }
+ mask = arm_cpu_env_mask();
+ /* MSVC has no inline __asm support for ARM, but it does let you __emit
+ * instructions via their assembled hex code.
+ * All of these instructions should be essentially nops.
+ */
#if HAVE_EDSP
- if (mask & HAS_EDSP)
- {
- __try
- {
- /*PLD [r13]*/
- __emit(0xF5DDF000);
- flags |= HAS_EDSP;
- }
- __except(GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION)
- {
- /*Ignore exception.*/
- }
+ if (mask & HAS_EDSP) {
+ __try {
+ /*PLD [r13]*/
+ __emit(0xF5DDF000);
+ flags |= HAS_EDSP;
+ } __except (GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) {
+ /*Ignore exception.*/
}
+ }
#if HAVE_MEDIA
- if (mask & HAS_MEDIA)
- __try
- {
- /*SHADD8 r3,r3,r3*/
- __emit(0xE6333F93);
- flags |= HAS_MEDIA;
- }
- __except(GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION)
- {
- /*Ignore exception.*/
- }
- }
+ if (mask & HAS_MEDIA)
+ __try {
+ /*SHADD8 r3,r3,r3*/
+ __emit(0xE6333F93);
+ flags |= HAS_MEDIA;
+ } __except (GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) {
+ /*Ignore exception.*/
+ }
+}
#if HAVE_NEON
- if (mask & HAS_NEON)
- {
- __try
- {
- /*VORR q0,q0,q0*/
- __emit(0xF2200150);
- flags |= HAS_NEON;
- }
- __except(GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION)
- {
- /*Ignore exception.*/
- }
- }
+  if (mask & HAS_NEON) {
+    __try {
+      /*VORR q0,q0,q0*/
+      __emit(0xF2200150);
+      flags |= HAS_NEON;
+    } __except (GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) {
+      /*Ignore exception.*/
+    }
+  }
#endif /* HAVE_NEON */
#endif /* HAVE_MEDIA */
#endif /* HAVE_EDSP */
- return flags & mask;
+  return flags & mask;
}
#elif defined(__ANDROID__) /* end _MSC_VER */
#include <cpu-features.h>
-int arm_cpu_caps(void)
-{
- int flags;
- int mask;
- uint64_t features;
- if (!arm_cpu_env_flags(&flags))
- {
- return flags;
- }
- mask = arm_cpu_env_mask();
- features = android_getCpuFeatures();
+int arm_cpu_caps(void) {
+ int flags;
+ int mask;
+ uint64_t features;
+ if (!arm_cpu_env_flags(&flags)) {
+ return flags;
+ }
+ mask = arm_cpu_env_mask();
+ features = android_getCpuFeatures();
#if HAVE_EDSP
- flags |= HAS_EDSP;
+ flags |= HAS_EDSP;
#endif /* HAVE_EDSP */
#if HAVE_MEDIA
- flags |= HAS_MEDIA;
+ flags |= HAS_MEDIA;
#endif /* HAVE_MEDIA */
#if HAVE_NEON
- if (features & ANDROID_CPU_ARM_FEATURE_NEON)
- flags |= HAS_NEON;
+ if (features & ANDROID_CPU_ARM_FEATURE_NEON)
+ flags |= HAS_NEON;
#endif /* HAVE_NEON */
- return flags & mask;
+ return flags & mask;
}
#elif defined(__linux__) /* end __ANDROID__ */
+
#include <stdio.h>
-int arm_cpu_caps(void)
-{
- FILE *fin;
- int flags;
- int mask;
- if (!arm_cpu_env_flags(&flags))
- {
- return flags;
- }
- mask = arm_cpu_env_mask();
- /* Reading /proc/self/auxv would be easier, but that doesn't work reliably
- * on Android.
- * This also means that detection will fail in Scratchbox.
+int arm_cpu_caps(void) {
+ FILE *fin;
+ int flags;
+ int mask;
+ if (!arm_cpu_env_flags(&flags)) {
+ return flags;
+ }
+ mask = arm_cpu_env_mask();
+ /* Reading /proc/self/auxv would be easier, but that doesn't work reliably
+ * on Android.
+ * This also means that detection will fail in Scratchbox.
+ */
+ fin = fopen("/proc/cpuinfo", "r");
+ if (fin != NULL) {
+ /* 512 should be enough for anybody (it's even enough for all the flags
+ * that x86 has accumulated... so far).
*/
- fin = fopen("/proc/cpuinfo","r");
- if(fin != NULL)
- {
- /* 512 should be enough for anybody (it's even enough for all the flags
- * that x86 has accumulated... so far).
- */
- char buf[512];
- while (fgets(buf, 511, fin) != NULL)
- {
+ char buf[512];
+ while (fgets(buf, 511, fin) != NULL) {
#if HAVE_EDSP || HAVE_NEON
- if (memcmp(buf, "Features", 8) == 0)
- {
- char *p;
+ if (memcmp(buf, "Features", 8) == 0) {
+ char *p;
#if HAVE_EDSP
- p=strstr(buf, " edsp");
- if (p != NULL && (p[5] == ' ' || p[5] == '\n'))
- {
- flags |= HAS_EDSP;
- }
+ p = strstr(buf, " edsp");
+ if (p != NULL && (p[5] == ' ' || p[5] == '\n')) {
+ flags |= HAS_EDSP;
+ }
#if HAVE_NEON
- p = strstr(buf, " neon");
- if (p != NULL && (p[5] == ' ' || p[5] == '\n'))
- {
- flags |= HAS_NEON;
- }
+ p = strstr(buf, " neon");
+ if (p != NULL && (p[5] == ' ' || p[5] == '\n')) {
+ flags |= HAS_NEON;
+ }
#endif /* HAVE_NEON */
#endif /* HAVE_EDSP */
- }
+ }
#endif /* HAVE_EDSP || HAVE_NEON */
#if HAVE_MEDIA
- if (memcmp(buf, "CPU architecture:",17) == 0){
- int version;
- version = atoi(buf+17);
- if (version >= 6)
- {
- flags |= HAS_MEDIA;
- }
- }
-#endif /* HAVE_MEDIA */
+ if (memcmp(buf, "CPU architecture:", 17) == 0) {
+ int version;
+ version = atoi(buf + 17);
+ if (version >= 6) {
+ flags |= HAS_MEDIA;
}
- fclose(fin);
+ }
+#endif /* HAVE_MEDIA */
}
- return flags & mask;
+ fclose(fin);
+ }
+ return flags & mask;
}
#else /* end __linux__ */
#error "--enable-runtime-cpu-detect selected, but no CPU detection method " \
- "available for your platform. Reconfigure with --disable-runtime-cpu-detect."
+"available for your platform. Reconfigure with --disable-runtime-cpu-detect."
#endif
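
The Linux branch above scans /proc/cpuinfo because /proc/self/auxv is unreliable on older Android, and it requires a delimiter after each feature name so that, say, "neonx" never matches. The same scan as a standalone sketch (the flag value is assumed to match arm.h):

    #include <stdio.h>
    #include <string.h>

    #define HAS_NEON 0x04   /* assumed to match arm.h */

    static int detect_neon(void) {
      char buf[512];
      int flags = 0;
      FILE *fin = fopen("/proc/cpuinfo", "r");
      if (fin == NULL) return 0;
      while (fgets(buf, sizeof(buf), fin) != NULL) {
        if (memcmp(buf, "Features", 8) == 0) {
          /* Accept " neon" only when followed by a space or newline. */
          char *p = strstr(buf, " neon");
          if (p != NULL && (p[5] == ' ' || p[5] == '\n'))
            flags |= HAS_NEON;
        }
      }
      fclose(fin);
      return flags;
    }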
diff --git a/libvpx/vpx_ports/asm_offsets.h b/libvpx/vpx_ports/asm_offsets.h
index 7b6ae4a..d3a3e5a 100644
--- a/libvpx/vpx_ports/asm_offsets.h
+++ b/libvpx/vpx_ports/asm_offsets.h
@@ -15,8 +15,8 @@
#include <stddef.h>
#define ct_assert(name,cond) \
- static void assert_##name(void) UNUSED;\
- static void assert_##name(void) {switch(0){case 0:case !!(cond):;}}
+ static void assert_##name(void) UNUSED;\
+ static void assert_##name(void) {switch(0){case 0:case !!(cond):;}}
#if INLINE_ASM
#define DEFINE(sym, val) asm("\n" #sym " EQU %0" : : "i" (val))
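
ct_assert() turns a compile-time condition into a switch whose case labels collide exactly when the condition is false: !!(cond) is 1 for any true condition (a distinct label) and 0 for a false one (a duplicate of case 0, which is a compile error). Roughly, assuming UNUSED expands to an unused-function attribute:

    #include <stdint.h>

    /* What ct_assert(int32_size, sizeof(int32_t) == 4) expands to,
     * minus the UNUSED forward declaration. Were the condition false,
     * both labels would be 0 and compilation would fail. */
    static void assert_int32_size(void) {
      switch (0) { case 0: case !!(sizeof(int32_t) == 4): ; }
    }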
diff --git a/libvpx/vpx_ports/config.h b/libvpx/vpx_ports/config.h
new file mode 100644
index 0000000..1abe70d
--- /dev/null
+++ b/libvpx/vpx_ports/config.h
@@ -0,0 +1,10 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "vpx_config.h"
diff --git a/libvpx/vpx_ports/emmintrin_compat.h b/libvpx/vpx_ports/emmintrin_compat.h
new file mode 100644
index 0000000..782d603
--- /dev/null
+++ b/libvpx/vpx_ports/emmintrin_compat.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_PORTS_EMMINTRIN_COMPAT_H
+#define VPX_PORTS_EMMINTRIN_COMPAT_H
+
+#if defined(__GNUC__) && __GNUC__ < 4
+/* From emmintrin.h (gcc 4.5.3) */
+/* Casts between various SP, DP, INT vector types. Note that these do no
+ conversion of values, they just change the type. */
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castpd_ps(__m128d __A)
+{
+ return (__m128) __A;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castpd_si128(__m128d __A)
+{
+ return (__m128i) __A;
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castps_pd(__m128 __A)
+{
+ return (__m128d) __A;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castps_si128(__m128 __A)
+{
+ return (__m128i) __A;
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castsi128_ps(__m128i __A)
+{
+ return (__m128) __A;
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castsi128_pd(__m128i __A)
+{
+ return (__m128d) __A;
+}
+#endif
+
+#endif
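
The shims above exist because emmintrin.h in gcc releases before 4.0 lacked the _mm_cast* helpers; each one reinterprets a vector's static type without touching its bits. A small sketch of that distinction:

    #include <emmintrin.h>

    static __m128 int_bits_as_float(void) {
      /* 0x3f800000 is the IEEE-754 bit pattern of 1.0f; the cast keeps
       * those bits and changes only the compile-time type. */
      __m128i raw = _mm_set1_epi32(0x3f800000);
      return _mm_castsi128_ps(raw);
    }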
diff --git a/libvpx/vpx_ports/emms.asm b/libvpx/vpx_ports/emms.asm
index efad1a5..db8da28 100644
--- a/libvpx/vpx_ports/emms.asm
+++ b/libvpx/vpx_ports/emms.asm
@@ -18,7 +18,7 @@ sym(vpx_reset_mmx_state):
ret
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
global sym(vpx_winx64_fldcw) PRIVATE
sym(vpx_winx64_fldcw):
sub rsp, 8
diff --git a/libvpx/vpx_ports/mem.h b/libvpx/vpx_ports/mem.h
index 29e507f..62b86bb 100644
--- a/libvpx/vpx_ports/mem.h
+++ b/libvpx/vpx_ports/mem.h
@@ -11,6 +11,7 @@
#ifndef VPX_PORTS_MEM_H
#define VPX_PORTS_MEM_H
+
#include "vpx_config.h"
#include "vpx/vpx_integer.h"
@@ -31,8 +32,8 @@
* within the array.
*/
#define DECLARE_ALIGNED_ARRAY(a,typ,val,n)\
-typ val##_[(n)+(a)/sizeof(typ)+1];\
-typ *val = (typ*)((((intptr_t)val##_)+(a)-1)&((intptr_t)-(a)))
+ typ val##_[(n)+(a)/sizeof(typ)+1];\
+ typ *val = (typ*)((((intptr_t)val##_)+(a)-1)&((intptr_t)-(a)))
/* Indicates that the usage of the specified variable has been audited to assure
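
DECLARE_ALIGNED_ARRAY over-allocates by (a)/sizeof(typ)+1 elements, then rounds the raw array's address up to the next multiple of a, so val is aligned wherever val_ happens to land. The rounding is the usual mask trick, shown here as a plain function:

    #include <stdint.h>

    /* The arithmetic inside DECLARE_ALIGNED_ARRAY: round p up to the
     * next multiple of align, where align is a power of two. */
    static void *align_up(void *p, intptr_t align) {
      return (void *)(((intptr_t)p + align - 1) & -align);
    }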
diff --git a/libvpx/vpx_ports/mem_ops.h b/libvpx/vpx_ports/mem_ops.h
index dec28d5..2d44a3a 100644
--- a/libvpx/vpx_ports/mem_ops.h
+++ b/libvpx/vpx_ports/mem_ops.h
@@ -60,88 +60,82 @@
#undef mem_get_be16
#define mem_get_be16 mem_ops_wrap_symbol(mem_get_be16)
-static unsigned MEM_VALUE_T mem_get_be16(const void *vmem)
-{
- unsigned MEM_VALUE_T val;
- const MAU_T *mem = (const MAU_T *)vmem;
-
- val = mem[0] << 8;
- val |= mem[1];
- return val;
+static unsigned MEM_VALUE_T mem_get_be16(const void *vmem) {
+ unsigned MEM_VALUE_T val;
+ const MAU_T *mem = (const MAU_T *)vmem;
+
+ val = mem[0] << 8;
+ val |= mem[1];
+ return val;
}
#undef mem_get_be24
#define mem_get_be24 mem_ops_wrap_symbol(mem_get_be24)
-static unsigned MEM_VALUE_T mem_get_be24(const void *vmem)
-{
- unsigned MEM_VALUE_T val;
- const MAU_T *mem = (const MAU_T *)vmem;
-
- val = mem[0] << 16;
- val |= mem[1] << 8;
- val |= mem[2];
- return val;
+static unsigned MEM_VALUE_T mem_get_be24(const void *vmem) {
+ unsigned MEM_VALUE_T val;
+ const MAU_T *mem = (const MAU_T *)vmem;
+
+ val = mem[0] << 16;
+ val |= mem[1] << 8;
+ val |= mem[2];
+ return val;
}
#undef mem_get_be32
#define mem_get_be32 mem_ops_wrap_symbol(mem_get_be32)
-static unsigned MEM_VALUE_T mem_get_be32(const void *vmem)
-{
- unsigned MEM_VALUE_T val;
- const MAU_T *mem = (const MAU_T *)vmem;
-
- val = mem[0] << 24;
- val |= mem[1] << 16;
- val |= mem[2] << 8;
- val |= mem[3];
- return val;
+static unsigned MEM_VALUE_T mem_get_be32(const void *vmem) {
+ unsigned MEM_VALUE_T val;
+ const MAU_T *mem = (const MAU_T *)vmem;
+
+ val = mem[0] << 24;
+ val |= mem[1] << 16;
+ val |= mem[2] << 8;
+ val |= mem[3];
+ return val;
}
#undef mem_get_le16
#define mem_get_le16 mem_ops_wrap_symbol(mem_get_le16)
-static unsigned MEM_VALUE_T mem_get_le16(const void *vmem)
-{
- unsigned MEM_VALUE_T val;
- const MAU_T *mem = (const MAU_T *)vmem;
-
- val = mem[1] << 8;
- val |= mem[0];
- return val;
+static unsigned MEM_VALUE_T mem_get_le16(const void *vmem) {
+ unsigned MEM_VALUE_T val;
+ const MAU_T *mem = (const MAU_T *)vmem;
+
+ val = mem[1] << 8;
+ val |= mem[0];
+ return val;
}
#undef mem_get_le24
#define mem_get_le24 mem_ops_wrap_symbol(mem_get_le24)
-static unsigned MEM_VALUE_T mem_get_le24(const void *vmem)
-{
- unsigned MEM_VALUE_T val;
- const MAU_T *mem = (const MAU_T *)vmem;
-
- val = mem[2] << 16;
- val |= mem[1] << 8;
- val |= mem[0];
- return val;
+static unsigned MEM_VALUE_T mem_get_le24(const void *vmem) {
+ unsigned MEM_VALUE_T val;
+ const MAU_T *mem = (const MAU_T *)vmem;
+
+ val = mem[2] << 16;
+ val |= mem[1] << 8;
+ val |= mem[0];
+ return val;
}
#undef mem_get_le32
#define mem_get_le32 mem_ops_wrap_symbol(mem_get_le32)
-static unsigned MEM_VALUE_T mem_get_le32(const void *vmem)
-{
- unsigned MEM_VALUE_T val;
- const MAU_T *mem = (const MAU_T *)vmem;
-
- val = mem[3] << 24;
- val |= mem[2] << 16;
- val |= mem[1] << 8;
- val |= mem[0];
- return val;
+static unsigned MEM_VALUE_T mem_get_le32(const void *vmem) {
+ unsigned MEM_VALUE_T val;
+ const MAU_T *mem = (const MAU_T *)vmem;
+
+ val = mem[3] << 24;
+ val |= mem[2] << 16;
+ val |= mem[1] << 8;
+ val |= mem[0];
+ return val;
}
#define mem_get_s_generic(end,sz) \
- static signed MEM_VALUE_T mem_get_s##end##sz(const void *vmem) {\
- const MAU_T *mem = (const MAU_T*)vmem;\
- signed MEM_VALUE_T val = mem_get_##end##sz(mem);\
- return (val << (MEM_VALUE_T_SZ_BITS - sz)) >> (MEM_VALUE_T_SZ_BITS - sz);\
- }
+ static signed MEM_VALUE_T mem_get_s##end##sz(const void *vmem) {\
+ const MAU_T *mem = (const MAU_T*)vmem;\
+ signed MEM_VALUE_T val = mem_get_##end##sz(mem);\
+ return (val << (MEM_VALUE_T_SZ_BITS - sz)) >> (MEM_VALUE_T_SZ_BITS - sz);\
+ }
#undef mem_get_sbe16
#define mem_get_sbe16 mem_ops_wrap_symbol(mem_get_sbe16)
@@ -169,66 +163,60 @@ mem_get_s_generic(le, 32)
#undef mem_put_be16
#define mem_put_be16 mem_ops_wrap_symbol(mem_put_be16)
-static void mem_put_be16(void *vmem, MEM_VALUE_T val)
-{
- MAU_T *mem = (MAU_T *)vmem;
+static void mem_put_be16(void *vmem, MEM_VALUE_T val) {
+ MAU_T *mem = (MAU_T *)vmem;
- mem[0] = (val >> 8) & 0xff;
- mem[1] = (val >> 0) & 0xff;
+ mem[0] = (val >> 8) & 0xff;
+ mem[1] = (val >> 0) & 0xff;
}
#undef mem_put_be24
#define mem_put_be24 mem_ops_wrap_symbol(mem_put_be24)
-static void mem_put_be24(void *vmem, MEM_VALUE_T val)
-{
- MAU_T *mem = (MAU_T *)vmem;
+static void mem_put_be24(void *vmem, MEM_VALUE_T val) {
+ MAU_T *mem = (MAU_T *)vmem;
- mem[0] = (val >> 16) & 0xff;
- mem[1] = (val >> 8) & 0xff;
- mem[2] = (val >> 0) & 0xff;
+ mem[0] = (val >> 16) & 0xff;
+ mem[1] = (val >> 8) & 0xff;
+ mem[2] = (val >> 0) & 0xff;
}
#undef mem_put_be32
#define mem_put_be32 mem_ops_wrap_symbol(mem_put_be32)
-static void mem_put_be32(void *vmem, MEM_VALUE_T val)
-{
- MAU_T *mem = (MAU_T *)vmem;
-
- mem[0] = (val >> 24) & 0xff;
- mem[1] = (val >> 16) & 0xff;
- mem[2] = (val >> 8) & 0xff;
- mem[3] = (val >> 0) & 0xff;
+static void mem_put_be32(void *vmem, MEM_VALUE_T val) {
+ MAU_T *mem = (MAU_T *)vmem;
+
+ mem[0] = (val >> 24) & 0xff;
+ mem[1] = (val >> 16) & 0xff;
+ mem[2] = (val >> 8) & 0xff;
+ mem[3] = (val >> 0) & 0xff;
}
#undef mem_put_le16
#define mem_put_le16 mem_ops_wrap_symbol(mem_put_le16)
-static void mem_put_le16(void *vmem, MEM_VALUE_T val)
-{
- MAU_T *mem = (MAU_T *)vmem;
+static void mem_put_le16(void *vmem, MEM_VALUE_T val) {
+ MAU_T *mem = (MAU_T *)vmem;
- mem[0] = (val >> 0) & 0xff;
- mem[1] = (val >> 8) & 0xff;
+ mem[0] = (val >> 0) & 0xff;
+ mem[1] = (val >> 8) & 0xff;
}
#undef mem_put_le24
#define mem_put_le24 mem_ops_wrap_symbol(mem_put_le24)
-static void mem_put_le24(void *vmem, MEM_VALUE_T val)
-{
- MAU_T *mem = (MAU_T *)vmem;
+static void mem_put_le24(void *vmem, MEM_VALUE_T val) {
+ MAU_T *mem = (MAU_T *)vmem;
- mem[0] = (val >> 0) & 0xff;
- mem[1] = (val >> 8) & 0xff;
- mem[2] = (val >> 16) & 0xff;
+ mem[0] = (val >> 0) & 0xff;
+ mem[1] = (val >> 8) & 0xff;
+ mem[2] = (val >> 16) & 0xff;
}
#undef mem_put_le32
#define mem_put_le32 mem_ops_wrap_symbol(mem_put_le32)
-static void mem_put_le32(void *vmem, MEM_VALUE_T val)
-{
- MAU_T *mem = (MAU_T *)vmem;
-
- mem[0] = (val >> 0) & 0xff;
- mem[1] = (val >> 8) & 0xff;
- mem[2] = (val >> 16) & 0xff;
- mem[3] = (val >> 24) & 0xff;
+static void mem_put_le32(void *vmem, MEM_VALUE_T val) {
+ MAU_T *mem = (MAU_T *)vmem;
+
+ mem[0] = (val >> 0) & 0xff;
+ mem[1] = (val >> 8) & 0xff;
+ mem[2] = (val >> 16) & 0xff;
+ mem[3] = (val >> 24) & 0xff;
}
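
All of these accessors move one memory access unit at a time and assemble the value with explicit shifts, so they are endian-independent and safe on targets that fault on unaligned loads; the signed getters additionally sign-extend by shifting the value to the top of MEM_VALUE_T and arithmetic-shifting it back. A usage sketch (buffer contents illustrative):

    #include <stdio.h>
    #include "vpx_ports/mem_ops.h"

    int main(void) {
      const unsigned char buf[4] = { 0x12, 0x34, 0x56, 0x78 };
      /* Prints 0x12345678 and 0x78563412 regardless of host endianness. */
      printf("be32 = 0x%08x\n", (unsigned)mem_get_be32(buf));
      printf("le32 = 0x%08x\n", (unsigned)mem_get_le32(buf));
      return 0;
    }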
diff --git a/libvpx/vpx_ports/mem_ops_aligned.h b/libvpx/vpx_ports/mem_ops_aligned.h
index fca653a..0100300 100644
--- a/libvpx/vpx_ports/mem_ops_aligned.h
+++ b/libvpx/vpx_ports/mem_ops_aligned.h
@@ -24,61 +24,61 @@
* could redefine these macros.
*/
#define swap_endian_16(val,raw) do {\
- val = ((raw>>8) & 0x00ff) \
- | ((raw<<8) & 0xff00);\
- } while(0)
+ val = ((raw>>8) & 0x00ff) \
+ | ((raw<<8) & 0xff00);\
+ } while(0)
#define swap_endian_32(val,raw) do {\
- val = ((raw>>24) & 0x000000ff) \
- | ((raw>>8) & 0x0000ff00) \
- | ((raw<<8) & 0x00ff0000) \
- | ((raw<<24) & 0xff000000); \
- } while(0)
+ val = ((raw>>24) & 0x000000ff) \
+ | ((raw>>8) & 0x0000ff00) \
+ | ((raw<<8) & 0x00ff0000) \
+ | ((raw<<24) & 0xff000000); \
+ } while(0)
#define swap_endian_16_se(val,raw) do {\
- swap_endian_16(val,raw);\
- val = ((val << 16) >> 16);\
- } while(0)
+ swap_endian_16(val,raw);\
+ val = ((val << 16) >> 16);\
+ } while(0)
#define swap_endian_32_se(val,raw) swap_endian_32(val,raw)
#define mem_get_ne_aligned_generic(end,sz) \
- static unsigned MEM_VALUE_T mem_get_##end##sz##_aligned(const void *vmem) {\
- const uint##sz##_t *mem = (const uint##sz##_t *)vmem;\
- return *mem;\
- }
+ static unsigned MEM_VALUE_T mem_get_##end##sz##_aligned(const void *vmem) {\
+ const uint##sz##_t *mem = (const uint##sz##_t *)vmem;\
+ return *mem;\
+ }
#define mem_get_sne_aligned_generic(end,sz) \
- static signed MEM_VALUE_T mem_get_s##end##sz##_aligned(const void *vmem) {\
- const int##sz##_t *mem = (const int##sz##_t *)vmem;\
- return *mem;\
- }
+ static signed MEM_VALUE_T mem_get_s##end##sz##_aligned(const void *vmem) {\
+ const int##sz##_t *mem = (const int##sz##_t *)vmem;\
+ return *mem;\
+ }
#define mem_get_se_aligned_generic(end,sz) \
- static unsigned MEM_VALUE_T mem_get_##end##sz##_aligned(const void *vmem) {\
- const uint##sz##_t *mem = (const uint##sz##_t *)vmem;\
- unsigned MEM_VALUE_T val, raw = *mem;\
- swap_endian_##sz(val,raw);\
- return val;\
- }
+ static unsigned MEM_VALUE_T mem_get_##end##sz##_aligned(const void *vmem) {\
+ const uint##sz##_t *mem = (const uint##sz##_t *)vmem;\
+ unsigned MEM_VALUE_T val, raw = *mem;\
+ swap_endian_##sz(val,raw);\
+ return val;\
+ }
#define mem_get_sse_aligned_generic(end,sz) \
- static signed MEM_VALUE_T mem_get_s##end##sz##_aligned(const void *vmem) {\
- const int##sz##_t *mem = (const int##sz##_t *)vmem;\
- unsigned MEM_VALUE_T val, raw = *mem;\
- swap_endian_##sz##_se(val,raw);\
- return val;\
- }
+ static signed MEM_VALUE_T mem_get_s##end##sz##_aligned(const void *vmem) {\
+ const int##sz##_t *mem = (const int##sz##_t *)vmem;\
+ unsigned MEM_VALUE_T val, raw = *mem;\
+ swap_endian_##sz##_se(val,raw);\
+ return val;\
+ }
#define mem_put_ne_aligned_generic(end,sz) \
- static void mem_put_##end##sz##_aligned(void *vmem, MEM_VALUE_T val) {\
- uint##sz##_t *mem = (uint##sz##_t *)vmem;\
- *mem = (uint##sz##_t)val;\
- }
+ static void mem_put_##end##sz##_aligned(void *vmem, MEM_VALUE_T val) {\
+ uint##sz##_t *mem = (uint##sz##_t *)vmem;\
+ *mem = (uint##sz##_t)val;\
+ }
#define mem_put_se_aligned_generic(end,sz) \
- static void mem_put_##end##sz##_aligned(void *vmem, MEM_VALUE_T val) {\
- uint##sz##_t *mem = (uint##sz##_t *)vmem, raw;\
- swap_endian_##sz(raw,val);\
- *mem = (uint##sz##_t)raw;\
- }
+ static void mem_put_##end##sz##_aligned(void *vmem, MEM_VALUE_T val) {\
+ uint##sz##_t *mem = (uint##sz##_t *)vmem, raw;\
+ swap_endian_##sz(raw,val);\
+ *mem = (uint##sz##_t)raw;\
+ }
#include "vpx_config.h"
#if CONFIG_BIG_ENDIAN
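
When host and stored endianness differ, the aligned getters do one native load and then byte-swap with the macros above. The 32-bit swap, transcribed as a function for readability (the real code stays a macro so it works across integer widths):

    #include <stdint.h>

    /* Equivalent of swap_endian_32: reverse the four bytes of raw. */
    static uint32_t swap32(uint32_t raw) {
      return ((raw >> 24) & 0x000000ffu) |
             ((raw >>  8) & 0x0000ff00u) |
             ((raw <<  8) & 0x00ff0000u) |
             ((raw << 24) & 0xff000000u);
    }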
diff --git a/libvpx/vpx_ports/vpx_once.h b/libvpx/vpx_ports/vpx_once.h
new file mode 100644
index 0000000..16a735c
--- /dev/null
+++ b/libvpx/vpx_ports/vpx_once.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "vpx_config.h"
+
+#if CONFIG_MULTITHREAD && defined(_WIN32)
+#include <windows.h>
+#include <stdlib.h>
+static void once(void (*func)(void))
+{
+ static CRITICAL_SECTION *lock;
+ static LONG waiters;
+ static int done;
+ void *lock_ptr = &lock;
+
+ /* If the initialization is complete, return early. This isn't just an
+     * optimization; it prevents races on the destruction of the global
+ * lock.
+ */
+ if(done)
+ return;
+
+ InterlockedIncrement(&waiters);
+
+ /* Get a lock. We create one and try to make it the one-true-lock,
+ * throwing it away if we lost the race.
+ */
+
+ {
+ /* Scope to protect access to new_lock */
+ CRITICAL_SECTION *new_lock = malloc(sizeof(CRITICAL_SECTION));
+ InitializeCriticalSection(new_lock);
+ if (InterlockedCompareExchangePointer(lock_ptr, new_lock, NULL) != NULL)
+ {
+ DeleteCriticalSection(new_lock);
+ free(new_lock);
+ }
+ }
+
+ /* At this point, we have a lock that can be synchronized on. We don't
+ * care which thread actually performed the allocation.
+ */
+
+ EnterCriticalSection(lock);
+
+ if (!done)
+ {
+ func();
+ done = 1;
+ }
+
+ LeaveCriticalSection(lock);
+
+ /* Last one out should free resources. The destructed objects are
+ * protected by checking if(done) above.
+ */
+ if(!InterlockedDecrement(&waiters))
+ {
+ DeleteCriticalSection(lock);
+ free(lock);
+ lock = NULL;
+ }
+}
+
+
+#elif CONFIG_MULTITHREAD && HAVE_PTHREAD_H
+#include <pthread.h>
+static void once(void (*func)(void))
+{
+ static pthread_once_t lock = PTHREAD_ONCE_INIT;
+ pthread_once(&lock, func);
+}
+
+
+#else
+/* No-op version that performs no synchronization. vp8_rtcd() is idempotent,
+ * so as long as your platform provides atomic loads/stores of pointers
+ * no synchronization is strictly necessary.
+ */
+
+static void once(void (*func)(void))
+{
+ static int done;
+
+ if(!done)
+ {
+ func();
+ done = 1;
+ }
+}
+#endif
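
Whichever implementation is compiled in, the contract is the same: func has run by the time once() returns, and at most one thread executes it when CONFIG_MULTITHREAD is set (the fallback tolerates a benign double-run because vp8_rtcd() is idempotent). A hedged usage sketch; init_tables and my_codec_entry are hypothetical:

    #include "vpx_ports/vpx_once.h"   /* header path assumed */

    static void init_tables(void) {
      /* one-time setup, e.g. filling RTCD-style function pointers */
    }

    void my_codec_entry(void) {
      once(init_tables);   /* safe to call from every entry point */
    }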
diff --git a/libvpx/vpx_ports/vpx_timer.h b/libvpx/vpx_ports/vpx_timer.h
index d07e086..cdad9ef 100644
--- a/libvpx/vpx_ports/vpx_timer.h
+++ b/libvpx/vpx_ports/vpx_timer.h
@@ -32,65 +32,61 @@
/* timersub is not provided by msys at this time. */
#ifndef timersub
#define timersub(a, b, result) \
- do { \
- (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \
- (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \
- if ((result)->tv_usec < 0) { \
- --(result)->tv_sec; \
- (result)->tv_usec += 1000000; \
- } \
- } while (0)
+ do { \
+ (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \
+ (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \
+ if ((result)->tv_usec < 0) { \
+ --(result)->tv_sec; \
+ (result)->tv_usec += 1000000; \
+ } \
+ } while (0)
#endif
#endif
-struct vpx_usec_timer
-{
+struct vpx_usec_timer {
#if defined(_WIN32)
- LARGE_INTEGER begin, end;
+ LARGE_INTEGER begin, end;
#else
- struct timeval begin, end;
+ struct timeval begin, end;
#endif
};
static void
-vpx_usec_timer_start(struct vpx_usec_timer *t)
-{
+vpx_usec_timer_start(struct vpx_usec_timer *t) {
#if defined(_WIN32)
- QueryPerformanceCounter(&t->begin);
+ QueryPerformanceCounter(&t->begin);
#else
- gettimeofday(&t->begin, NULL);
+ gettimeofday(&t->begin, NULL);
#endif
}
static void
-vpx_usec_timer_mark(struct vpx_usec_timer *t)
-{
+vpx_usec_timer_mark(struct vpx_usec_timer *t) {
#if defined(_WIN32)
- QueryPerformanceCounter(&t->end);
+ QueryPerformanceCounter(&t->end);
#else
- gettimeofday(&t->end, NULL);
+ gettimeofday(&t->end, NULL);
#endif
}
static int64_t
-vpx_usec_timer_elapsed(struct vpx_usec_timer *t)
-{
+vpx_usec_timer_elapsed(struct vpx_usec_timer *t) {
#if defined(_WIN32)
- LARGE_INTEGER freq, diff;
+ LARGE_INTEGER freq, diff;
- diff.QuadPart = t->end.QuadPart - t->begin.QuadPart;
+ diff.QuadPart = t->end.QuadPart - t->begin.QuadPart;
- QueryPerformanceFrequency(&freq);
- return diff.QuadPart * 1000000 / freq.QuadPart;
+ QueryPerformanceFrequency(&freq);
+ return diff.QuadPart * 1000000 / freq.QuadPart;
#else
- struct timeval diff;
+ struct timeval diff;
- timersub(&t->end, &t->begin, &diff);
- return diff.tv_sec * 1000000 + diff.tv_usec;
+ timersub(&t->end, &t->begin, &diff);
+ return diff.tv_sec * 1000000 + diff.tv_usec;
#endif
}
@@ -101,9 +97,8 @@ vpx_usec_timer_elapsed(struct vpx_usec_timer *t)
#define timersub(a, b, result)
#endif
-struct vpx_usec_timer
-{
- void *dummy;
+struct vpx_usec_timer {
+ void *dummy;
};
static void
@@ -113,7 +108,9 @@ static void
vpx_usec_timer_mark(struct vpx_usec_timer *t) { }
static long
-vpx_usec_timer_elapsed(struct vpx_usec_timer *t) { return 0; }
+vpx_usec_timer_elapsed(struct vpx_usec_timer *t) {
+ return 0;
+}
#endif /* CONFIG_OS_SUPPORT */
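
Typical use brackets the region of interest with start/mark and reads the elapsed microseconds: on Windows the QueryPerformanceCounter delta is scaled by QueryPerformanceFrequency, elsewhere timersub handles the tv_usec borrow. A usage sketch with a hypothetical work callback:

    #include <stdio.h>
    #include "vpx_ports/vpx_timer.h"

    static void time_it(void (*work)(void)) {
      struct vpx_usec_timer t;
      vpx_usec_timer_start(&t);
      work();
      vpx_usec_timer_mark(&t);
      printf("elapsed: %ld us\n", (long)vpx_usec_timer_elapsed(&t));
    }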
diff --git a/libvpx/vpx_ports/vpxtypes.h b/libvpx/vpx_ports/vpxtypes.h
deleted file mode 100644
index f2fb089..0000000
--- a/libvpx/vpx_ports/vpxtypes.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#ifndef __VPXTYPES_H__
-#define __VPXTYPES_H__
-
-#include "vpx_config.h"
-
-//#include <sys/types.h>
-#ifdef _MSC_VER
-# include <basetsd.h>
-typedef SSIZE_T ssize_t;
-#endif
-
-#if defined(HAVE_STDINT_H) && HAVE_STDINT_H
-/* C99 types are preferred to vpx integer types */
-# include <stdint.h>
-#endif
-
-/*!\defgroup basetypes Base Types
- @{*/
-#if !defined(HAVE_STDINT_H) && !defined(INT_T_DEFINED)
-# ifdef STRICTTYPES
-typedef signed char int8_t;
-typedef signed short int16_t;
-typedef signed int int32_t;
-# else
-typedef char int8_t;
-typedef short int16_t;
-typedef int int32_t;
-# endif
-typedef unsigned char uint8_t;
-typedef unsigned short uint16_t;
-typedef unsigned int uint32_t;
-#endif
-
-typedef int8_t vpxs8;
-typedef uint8_t vpxu8;
-typedef int16_t vpxs16;
-typedef uint16_t vpxu16;
-typedef int32_t vpxs32;
-typedef uint32_t vpxu32;
-typedef int32_t vpxbool;
-
-enum {vpxfalse, vpxtrue};
-
-/*!\def OTC
- \brief a macro suitable for declaring a constant #vpxtc*/
-/*!\def VPXTC
- \brief printf format string suitable for printing an #vpxtc*/
-#ifdef UNICODE
-# ifdef NO_WCHAR
-# error "no non-wchar support added yet"
-# else
-# include <wchar.h>
-typedef wchar_t vpxtc;
-# define OTC(str) L ## str
-# define VPXTC "ls"
-# endif /*NO_WCHAR*/
-#else
-typedef char vpxtc;
-# define OTC(str) (vpxtc*)str
-# define VPXTC "s"
-#endif /*UNICODE*/
-/*@} end - base types*/
-
-/*!\addtogroup basetypes
- @{*/
-/*!\def VPX64
- \brief printf format string suitable for printing an #vpxs64*/
-#if defined(HAVE_STDINT_H)
-# define VPX64 PRId64
-typedef int64_t vpxs64;
-#elif defined(HASLONGLONG)
-# undef PRId64
-# define PRId64 "lld"
-# define VPX64 PRId64
-typedef long long vpxs64;
-#elif defined(WIN32) || defined(_WIN32_WCE)
-# undef PRId64
-# define PRId64 "I64d"
-# define VPX64 PRId64
-typedef __int64 vpxs64;
-typedef unsigned __int64 vpxu64;
-#elif defined(__uClinux__) && defined(CHIP_DM642)
-# include <lddk.h>
-# undef PRId64
-# define PRId64 "lld"
-# define VPX64 PRId64
-typedef long vpxs64;
-#else
-# error "64 bit integer type undefined for this platform!"
-#endif
-#if !defined(HAVE_STDINT_H) && !defined(INT_T_DEFINED)
-typedef vpxs64 int64_t;
-typedef vpxu64 uint64_t;
-#endif
-/*!@} end - base types*/
-
-/*!\ingroup basetypes
- \brief Common return type*/
-typedef enum
-{
- VPX_NOT_FOUND = -404,
- VPX_BUFFER_EMPTY = -202,
- VPX_BUFFER_FULL = -201,
-
- VPX_CONNREFUSED = -102,
- VPX_TIMEDOUT = -101,
- VPX_WOULDBLOCK = -100,
-
- VPX_NET_ERROR = -9,
- VPX_INVALID_VERSION = -8,
- VPX_INPROGRESS = -7,
- VPX_NOT_SUPP = -6,
- VPX_NO_MEM = -3,
- VPX_INVALID_PARAMS = -2,
- VPX_ERROR = -1,
- VPX_OK = 0,
- VPX_DONE = 1
-} vpxsc;
-
-#if defined(WIN32) || defined(_WIN32_WCE)
-# define DLLIMPORT __declspec(dllimport)
-# define DLLEXPORT __declspec(dllexport)
-# define DLLLOCAL
-#elif defined(LINUX)
-# define DLLIMPORT
-/*visibility attribute support is available in 3.4 and later.
- see: http://gcc.gnu.org/wiki/Visibility for more info*/
-# if defined(__GNUC__) && ((__GNUC__<<16|(__GNUC_MINOR__&0xff)) >= (3<<16|4))
-# define GCC_HASCLASSVISIBILITY
-# endif /*defined(__GNUC__) && __GNUC_PREREQ(3,4)*/
-# ifdef GCC_HASCLASSVISIBILITY
-# define DLLEXPORT __attribute__ ((visibility("default")))
-# define DLLLOCAL __attribute__ ((visibility("hidden")))
-# else
-# define DLLEXPORT
-# define DLLLOCAL
-# endif /*GCC_HASCLASSVISIBILITY*/
-#endif /*platform ifdefs*/
-
-#endif /*__VPXTYPES_H__*/
-
-#undef VPXAPI
-/*!\def VPXAPI
- \brief library calling convention/storage class attributes.
-
- Specifies whether the function is imported through a dll
- or is from a static library.*/
-#ifdef VPXDLL
-# ifdef VPXDLLEXPORT
-# define VPXAPI DLLEXPORT
-# else
-# define VPXAPI DLLIMPORT
-# endif /*VPXDLLEXPORT*/
-#else
-# define VPXAPI
-#endif /*VPXDLL*/
diff --git a/libvpx/vpx_ports/x86.h b/libvpx/vpx_ports/x86.h
index 9dd8c4b..b009c35 100644
--- a/libvpx/vpx_ports/x86.h
+++ b/libvpx/vpx_ports/x86.h
@@ -14,80 +14,79 @@
#include <stdlib.h>
#include "vpx_config.h"
-typedef enum
-{
- VPX_CPU_UNKNOWN = -1,
- VPX_CPU_AMD,
- VPX_CPU_AMD_OLD,
- VPX_CPU_CENTAUR,
- VPX_CPU_CYRIX,
- VPX_CPU_INTEL,
- VPX_CPU_NEXGEN,
- VPX_CPU_NSC,
- VPX_CPU_RISE,
- VPX_CPU_SIS,
- VPX_CPU_TRANSMETA,
- VPX_CPU_TRANSMETA_OLD,
- VPX_CPU_UMC,
- VPX_CPU_VIA,
-
- VPX_CPU_LAST
+typedef enum {
+ VPX_CPU_UNKNOWN = -1,
+ VPX_CPU_AMD,
+ VPX_CPU_AMD_OLD,
+ VPX_CPU_CENTAUR,
+ VPX_CPU_CYRIX,
+ VPX_CPU_INTEL,
+ VPX_CPU_NEXGEN,
+ VPX_CPU_NSC,
+ VPX_CPU_RISE,
+ VPX_CPU_SIS,
+ VPX_CPU_TRANSMETA,
+ VPX_CPU_TRANSMETA_OLD,
+ VPX_CPU_UMC,
+ VPX_CPU_VIA,
+
+ VPX_CPU_LAST
} vpx_cpu_t;
-#if defined(__GNUC__) && __GNUC__
+#if defined(__GNUC__) && __GNUC__ || defined(__ANDROID__)
#if ARCH_X86_64
#define cpuid(func,ax,bx,cx,dx)\
- __asm__ __volatile__ (\
- "cpuid \n\t" \
- : "=a" (ax), "=b" (bx), "=c" (cx), "=d" (dx) \
- : "a" (func));
+ __asm__ __volatile__ (\
+ "cpuid \n\t" \
+ : "=a" (ax), "=b" (bx), "=c" (cx), "=d" (dx) \
+ : "a" (func));
#else
#define cpuid(func,ax,bx,cx,dx)\
- __asm__ __volatile__ (\
- "mov %%ebx, %%edi \n\t" \
- "cpuid \n\t" \
- "xchg %%edi, %%ebx \n\t" \
- : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
- : "a" (func));
+ __asm__ __volatile__ (\
+ "mov %%ebx, %%edi \n\t" \
+ "cpuid \n\t" \
+ "xchg %%edi, %%ebx \n\t" \
+ : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
+ : "a" (func));
#endif
-#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) /* end __GNUC__ or __ANDROID__ */
#if ARCH_X86_64
#define cpuid(func,ax,bx,cx,dx)\
- asm volatile (\
- "xchg %rsi, %rbx \n\t" \
- "cpuid \n\t" \
- "movl %ebx, %edi \n\t" \
- "xchg %rsi, %rbx \n\t" \
- : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
- : "a" (func));
+ asm volatile (\
+ "xchg %rsi, %rbx \n\t" \
+ "cpuid \n\t" \
+ "movl %ebx, %edi \n\t" \
+ "xchg %rsi, %rbx \n\t" \
+ : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
+ : "a" (func));
#else
#define cpuid(func,ax,bx,cx,dx)\
- asm volatile (\
- "pushl %ebx \n\t" \
- "cpuid \n\t" \
- "movl %ebx, %edi \n\t" \
- "popl %ebx \n\t" \
- : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
- : "a" (func));
+ asm volatile (\
+ "pushl %ebx \n\t" \
+ "cpuid \n\t" \
+ "movl %ebx, %edi \n\t" \
+ "popl %ebx \n\t" \
+ : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
+ : "a" (func));
#endif
-#else
+#else /* end __SUNPRO_C || __SUNPRO_CC */
#if ARCH_X86_64
void __cpuid(int CPUInfo[4], int info_type);
#pragma intrinsic(__cpuid)
#define cpuid(func,a,b,c,d) do{\
- int regs[4];\
- __cpuid(regs,func); a=regs[0]; b=regs[1]; c=regs[2]; d=regs[3];\
- } while(0)
+ int regs[4];\
+ __cpuid(regs,func); a=regs[0]; b=regs[1]; c=regs[2]; d=regs[3];\
+ } while(0)
#else
#define cpuid(func,a,b,c,d)\
- __asm mov eax, func\
- __asm cpuid\
- __asm mov a, eax\
- __asm mov b, ebx\
- __asm mov c, ecx\
- __asm mov d, edx
-#endif
+ __asm mov eax, func\
+ __asm cpuid\
+ __asm mov a, eax\
+ __asm mov b, ebx\
+ __asm mov c, ecx\
+ __asm mov d, edx
#endif
+#endif /* end others */
#define HAS_MMX 0x01
#define HAS_SSE 0x02
@@ -100,47 +99,46 @@ void __cpuid(int CPUInfo[4], int info_type);
#endif
static int
-x86_simd_caps(void)
-{
- unsigned int flags = 0;
- unsigned int mask = ~0;
- unsigned int reg_eax, reg_ebx, reg_ecx, reg_edx;
- char *env;
- (void)reg_ebx;
+x86_simd_caps(void) {
+ unsigned int flags = 0;
+ unsigned int mask = ~0;
+ unsigned int reg_eax, reg_ebx, reg_ecx, reg_edx;
+ char *env;
+ (void)reg_ebx;
- /* See if the CPU capabilities are being overridden by the environment */
- env = getenv("VPX_SIMD_CAPS");
+ /* See if the CPU capabilities are being overridden by the environment */
+ env = getenv("VPX_SIMD_CAPS");
- if (env && *env)
- return (int)strtol(env, NULL, 0);
+ if (env && *env)
+ return (int)strtol(env, NULL, 0);
- env = getenv("VPX_SIMD_CAPS_MASK");
+ env = getenv("VPX_SIMD_CAPS_MASK");
- if (env && *env)
- mask = strtol(env, NULL, 0);
+ if (env && *env)
+ mask = strtol(env, NULL, 0);
- /* Ensure that the CPUID instruction supports extended features */
- cpuid(0, reg_eax, reg_ebx, reg_ecx, reg_edx);
+ /* Ensure that the CPUID instruction supports extended features */
+ cpuid(0, reg_eax, reg_ebx, reg_ecx, reg_edx);
- if (reg_eax < 1)
- return 0;
+ if (reg_eax < 1)
+ return 0;
- /* Get the standard feature flags */
- cpuid(1, reg_eax, reg_ebx, reg_ecx, reg_edx);
+ /* Get the standard feature flags */
+ cpuid(1, reg_eax, reg_ebx, reg_ecx, reg_edx);
- if (reg_edx & BIT(23)) flags |= HAS_MMX;
+ if (reg_edx & BIT(23)) flags |= HAS_MMX;
- if (reg_edx & BIT(25)) flags |= HAS_SSE; /* aka xmm */
+ if (reg_edx & BIT(25)) flags |= HAS_SSE; /* aka xmm */
- if (reg_edx & BIT(26)) flags |= HAS_SSE2; /* aka wmt */
+ if (reg_edx & BIT(26)) flags |= HAS_SSE2; /* aka wmt */
- if (reg_ecx & BIT(0)) flags |= HAS_SSE3;
+ if (reg_ecx & BIT(0)) flags |= HAS_SSE3;
- if (reg_ecx & BIT(9)) flags |= HAS_SSSE3;
+ if (reg_ecx & BIT(9)) flags |= HAS_SSSE3;
- if (reg_ecx & BIT(19)) flags |= HAS_SSE4_1;
+ if (reg_ecx & BIT(19)) flags |= HAS_SSE4_1;
- return flags & mask;
+ return flags & mask;
}
vpx_cpu_t vpx_x86_vendor(void);
@@ -150,21 +148,20 @@ unsigned __int64 __rdtsc(void);
#pragma intrinsic(__rdtsc)
#endif
static unsigned int
-x86_readtsc(void)
-{
+x86_readtsc(void) {
#if defined(__GNUC__) && __GNUC__
- unsigned int tsc;
- __asm__ __volatile__("rdtsc\n\t":"=a"(tsc):);
- return tsc;
+ unsigned int tsc;
+ __asm__ __volatile__("rdtsc\n\t":"=a"(tsc):);
+ return tsc;
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
- unsigned int tsc;
- asm volatile("rdtsc\n\t":"=a"(tsc):);
- return tsc;
+ unsigned int tsc;
+ asm volatile("rdtsc\n\t":"=a"(tsc):);
+ return tsc;
#else
#if ARCH_X86_64
- return (unsigned int)__rdtsc();
+ return (unsigned int)__rdtsc();
#else
- __asm rdtsc;
+ __asm rdtsc;
#endif
#endif
}
@@ -172,45 +169,41 @@ x86_readtsc(void)
#if defined(__GNUC__) && __GNUC__
#define x86_pause_hint()\
- __asm__ __volatile__ ("pause \n\t")
+ __asm__ __volatile__ ("pause \n\t")
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#define x86_pause_hint()\
- asm volatile ("pause \n\t")
+ asm volatile ("pause \n\t")
#else
#if ARCH_X86_64
#define x86_pause_hint()\
- _mm_pause();
+ _mm_pause();
#else
#define x86_pause_hint()\
- __asm pause
+ __asm pause
#endif
#endif
#if defined(__GNUC__) && __GNUC__
static void
-x87_set_control_word(unsigned short mode)
-{
- __asm__ __volatile__("fldcw %0" : : "m"(*&mode));
+x87_set_control_word(unsigned short mode) {
+ __asm__ __volatile__("fldcw %0" : : "m"(*&mode));
}
static unsigned short
-x87_get_control_word(void)
-{
- unsigned short mode;
- __asm__ __volatile__("fstcw %0\n\t":"=m"(*&mode):);
+x87_get_control_word(void) {
+ unsigned short mode;
+ __asm__ __volatile__("fstcw %0\n\t":"=m"(*&mode):);
return mode;
}
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
static void
-x87_set_control_word(unsigned short mode)
-{
- asm volatile("fldcw %0" : : "m"(*&mode));
+x87_set_control_word(unsigned short mode) {
+ asm volatile("fldcw %0" : : "m"(*&mode));
}
static unsigned short
-x87_get_control_word(void)
-{
- unsigned short mode;
- asm volatile("fstcw %0\n\t":"=m"(*&mode):);
- return mode;
+x87_get_control_word(void) {
+ unsigned short mode;
+ asm volatile("fstcw %0\n\t":"=m"(*&mode):);
+ return mode;
}
#elif ARCH_X86_64
/* No fldcw intrinsics on Windows x64, punt to external asm */
@@ -220,25 +213,22 @@ extern unsigned short vpx_winx64_fstcw(void);
#define x87_get_control_word vpx_winx64_fstcw
#else
static void
-x87_set_control_word(unsigned short mode)
-{
- __asm { fldcw mode }
+x87_set_control_word(unsigned short mode) {
+ __asm { fldcw mode }
}
static unsigned short
-x87_get_control_word(void)
-{
- unsigned short mode;
- __asm { fstcw mode }
- return mode;
+x87_get_control_word(void) {
+ unsigned short mode;
+ __asm { fstcw mode }
+ return mode;
}
#endif
static unsigned short
-x87_set_double_precision(void)
-{
- unsigned short mode = x87_get_control_word();
- x87_set_control_word((mode&~0x300) | 0x200);
- return mode;
+x87_set_double_precision(void) {
+ unsigned short mode = x87_get_control_word();
+ x87_set_control_word((mode&~0x300) | 0x200);
+ return mode;
}
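
x86_simd_caps() folds together an optional VPX_SIMD_CAPS override, a VPX_SIMD_CAPS_MASK filter, and the CPUID standard feature bits (EDX bit 23 MMX, 25 SSE, 26 SSE2; ECX bit 0 SSE3, 9 SSSE3, 19 SSE4.1). A dispatch sketch over the returned flags; the my_convolve_* kernels are hypothetical:

    #include "vpx_ports/x86.h"

    typedef void (*convolve_fn)(void);

    static void my_convolve_c(void)     { /* portable kernel */ }
    static void my_convolve_sse2(void)  { /* SSE2 kernel */ }
    static void my_convolve_ssse3(void) { /* SSSE3 kernel */ }

    /* Pick the widest implementation the CPU (and mask) allows. */
    static convolve_fn pick_convolve(void) {
      const int caps = x86_simd_caps();
      if (caps & HAS_SSSE3) return my_convolve_ssse3;
      if (caps & HAS_SSE2)  return my_convolve_sse2;
      return my_convolve_c;
    }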
diff --git a/libvpx/vpx_ports/x86_abi_support.asm b/libvpx/vpx_ports/x86_abi_support.asm
index 0c9fe37..eccbfa3 100644
--- a/libvpx/vpx_ports/x86_abi_support.asm
+++ b/libvpx/vpx_ports/x86_abi_support.asm
@@ -78,6 +78,17 @@
%endif
+; LIBVPX_YASM_WIN64
+; Set LIBVPX_YASM_WIN64 if the output format is Windows 64-bit, so the code
+; works whether x64 or win64 is defined on the Yasm command line.
+%ifidn __OUTPUT_FORMAT__,win64
+%define LIBVPX_YASM_WIN64 1
+%elifidn __OUTPUT_FORMAT__,x64
+%define LIBVPX_YASM_WIN64 1
+%else
+%define LIBVPX_YASM_WIN64 0
+%endif
+
; sym()
; Return the proper symbol name for the target ABI.
;
@@ -90,7 +101,7 @@
%define sym(x) x
%elifidn __OUTPUT_FORMAT__,elfx32
%define sym(x) x
-%elifidn __OUTPUT_FORMAT__,x64
+%elif LIBVPX_YASM_WIN64
%define sym(x) x
%else
%define sym(x) _ %+ x
@@ -114,7 +125,7 @@
%define PRIVATE :hidden
%elifidn __OUTPUT_FORMAT__,elfx32
%define PRIVATE :hidden
- %elifidn __OUTPUT_FORMAT__,x64
+ %elif LIBVPX_YASM_WIN64
%define PRIVATE
%else
%define PRIVATE :private_extern
@@ -131,7 +142,7 @@
%else
; 64 bit ABI passes arguments in registers. This is a workaround to get up
; and running quickly. Relies on SHADOW_ARGS_TO_STACK
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
%define arg(x) [rbp+16+8*x]
%else
%define arg(x) [rbp-8-8*x]
@@ -230,6 +241,12 @@
%elifidn __OUTPUT_FORMAT__,elfx32
%define WRT_PLT wrt ..plt
%define HIDDEN_DATA(x) x:data hidden
+ %elifidn __OUTPUT_FORMAT__,macho64
+ %ifdef CHROMIUM
+ %define HIDDEN_DATA(x) x:private_extern
+ %else
+ %define HIDDEN_DATA(x) x
+ %endif
%else
%define HIDDEN_DATA(x) x
%endif
@@ -251,7 +268,7 @@
%endm
%define UNSHADOW_ARGS
%else
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
%macro SHADOW_ARGS_TO_STACK 1 ; argc
%if %1 > 0
mov arg(0),rcx
@@ -307,7 +324,7 @@
; Win64 ABI requires 16 byte stack alignment, but then pushes an 8 byte return
; value. Typically we follow this up with 'push rbp' - re-aligning the stack -
; but in some cases this is not done and unaligned movs must be used.
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
%macro SAVE_XMM 1-2 a
%if %1 < 6
%error Only xmm registers 6-15 must be preserved
diff --git a/libvpx/vpx_ports/x86_cpuid.c b/libvpx/vpx_ports/x86_cpuid.c
index ce64033..fe86cfc 100644
--- a/libvpx/vpx_ports/x86_cpuid.c
+++ b/libvpx/vpx_ports/x86_cpuid.c
@@ -11,43 +11,39 @@
#include <string.h>
#include "x86.h"
-struct cpuid_vendors
-{
- char vendor_string[12];
- vpx_cpu_t vendor_id;
+struct cpuid_vendors {
+ char vendor_string[12];
+ vpx_cpu_t vendor_id;
};
-static struct cpuid_vendors cpuid_vendor_list[VPX_CPU_LAST] =
-{
- { "AuthenticAMD", VPX_CPU_AMD },
- { "AMDisbetter!", VPX_CPU_AMD_OLD },
- { "CentaurHauls", VPX_CPU_CENTAUR },
- { "CyrixInstead", VPX_CPU_CYRIX },
- { "GenuineIntel", VPX_CPU_INTEL },
- { "NexGenDriven", VPX_CPU_NEXGEN },
- { "Geode by NSC", VPX_CPU_NSC },
- { "RiseRiseRise", VPX_CPU_RISE },
- { "SiS SiS SiS ", VPX_CPU_SIS },
- { "GenuineTMx86", VPX_CPU_TRANSMETA },
- { "TransmetaCPU", VPX_CPU_TRANSMETA_OLD },
- { "UMC UMC UMC ", VPX_CPU_UMC },
- { "VIA VIA VIA ", VPX_CPU_VIA },
+static struct cpuid_vendors cpuid_vendor_list[VPX_CPU_LAST] = {
+ { "AuthenticAMD", VPX_CPU_AMD },
+ { "AMDisbetter!", VPX_CPU_AMD_OLD },
+ { "CentaurHauls", VPX_CPU_CENTAUR },
+ { "CyrixInstead", VPX_CPU_CYRIX },
+ { "GenuineIntel", VPX_CPU_INTEL },
+ { "NexGenDriven", VPX_CPU_NEXGEN },
+ { "Geode by NSC", VPX_CPU_NSC },
+ { "RiseRiseRise", VPX_CPU_RISE },
+ { "SiS SiS SiS ", VPX_CPU_SIS },
+ { "GenuineTMx86", VPX_CPU_TRANSMETA },
+ { "TransmetaCPU", VPX_CPU_TRANSMETA_OLD },
+ { "UMC UMC UMC ", VPX_CPU_UMC },
+ { "VIA VIA VIA ", VPX_CPU_VIA },
};
-vpx_cpu_t vpx_x86_vendor(void)
-{
- unsigned int reg_eax;
- unsigned int vs[3];
- int i;
+vpx_cpu_t vpx_x86_vendor(void) {
+ unsigned int reg_eax;
+ unsigned int vs[3];
+ int i;
- /* Get the Vendor String from the CPU */
- cpuid(0, reg_eax, vs[0], vs[2], vs[1]);
+ /* Get the Vendor String from the CPU */
+ cpuid(0, reg_eax, vs[0], vs[2], vs[1]);
- for (i = 0; i < VPX_CPU_LAST; i++)
- {
- if (strncmp ((const char *)vs, cpuid_vendor_list[i].vendor_string, 12) == 0)
- return (cpuid_vendor_list[i].vendor_id);
- }
+ for (i = 0; i < VPX_CPU_LAST; i++) {
+ if (strncmp((const char *)vs, cpuid_vendor_list[i].vendor_string, 12) == 0)
+ return (cpuid_vendor_list[i].vendor_id);
+ }
- return VPX_CPU_UNKNOWN;
+ return VPX_CPU_UNKNOWN;
}
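
CPUID leaf 0 returns the vendor string split across EBX, EDX, ECX in that order, which is why the call above writes vs[0], vs[2], vs[1]: the cpuid macro's argument order is (func, ax, bx, cx, dx), so EBX lands in vs[0], EDX in vs[1], ECX in vs[2], and the array reads as 12 consecutive text bytes ("GenuineIntel", "AuthenticAMD", ...). A standalone check:

    #include <stdio.h>
    #include <string.h>
    #include "vpx_ports/x86.h"

    static void print_vendor(void) {
      unsigned int eax, vs[3];
      char name[13];
      cpuid(0, eax, vs[0], vs[2], vs[1]);
      (void)eax;                /* leaf 0 also reports the max leaf */
      memcpy(name, vs, 12);     /* EBX, EDX, ECX -> readable order */
      name[12] = '\0';
      printf("vendor: %s\n", name);
    }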
diff --git a/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copy_y_neon.asm b/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copy_y_neon.asm
index 9189641..cc1789a 100644
--- a/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copy_y_neon.asm
+++ b/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copy_y_neon.asm
@@ -15,7 +15,7 @@
REQUIRE8
PRESERVE8
- INCLUDE asm_com_offsets.asm
+ INCLUDE vpx_scale_asm_offsets.asm
AREA ||.text||, CODE, READONLY, ALIGN=2
diff --git a/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copyframe_func_neon.asm b/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copyframe_func_neon.asm
index e55d076..3f17883 100644
--- a/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copyframe_func_neon.asm
+++ b/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copyframe_func_neon.asm
@@ -14,7 +14,7 @@
REQUIRE8
PRESERVE8
- INCLUDE asm_com_offsets.asm
+ INCLUDE vpx_scale_asm_offsets.asm
AREA ||.text||, CODE, READONLY, ALIGN=2
diff --git a/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copysrcframe_func_neon.asm b/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copysrcframe_func_neon.asm
index ec64dbc..d452ad2 100644
--- a/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copysrcframe_func_neon.asm
+++ b/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copysrcframe_func_neon.asm
@@ -14,7 +14,7 @@
REQUIRE8
PRESERVE8
- INCLUDE asm_com_offsets.asm
+ INCLUDE vpx_scale_asm_offsets.asm
AREA ||.text||, CODE, READONLY, ALIGN=2
;Note: This function is used to copy source data in src_buffer[i] at beginning of
diff --git a/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_extendframeborders_neon.asm b/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_extendframeborders_neon.asm
index ebc4242..b2eb9eb 100644
--- a/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_extendframeborders_neon.asm
+++ b/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_extendframeborders_neon.asm
@@ -14,7 +14,7 @@
REQUIRE8
PRESERVE8
- INCLUDE asm_com_offsets.asm
+ INCLUDE vpx_scale_asm_offsets.asm
AREA ||.text||, CODE, READONLY, ALIGN=2
;void vp8_yv12_extend_frame_borders_neon (YV12_BUFFER_CONFIG *ybf);
diff --git a/libvpx/vpx_scale/arm/neon/yv12extend_arm.c b/libvpx/vpx_scale/arm/neon/yv12extend_arm.c
index eabd495..4535b8f 100644
--- a/libvpx/vpx_scale/arm/neon/yv12extend_arm.c
+++ b/libvpx/vpx_scale/arm/neon/yv12extend_arm.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_rtcd.h"
+#include "./vpx_scale_rtcd.h"
extern void vp8_yv12_copy_frame_func_neon(struct yv12_buffer_config *src_ybc,
struct yv12_buffer_config *dst_ybc);
diff --git a/libvpx/vpx_scale/generic/bicubic_scaler.c b/libvpx/vpx_scale/generic/bicubic_scaler.c
deleted file mode 100644
index c116740..0000000
--- a/libvpx/vpx_scale/generic/bicubic_scaler.c
+++ /dev/null
@@ -1,569 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#include <float.h>
-#include <math.h>
-#include <stdio.h>
-#include "vpx_mem/vpx_mem.h"
-#include "vpxscale_arbitrary.h"
-
-#define FIXED_POINT
-
-#define MAX_IN_WIDTH 800
-#define MAX_IN_HEIGHT 600
-#define MAX_OUT_WIDTH 800
-#define MAX_OUT_HEIGHT 600
-#define MAX_OUT_DIMENSION ((MAX_OUT_WIDTH > MAX_OUT_HEIGHT) ? \
- MAX_OUT_WIDTH : MAX_OUT_HEIGHT)
-
-BICUBIC_SCALER_STRUCT g_b_scaler;
-static int g_first_time = 1;
-
-#pragma DATA_SECTION(g_hbuf, "VP6_HEAP")
-#pragma DATA_ALIGN (g_hbuf, 32);
-unsigned char g_hbuf[MAX_OUT_DIMENSION];
-
-#pragma DATA_SECTION(g_hbuf_uv, "VP6_HEAP")
-#pragma DATA_ALIGN (g_hbuf_uv, 32);
-unsigned char g_hbuf_uv[MAX_OUT_DIMENSION];
-
-
-#ifdef FIXED_POINT
-static int a_i = 0.6 * 65536;
-#else
-static float a = -0.6;
-#endif
-
-#ifdef FIXED_POINT
-// 3 2
-// C0 = a*t - a*t
-//
-static short c0_fixed(unsigned int t) {
- // put t in Q16 notation
- unsigned short v1, v2;
-
- // Q16
- v1 = (a_i * t) >> 16;
- v1 = (v1 * t) >> 16;
-
- // Q16
- v2 = (a_i * t) >> 16;
- v2 = (v2 * t) >> 16;
- v2 = (v2 * t) >> 16;
-
- // Q12
- return -((v1 - v2) >> 4);
-}
-
-// 2 3
-// C1 = a*t + (3-2*a)*t - (2-a)*t
-//
-static short c1_fixed(unsigned int t) {
- unsigned short v1, v2, v3;
- unsigned short two, three;
-
- // Q16
- v1 = (a_i * t) >> 16;
-
- // Q13
- two = 2 << 13;
- v2 = two - (a_i >> 3);
- v2 = (v2 * t) >> 16;
- v2 = (v2 * t) >> 16;
- v2 = (v2 * t) >> 16;
-
- // Q13
- three = 3 << 13;
- v3 = three - (2 * (a_i >> 3));
- v3 = (v3 * t) >> 16;
- v3 = (v3 * t) >> 16;
-
- // Q12
- return (((v1 >> 3) - v2 + v3) >> 1);
-
-}
-
-// 2 3
-// C2 = 1 - (3-a)*t + (2-a)*t
-//
-static short c2_fixed(unsigned int t) {
- unsigned short v1, v2, v3;
- unsigned short two, three;
-
- // Q13
- v1 = 1 << 13;
-
- // Q13
- three = 3 << 13;
- v2 = three - (a_i >> 3);
- v2 = (v2 * t) >> 16;
- v2 = (v2 * t) >> 16;
-
- // Q13
- two = 2 << 13;
- v3 = two - (a_i >> 3);
- v3 = (v3 * t) >> 16;
- v3 = (v3 * t) >> 16;
- v3 = (v3 * t) >> 16;
-
- // Q12
- return (v1 - v2 + v3) >> 1;
-}
-
-// 2 3
-// C3 = a*t - 2*a*t + a*t
-//
-static short c3_fixed(unsigned int t) {
- int v1, v2, v3;
-
- // Q16
- v1 = (a_i * t) >> 16;
-
- // Q15
- v2 = 2 * (a_i >> 1);
- v2 = (v2 * t) >> 16;
- v2 = (v2 * t) >> 16;
-
- // Q16
- v3 = (a_i * t) >> 16;
- v3 = (v3 * t) >> 16;
- v3 = (v3 * t) >> 16;
-
- // Q12
- return ((v2 - (v1 >> 1) - (v3 >> 1)) >> 3);
-}
-#else
-// 3 2
-// C0 = -a*t + a*t
-//
-float C0(float t) {
- return -a * t * t * t + a * t * t;
-}
-
-// 2 3
-// C1 = -a*t + (2*a+3)*t - (a+2)*t
-//
-float C1(float t) {
- return -(a + 2.0f) * t * t * t + (2.0f * a + 3.0f) * t * t - a * t;
-}
-
-// 2 3
-// C2 = 1 - (a+3)*t + (a+2)*t
-//
-float C2(float t) {
- return (a + 2.0f) * t * t * t - (a + 3.0f) * t * t + 1.0f;
-}
-
-// 2 3
-// C3 = a*t - 2*a*t + a*t
-//
-float C3(float t) {
- return a * t * t * t - 2.0f * a * t * t + a * t;
-}
-#endif
-
-#if 0
-int compare_real_fixed() {
- int i, errors = 0;
- float mult = 1.0 / 10000.0;
- unsigned int fixed_mult = mult * 4294967296;// 65536;
- unsigned int phase_offset_int;
- float phase_offset_real;
-
- for (i = 0; i < 10000; i++) {
- int fixed0, fixed1, fixed2, fixed3, fixed_total;
- int real0, real1, real2, real3, real_total;
-
- phase_offset_real = (float)i * mult;
- phase_offset_int = (fixed_mult * i) >> 16;
-// phase_offset_int = phase_offset_real * 65536;
-
- fixed0 = c0_fixed(phase_offset_int);
- real0 = C0(phase_offset_real) * 4096.0;
-
- if ((abs(fixed0) > (abs(real0) + 1)) || (abs(fixed0) < (abs(real0) - 1)))
- errors++;
-
- fixed1 = c1_fixed(phase_offset_int);
- real1 = C1(phase_offset_real) * 4096.0;
-
- if ((abs(fixed1) > (abs(real1) + 1)) || (abs(fixed1) < (abs(real1) - 1)))
- errors++;
-
- fixed2 = c2_fixed(phase_offset_int);
- real2 = C2(phase_offset_real) * 4096.0;
-
- if ((abs(fixed2) > (abs(real2) + 1)) || (abs(fixed2) < (abs(real2) - 1)))
- errors++;
-
- fixed3 = c3_fixed(phase_offset_int);
- real3 = C3(phase_offset_real) * 4096.0;
-
- if ((abs(fixed3) > (abs(real3) + 1)) || (abs(fixed3) < (abs(real3) - 1)))
- errors++;
-
- fixed_total = fixed0 + fixed1 + fixed2 + fixed3;
- real_total = real0 + real1 + real2 + real3;
-
- if ((fixed_total > 4097) || (fixed_total < 4094))
- errors++;
-
- if ((real_total > 4097) || (real_total < 4095))
- errors++;
- }
-
- return errors;
-}
-#endif
-
-// Find greatest common denominator between two integers. Method used here is
-// slow compared to Euclid's algorithm, but does not require any division.
-int gcd(int a, int b) {
- // Problem with this algorithm is that if a or b = 0 this function
- // will never exit. Don't want to return 0 because any computation
- // that was based on a common denoninator and tried to reduce by
- // dividing by 0 would fail. Best solution that could be thought of
- // would to be fail by returing a 1;
- if (a <= 0 || b <= 0)
- return 1;
-
- while (a != b) {
- if (b > a)
- b = b - a;
- else {
- int tmp = a;// swap large and
- a = b; // small
- b = tmp;
- }
- }
-
- return b;
-}
-
-void bicubic_coefficient_init() {
- vpx_memset(&g_b_scaler, 0, sizeof(BICUBIC_SCALER_STRUCT));
- g_first_time = 0;
-}
-
-void bicubic_coefficient_destroy() {
- if (!g_first_time) {
- vpx_free(g_b_scaler.l_w);
-
- vpx_free(g_b_scaler.l_h);
-
- vpx_free(g_b_scaler.l_h_uv);
-
- vpx_free(g_b_scaler.c_w);
-
- vpx_free(g_b_scaler.c_h);
-
- vpx_free(g_b_scaler.c_h_uv);
-
- vpx_memset(&g_b_scaler, 0, sizeof(BICUBIC_SCALER_STRUCT));
- }
-}
-
-// Create the coeffients that will be used for the cubic interpolation.
-// Because scaling does not have to be equal in the vertical and horizontal
-// regimes the phase offsets will be different. There are 4 coefficents
-// for each point, two on each side. The layout is that there are the
-// 4 coefficents for each phase in the array and then the next phase.
-int bicubic_coefficient_setup(int in_width, int in_height, int out_width, int out_height) {
- int i;
-#ifdef FIXED_POINT
- int phase_offset_int;
- unsigned int fixed_mult;
- int product_val = 0;
-#else
- float phase_offset;
-#endif
- int gcd_w, gcd_h, gcd_h_uv, d_w, d_h, d_h_uv;
-
- if (g_first_time)
- bicubic_coefficient_init();
-
-
- // check to see if the coefficents have already been set up correctly
- if ((in_width == g_b_scaler.in_width) && (in_height == g_b_scaler.in_height)
- && (out_width == g_b_scaler.out_width) && (out_height == g_b_scaler.out_height))
- return 0;
-
- g_b_scaler.in_width = in_width;
- g_b_scaler.in_height = in_height;
- g_b_scaler.out_width = out_width;
- g_b_scaler.out_height = out_height;
-
- // Don't want to allow crazy scaling, just try and prevent a catastrophic
- // failure here. Want to fail after setting the member functions so if
- // if the scaler is called the member functions will not scale.
- if (out_width <= 0 || out_height <= 0)
- return -1;
-
- // reduce in/out width and height ratios using the gcd
- gcd_w = gcd(out_width, in_width);
- gcd_h = gcd(out_height, in_height);
- gcd_h_uv = gcd(out_height, in_height / 2);
-
- // the numerator width and height are to be saved in
- // globals so they can be used during the scaling process
- // without having to be recalculated.
- g_b_scaler.nw = out_width / gcd_w;
- d_w = in_width / gcd_w;
-
- g_b_scaler.nh = out_height / gcd_h;
- d_h = in_height / gcd_h;
-
- g_b_scaler.nh_uv = out_height / gcd_h_uv;
- d_h_uv = (in_height / 2) / gcd_h_uv;
-
- // allocate memory for the coefficents
- vpx_free(g_b_scaler.l_w);
-
- vpx_free(g_b_scaler.l_h);
-
- vpx_free(g_b_scaler.l_h_uv);
-
- g_b_scaler.l_w = (short *)vpx_memalign(32, out_width * 2);
- g_b_scaler.l_h = (short *)vpx_memalign(32, out_height * 2);
- g_b_scaler.l_h_uv = (short *)vpx_memalign(32, out_height * 2);
-
- vpx_free(g_b_scaler.c_w);
-
- vpx_free(g_b_scaler.c_h);
-
- vpx_free(g_b_scaler.c_h_uv);
-
- g_b_scaler.c_w = (short *)vpx_memalign(32, g_b_scaler.nw * 4 * 2);
- g_b_scaler.c_h = (short *)vpx_memalign(32, g_b_scaler.nh * 4 * 2);
- g_b_scaler.c_h_uv = (short *)vpx_memalign(32, g_b_scaler.nh_uv * 4 * 2);
-
- g_b_scaler.hbuf = g_hbuf;
- g_b_scaler.hbuf_uv = g_hbuf_uv;
-
- // Set up polyphase filter taps. This needs to be done before
- // the scaling because of the floating point math required. The
- // coefficients are multiplied by 2^12 so that fixed point math
- // can be used in the main scaling loop.
-#ifdef FIXED_POINT
- fixed_mult = (1.0 / (float)g_b_scaler.nw) * 4294967296;
-
- product_val = 0;
-
- for (i = 0; i < g_b_scaler.nw; i++) {
- if (product_val > g_b_scaler.nw)
- product_val -= g_b_scaler.nw;
-
- phase_offset_int = (fixed_mult * product_val) >> 16;
-
- g_b_scaler.c_w[i * 4] = c3_fixed(phase_offset_int);
- g_b_scaler.c_w[i * 4 + 1] = c2_fixed(phase_offset_int);
- g_b_scaler.c_w[i * 4 + 2] = c1_fixed(phase_offset_int);
- g_b_scaler.c_w[i * 4 + 3] = c0_fixed(phase_offset_int);
-
- product_val += d_w;
- }
-
-
- fixed_mult = (1.0 / (float)g_b_scaler.nh) * 4294967296;
-
- product_val = 0;
-
- for (i = 0; i < g_b_scaler.nh; i++) {
- if (product_val > g_b_scaler.nh)
- product_val -= g_b_scaler.nh;
-
- phase_offset_int = (fixed_mult * product_val) >> 16;
-
- g_b_scaler.c_h[i * 4] = c0_fixed(phase_offset_int);
- g_b_scaler.c_h[i * 4 + 1] = c1_fixed(phase_offset_int);
- g_b_scaler.c_h[i * 4 + 2] = c2_fixed(phase_offset_int);
- g_b_scaler.c_h[i * 4 + 3] = c3_fixed(phase_offset_int);
-
- product_val += d_h;
- }
-
- fixed_mult = (1.0 / (float)g_b_scaler.nh_uv) * 4294967296;
-
- product_val = 0;
-
- for (i = 0; i < g_b_scaler.nh_uv; i++) {
- if (product_val > g_b_scaler.nh_uv)
- product_val -= g_b_scaler.nh_uv;
-
- phase_offset_int = (fixed_mult * product_val) >> 16;
-
- g_b_scaler.c_h_uv[i * 4] = c0_fixed(phase_offset_int);
- g_b_scaler.c_h_uv[i * 4 + 1] = c1_fixed(phase_offset_int);
- g_b_scaler.c_h_uv[i * 4 + 2] = c2_fixed(phase_offset_int);
- g_b_scaler.c_h_uv[i * 4 + 3] = c3_fixed(phase_offset_int);
-
- product_val += d_h_uv;
- }
-
-#else
-
- for (i = 0; i < g_nw; i++) {
- phase_offset = (float)((i * d_w) % g_nw) / (float)g_nw;
- g_c_w[i * 4] = (C3(phase_offset) * 4096.0);
- g_c_w[i * 4 + 1] = (C2(phase_offset) * 4096.0);
- g_c_w[i * 4 + 2] = (C1(phase_offset) * 4096.0);
- g_c_w[i * 4 + 3] = (C0(phase_offset) * 4096.0);
- }
-
- for (i = 0; i < g_nh; i++) {
- phase_offset = (float)((i * d_h) % g_nh) / (float)g_nh;
- g_c_h[i * 4] = (C0(phase_offset) * 4096.0);
- g_c_h[i * 4 + 1] = (C1(phase_offset) * 4096.0);
- g_c_h[i * 4 + 2] = (C2(phase_offset) * 4096.0);
- g_c_h[i * 4 + 3] = (C3(phase_offset) * 4096.0);
- }
-
- for (i = 0; i < g_nh_uv; i++) {
- phase_offset = (float)((i * d_h_uv) % g_nh_uv) / (float)g_nh_uv;
- g_c_h_uv[i * 4] = (C0(phase_offset) * 4096.0);
- g_c_h_uv[i * 4 + 1] = (C1(phase_offset) * 4096.0);
- g_c_h_uv[i * 4 + 2] = (C2(phase_offset) * 4096.0);
- g_c_h_uv[i * 4 + 3] = (C3(phase_offset) * 4096.0);
- }
-
-#endif
-
- // Create an array that corresponds input lines to output lines.
- // This doesn't require floating point math, but it does require
- // a division and because hardware division is not present that
- // is a call.
- for (i = 0; i < out_width; i++) {
- g_b_scaler.l_w[i] = (i * d_w) / g_b_scaler.nw;
-
- if ((g_b_scaler.l_w[i] + 2) <= in_width)
- g_b_scaler.max_usable_out_width = i;
-
- }
-
- for (i = 0; i < out_height + 1; i++) {
- g_b_scaler.l_h[i] = (i * d_h) / g_b_scaler.nh;
- g_b_scaler.l_h_uv[i] = (i * d_h_uv) / g_b_scaler.nh_uv;
- }
-
- return 0;
-}
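
The FIXED_POINT branch above never divides inside the loop: fixed_mult holds 1/n scaled by 2^32, so (fixed_mult * product_val) >> 16 recovers the phase product_val/n in 16.16 fixed point. A minimal standalone check of that identity, using explicit 64-bit intermediates (an assumption; the removed code's variable declarations are not part of this hunk):

#include <stdio.h>
#include <stdint.h>

int main(void) {
  int n = 5;            /* plays the role of g_b_scaler.nw */
  int product_val = 3;  /* running remainder, 0 <= product_val < n */

  /* 1/n in 0.32 fixed point; multiplying and shifting right by 16
   * leaves product_val/n in 16.16 fixed point. */
  uint64_t fixed_mult = (uint64_t)((1.0 / n) * 4294967296.0);
  uint32_t phase_16_16 = (uint32_t)((fixed_mult * (uint64_t)product_val) >> 16);

  printf("phase = %f (expect %f)\n",
         phase_16_16 / 65536.0, (double)product_val / n);
  return 0;
}
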
-
-int bicubic_scale(int in_width, int in_height, int in_stride,
- int out_width, int out_height, int out_stride,
- unsigned char *input_image, unsigned char *output_image) {
- short *RESTRICT l_w, * RESTRICT l_h;
- short *RESTRICT c_w, * RESTRICT c_h;
- unsigned char *RESTRICT ip, * RESTRICT op;
- unsigned char *RESTRICT hbuf;
- int h, w, lw, lh;
- int temp_sum;
- int phase_offset_w, phase_offset_h;
-
- c_w = g_b_scaler.c_w;
- c_h = g_b_scaler.c_h;
-
- op = output_image;
-
- l_w = g_b_scaler.l_w;
- l_h = g_b_scaler.l_h;
-
- phase_offset_h = 0;
-
- for (h = 0; h < out_height; h++) {
- // select the row to work on
- lh = l_h[h];
- ip = input_image + (in_stride * lh);
-
-    // vp8_filter the row vertically into a temporary buffer.
-    // If the phase offset == 0 then all the multiplication
-    // is going to result in the output equalling the input.
-    // So instead point the temporary buffer to the input.
-    // Also handle the boundary condition of not being able to
-    // filter the last lines.
- if (phase_offset_h && (lh < in_height - 2)) {
- hbuf = g_b_scaler.hbuf;
-
- for (w = 0; w < in_width; w++) {
- temp_sum = c_h[phase_offset_h * 4 + 3] * ip[w - in_stride];
- temp_sum += c_h[phase_offset_h * 4 + 2] * ip[w];
- temp_sum += c_h[phase_offset_h * 4 + 1] * ip[w + in_stride];
- temp_sum += c_h[phase_offset_h * 4] * ip[w + 2 * in_stride];
-
- hbuf[w] = temp_sum >> 12;
- }
- } else
- hbuf = ip;
-
- // increase the phase offset for the next time around.
- if (++phase_offset_h >= g_b_scaler.nh)
- phase_offset_h = 0;
-
- // now filter and expand it horizontally into the final
- // output buffer
- phase_offset_w = 0;
-
- for (w = 0; w < out_width; w++) {
- // get the index to use to expand the image
- lw = l_w[w];
-
- temp_sum = c_w[phase_offset_w * 4] * hbuf[lw - 1];
- temp_sum += c_w[phase_offset_w * 4 + 1] * hbuf[lw];
- temp_sum += c_w[phase_offset_w * 4 + 2] * hbuf[lw + 1];
- temp_sum += c_w[phase_offset_w * 4 + 3] * hbuf[lw + 2];
- temp_sum = temp_sum >> 12;
-
- if (++phase_offset_w >= g_b_scaler.nw)
- phase_offset_w = 0;
-
-      // boundary conditions
- if ((lw + 2) >= in_width)
- temp_sum = hbuf[lw];
-
- if (lw == 0)
- temp_sum = hbuf[0];
-
- op[w] = temp_sum;
- }
-
- op += out_stride;
- }
-
- return 0;
-}
-
-void bicubic_scale_frame_reset() {
- g_b_scaler.out_width = 0;
- g_b_scaler.out_height = 0;
-}
-
-void bicubic_scale_frame(YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst,
- int new_width, int new_height) {
-
- dst->y_width = new_width;
- dst->y_height = new_height;
- dst->uv_width = new_width / 2;
- dst->uv_height = new_height / 2;
-
- dst->y_stride = dst->y_width;
- dst->uv_stride = dst->uv_width;
-
- bicubic_scale(src->y_width, src->y_height, src->y_stride,
- new_width, new_height, dst->y_stride,
- src->y_buffer, dst->y_buffer);
-
- bicubic_scale(src->uv_width, src->uv_height, src->uv_stride,
- new_width / 2, new_height / 2, dst->uv_stride,
- src->u_buffer, dst->u_buffer);
-
- bicubic_scale(src->uv_width, src->uv_height, src->uv_stride,
- new_width / 2, new_height / 2, dst->uv_stride,
- src->v_buffer, dst->v_buffer);
-}
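
The non-FIXED_POINT path above stores each cubic tap as coefficient * 4096, i.e. 4.12 fixed point, so the inner loops in bicubic_scale() can filter with integer multiplies and a final >> 12. A minimal sketch of that scheme, with hypothetical C0..C3 functions standing in for the cubic basis weights (their definitions are not part of this hunk):

#include <stdio.h>

/* Hypothetical stand-ins for the C0..C3 weights used above; a uniform
 * cubic B-spline basis, which sums to 1 for any phase. */
static float C0(float t) { return (1 - t) * (1 - t) * (1 - t) / 6.0f; }
static float C1(float t) { return (3 * t * t * t - 6 * t * t + 4) / 6.0f; }
static float C2(float t) { return (-3 * t * t * t + 3 * t * t + 3 * t + 1) / 6.0f; }
static float C3(float t) { return t * t * t / 6.0f; }

int main(void) {
  float phase = 0.25f;  /* fractional position between source pixels */
  short taps[4];
  int p0 = 100, p1 = 110, p2 = 120, p3 = 130;  /* four neighboring pixels */
  int out;

  /* Quantize the float weights to 4.12 fixed point, as the setup code does. */
  taps[0] = (short)(C0(phase) * 4096.0f);
  taps[1] = (short)(C1(phase) * 4096.0f);
  taps[2] = (short)(C2(phase) * 4096.0f);
  taps[3] = (short)(C3(phase) * 4096.0f);

  /* Integer-only filtering, mirroring the >> 12 in bicubic_scale(). */
  out = (taps[0] * p0 + taps[1] * p1 + taps[2] * p2 + taps[3] * p3) >> 12;
  printf("filtered sample: %d\n", out);
  return 0;
}
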
diff --git a/libvpx/vpx_scale/generic/gen_scalers.c b/libvpx/vpx_scale/generic/gen_scalers.c
index 60c21fb..5f355c5 100644
--- a/libvpx/vpx_scale/generic/gen_scalers.c
+++ b/libvpx/vpx_scale/generic/gen_scalers.c
@@ -9,7 +9,7 @@
*/
-#include "vpx_scale/vpxscale.h"
+#include "vpx_scale/vpx_scale.h"
#include "vpx_mem/vpx_mem.h"
/****************************************************************************
* Imports
@@ -17,688 +17,6 @@
/****************************************************************************
*
- * ROUTINE : vp8_horizontal_line_4_5_scale_c
- *
- * INPUTS : const unsigned char *source : Pointer to source data.
- * unsigned int source_width : Stride of source.
- * unsigned char *dest : Pointer to destination data.
- * unsigned int dest_width : Stride of destination (NOT USED).
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Copies horizontal line of pixels from source to
- * destination scaling up by 4 to 5.
- *
- * SPECIAL NOTES : None.
- *
- ****************************************************************************/
-void vp8_horizontal_line_4_5_scale_c(const unsigned char *source,
- unsigned int source_width,
- unsigned char *dest,
- unsigned int dest_width) {
- unsigned i;
- unsigned int a, b, c;
- unsigned char *des = dest;
- const unsigned char *src = source;
-
- (void) dest_width;
-
- for (i = 0; i < source_width - 4; i += 4) {
- a = src[0];
- b = src[1];
- des [0] = (unsigned char) a;
- des [1] = (unsigned char)((a * 51 + 205 * b + 128) >> 8);
- c = src[2] * 154;
- a = src[3];
- des [2] = (unsigned char)((b * 102 + c + 128) >> 8);
- des [3] = (unsigned char)((c + 102 * a + 128) >> 8);
- b = src[4];
- des [4] = (unsigned char)((a * 205 + 51 * b + 128) >> 8);
-
- src += 4;
- des += 5;
- }
-
- a = src[0];
- b = src[1];
- des [0] = (unsigned char)(a);
- des [1] = (unsigned char)((a * 51 + 205 * b + 128) >> 8);
- c = src[2] * 154;
- a = src[3];
- des [2] = (unsigned char)((b * 102 + c + 128) >> 8);
- des [3] = (unsigned char)((c + 102 * a + 128) >> 8);
- des [4] = (unsigned char)(a);
-
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_vertical_band_4_5_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales vertical band of pixels by scale 4 to 5. The
- * height of the band scaled is 4-pixels.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band.
- *
- ****************************************************************************/
-void vp8_vertical_band_4_5_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c, d;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; i++) {
- a = des [0];
- b = des [dest_pitch];
-
- des[dest_pitch] = (unsigned char)((a * 51 + 205 * b + 128) >> 8);
-
- c = des[dest_pitch * 2] * 154;
- d = des[dest_pitch * 3];
-
- des [dest_pitch * 2] = (unsigned char)((b * 102 + c + 128) >> 8);
- des [dest_pitch * 3] = (unsigned char)((c + 102 * d + 128) >> 8);
-
- /* First line in next band */
- a = des [dest_pitch * 5];
- des [dest_pitch * 4] = (unsigned char)((d * 205 + 51 * a + 128) >> 8);
-
- des++;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_last_vertical_band_4_5_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales last vertical band of pixels by scale 4 to 5. The
- * height of the band scaled is 4-pixels.
- *
- * SPECIAL NOTES : The routine does not have available the first line of
- * the band below the current band, since this is the
- * last band.
- *
- ****************************************************************************/
-void vp8_last_vertical_band_4_5_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c, d;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; ++i) {
- a = des[0];
- b = des[dest_pitch];
-
- des[dest_pitch] = (unsigned char)((a * 51 + 205 * b + 128) >> 8);
-
- c = des[dest_pitch * 2] * 154;
- d = des[dest_pitch * 3];
-
- des [dest_pitch * 2] = (unsigned char)((b * 102 + c + 128) >> 8);
- des [dest_pitch * 3] = (unsigned char)((c + 102 * d + 128) >> 8);
-
-    /* No other line for interpolation of this line, so duplicate it */
- des[dest_pitch * 4] = (unsigned char) d;
-
- des++;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_horizontal_line_2_3_scale_c
- *
- * INPUTS : const unsigned char *source : Pointer to source data.
- * unsigned int source_width : Stride of source.
- * unsigned char *dest : Pointer to destination data.
- * unsigned int dest_width : Stride of destination (NOT USED).
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Copies horizontal line of pixels from source to
- * destination scaling up by 2 to 3.
- *
- * SPECIAL NOTES : None.
- *
- *
- ****************************************************************************/
-void vp8_horizontal_line_2_3_scale_c(const unsigned char *source,
- unsigned int source_width,
- unsigned char *dest,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c;
- unsigned char *des = dest;
- const unsigned char *src = source;
-
- (void) dest_width;
-
- for (i = 0; i < source_width - 2; i += 2) {
- a = src[0];
- b = src[1];
- c = src[2];
-
- des [0] = (unsigned char)(a);
- des [1] = (unsigned char)((a * 85 + 171 * b + 128) >> 8);
- des [2] = (unsigned char)((b * 171 + 85 * c + 128) >> 8);
-
- src += 2;
- des += 3;
- }
-
- a = src[0];
- b = src[1];
- des [0] = (unsigned char)(a);
- des [1] = (unsigned char)((a * 85 + 171 * b + 128) >> 8);
- des [2] = (unsigned char)(b);
-}
-
-
-/****************************************************************************
- *
- * ROUTINE : vp8_vertical_band_2_3_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales vertical band of pixels by scale 2 to 3. The
- * height of the band scaled is 2-pixels.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band.
- *
- ****************************************************************************/
-void vp8_vertical_band_2_3_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; i++) {
- a = des [0];
- b = des [dest_pitch];
- c = des[dest_pitch * 3];
- des [dest_pitch ] = (unsigned char)((a * 85 + 171 * b + 128) >> 8);
- des [dest_pitch * 2] = (unsigned char)((b * 171 + 85 * c + 128) >> 8);
-
- des++;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_last_vertical_band_2_3_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales last vertical band of pixels by scale 2 to 3. The
- * height of the band scaled is 2-pixels.
- *
- * SPECIAL NOTES : The routine does not have available the first line of
- * the band below the current band, since this is the
- * last band.
- *
- ****************************************************************************/
-void vp8_last_vertical_band_2_3_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; ++i) {
- a = des [0];
- b = des [dest_pitch];
-
- des [dest_pitch ] = (unsigned char)((a * 85 + 171 * b + 128) >> 8);
- des [dest_pitch * 2] = (unsigned char)(b);
- des++;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_horizontal_line_3_5_scale_c
- *
- * INPUTS : const unsigned char *source : Pointer to source data.
- * unsigned int source_width : Stride of source.
- * unsigned char *dest : Pointer to destination data.
- * unsigned int dest_width : Stride of destination (NOT USED).
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Copies horizontal line of pixels from source to
- * destination scaling up by 3 to 5.
- *
- * SPECIAL NOTES : None.
- *
- *
- ****************************************************************************/
-void vp8_horizontal_line_3_5_scale_c(const unsigned char *source,
- unsigned int source_width,
- unsigned char *dest,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c;
- unsigned char *des = dest;
- const unsigned char *src = source;
-
- (void) dest_width;
-
- for (i = 0; i < source_width - 3; i += 3) {
- a = src[0];
- b = src[1];
- des [0] = (unsigned char)(a);
- des [1] = (unsigned char)((a * 102 + 154 * b + 128) >> 8);
-
- c = src[2];
- des [2] = (unsigned char)((b * 205 + c * 51 + 128) >> 8);
- des [3] = (unsigned char)((b * 51 + c * 205 + 128) >> 8);
-
- a = src[3];
- des [4] = (unsigned char)((c * 154 + a * 102 + 128) >> 8);
-
- src += 3;
- des += 5;
- }
-
- a = src[0];
- b = src[1];
- des [0] = (unsigned char)(a);
-
- des [1] = (unsigned char)((a * 102 + 154 * b + 128) >> 8);
- c = src[2];
- des [2] = (unsigned char)((b * 205 + c * 51 + 128) >> 8);
- des [3] = (unsigned char)((b * 51 + c * 205 + 128) >> 8);
-
- des [4] = (unsigned char)(c);
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_vertical_band_3_5_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales vertical band of pixels by scale 3 to 5. The
- * height of the band scaled is 3-pixels.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band.
- *
- ****************************************************************************/
-void vp8_vertical_band_3_5_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; i++) {
- a = des [0];
- b = des [dest_pitch];
- des [dest_pitch] = (unsigned char)((a * 102 + 154 * b + 128) >> 8);
-
- c = des[dest_pitch * 2];
- des [dest_pitch * 2] = (unsigned char)((b * 205 + c * 51 + 128) >> 8);
- des [dest_pitch * 3] = (unsigned char)((b * 51 + c * 205 + 128) >> 8);
-
- /* First line in next band... */
- a = des [dest_pitch * 5];
- des [dest_pitch * 4] = (unsigned char)((c * 154 + a * 102 + 128) >> 8);
-
- des++;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_last_vertical_band_3_5_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales last vertical band of pixels by scale 3 to 5. The
- * height of the band scaled is 3-pixels.
- *
- * SPECIAL NOTES : The routine does not have available the first line of
- * the band below the current band, since this is the
- * last band.
- *
- ****************************************************************************/
-void vp8_last_vertical_band_3_5_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; ++i) {
- a = des [0];
- b = des [dest_pitch];
-
- des [ dest_pitch ] = (unsigned char)((a * 102 + 154 * b + 128) >> 8);
-
- c = des[dest_pitch * 2];
- des [dest_pitch * 2] = (unsigned char)((b * 205 + c * 51 + 128) >> 8);
- des [dest_pitch * 3] = (unsigned char)((b * 51 + c * 205 + 128) >> 8);
-
-    /* No other line for interpolation of this line, so duplicate it */
- des [ dest_pitch * 4 ] = (unsigned char)(c);
-
- des++;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_horizontal_line_3_4_scale_c
- *
- * INPUTS : const unsigned char *source : Pointer to source data.
- * unsigned int source_width : Stride of source.
- * unsigned char *dest : Pointer to destination data.
- * unsigned int dest_width : Stride of destination (NOT USED).
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Copies horizontal line of pixels from source to
- * destination scaling up by 3 to 4.
- *
- * SPECIAL NOTES : None.
- *
- *
- ****************************************************************************/
-void vp8_horizontal_line_3_4_scale_c(const unsigned char *source,
- unsigned int source_width,
- unsigned char *dest,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c;
- unsigned char *des = dest;
- const unsigned char *src = source;
-
- (void) dest_width;
-
- for (i = 0; i < source_width - 3; i += 3) {
- a = src[0];
- b = src[1];
- des [0] = (unsigned char)(a);
- des [1] = (unsigned char)((a * 64 + b * 192 + 128) >> 8);
-
- c = src[2];
- des [2] = (unsigned char)((b + c + 1) >> 1);
-
- a = src[3];
- des [3] = (unsigned char)((c * 192 + a * 64 + 128) >> 8);
-
- src += 3;
- des += 4;
- }
-
- a = src[0];
- b = src[1];
- des [0] = (unsigned char)(a);
- des [1] = (unsigned char)((a * 64 + b * 192 + 128) >> 8);
-
- c = src[2];
- des [2] = (unsigned char)((b + c + 1) >> 1);
- des [3] = (unsigned char)(c);
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_vertical_band_3_4_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales vertical band of pixels by scale 3 to 4. The
- * height of the band scaled is 3-pixels.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band.
- *
- ****************************************************************************/
-void vp8_vertical_band_3_4_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; i++) {
- a = des [0];
- b = des [dest_pitch];
- des [dest_pitch] = (unsigned char)((a * 64 + b * 192 + 128) >> 8);
-
- c = des[dest_pitch * 2];
- des [dest_pitch * 2] = (unsigned char)((b + c + 1) >> 1);
-
- /* First line in next band... */
- a = des [dest_pitch * 4];
- des [dest_pitch * 3] = (unsigned char)((c * 192 + a * 64 + 128) >> 8);
-
- des++;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_last_vertical_band_3_4_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales last vertical band of pixels by scale 3 to 4. The
- * height of the band scaled is 3-pixels.
- *
- * SPECIAL NOTES : The routine does not have available the first line of
- * the band below the current band, since this is the
- * last band.
- *
- ****************************************************************************/
-void vp8_last_vertical_band_3_4_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b, c;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; ++i) {
- a = des [0];
- b = des [dest_pitch];
-
- des [dest_pitch] = (unsigned char)((a * 64 + b * 192 + 128) >> 8);
-
- c = des[dest_pitch * 2];
- des [dest_pitch * 2] = (unsigned char)((b + c + 1) >> 1);
-
-    /* No other line for interpolation of this line, so duplicate it */
- des [dest_pitch * 3] = (unsigned char)(c);
-
- des++;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_horizontal_line_1_2_scale_c
- *
- * INPUTS : const unsigned char *source : Pointer to source data.
- * unsigned int source_width : Stride of source.
- * unsigned char *dest : Pointer to destination data.
- * unsigned int dest_width : Stride of destination (NOT USED).
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Copies horizontal line of pixels from source to
- * destination scaling up by 1 to 2.
- *
- * SPECIAL NOTES : None.
- *
- ****************************************************************************/
-void vp8_horizontal_line_1_2_scale_c(const unsigned char *source,
- unsigned int source_width,
- unsigned char *dest,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b;
- unsigned char *des = dest;
- const unsigned char *src = source;
-
- (void) dest_width;
-
- for (i = 0; i < source_width - 1; i += 1) {
- a = src[0];
- b = src[1];
- des [0] = (unsigned char)(a);
- des [1] = (unsigned char)((a + b + 1) >> 1);
- src += 1;
- des += 2;
- }
-
- a = src[0];
- des [0] = (unsigned char)(a);
- des [1] = (unsigned char)(a);
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_vertical_band_1_2_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales vertical band of pixels by scale 1 to 2. The
- * height of the band scaled is 1-pixel.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band.
- *
- ****************************************************************************/
-void vp8_vertical_band_1_2_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned int a, b;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; i++) {
- a = des [0];
- b = des [dest_pitch * 2];
-
- des[dest_pitch] = (unsigned char)((a + b + 1) >> 1);
-
- des++;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vp8_last_vertical_band_1_2_scale_c
- *
- * INPUTS : unsigned char *dest : Pointer to destination data.
- * unsigned int dest_pitch : Stride of destination data.
- * unsigned int dest_width : Width of destination data.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Scales last vertical band of pixels by scale 1 to 2. The
- * height of the band scaled is 1-pixel.
- *
- * SPECIAL NOTES : The routine does not have available the first line of
- * the band below the current band, since this is the
- * last band.
- *
- ****************************************************************************/
-void vp8_last_vertical_band_1_2_scale_c(unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width) {
- unsigned int i;
- unsigned char *des = dest;
-
- for (i = 0; i < dest_width; ++i) {
- des[dest_pitch] = des[0];
- des++;
- }
-}
-
-
-
-
-
-/****************************************************************************
- *
- * ROUTINE : vp8_horizontal_line_4_5_scale_c
*
* INPUTS : const unsigned char *source : Pointer to source data.
* unsigned int source_width : Stride of source.
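
All of the deleted scalers share one idiom: blend two pixels with 8-bit weights that sum to 256 and add 128 before shifting, which rounds to nearest instead of truncating. The magic constants (51/205, 85/171, 102/154, 64/192) are just 256 times each output sample's fractional position. A self-contained illustration:

#include <stdio.h>

/* Weighted average of two pixels; the weights sum to 256 and the
 * +128 rounds the >> 8 to nearest. */
static unsigned char blend(unsigned char a, unsigned char b, int wa) {
  return (unsigned char)((a * wa + b * (256 - wa) + 128) >> 8);
}

int main(void) {
  unsigned char a = 100, b = 200;
  /* Output pixel 1 of a 4-to-5 upscale sits 0.8 of the way from a to b:
   * weight 256 * 0.2 = 51 for a, 256 * 0.8 = 205 for b. */
  printf("%d\n", blend(a, b, 51)); /* prints 180 */
  return 0;
}
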
diff --git a/libvpx/vpx_scale/generic/vpxscale.c b/libvpx/vpx_scale/generic/vpx_scale.c
index 7de85ca..8044d2a 100644
--- a/libvpx/vpx_scale/generic/vpxscale.c
+++ b/libvpx/vpx_scale/generic/vpx_scale.c
@@ -20,10 +20,9 @@
/****************************************************************************
* Header Files
****************************************************************************/
-#include "./vpx_rtcd.h"
+#include "./vpx_scale_rtcd.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_scale/yv12config.h"
-#include "vpx_scale/scale_mode.h"
typedef struct {
int expanded_frame_width;
@@ -41,66 +40,6 @@ typedef struct {
/****************************************************************************
*
- * ROUTINE : horizontal_line_copy
- *
- * INPUTS : None
- *
- *
- * OUTPUTS : None.
- *
- * RETURNS : None
- *
- *  FUNCTION      : 1 to 1 scaling up for a horizontal line of pixels
- *
- * SPECIAL NOTES : None.
- *
- * ERRORS : None.
- *
- ****************************************************************************/
-static
-void horizontal_line_copy(
- const unsigned char *source,
- unsigned int source_width,
- unsigned char *dest,
- unsigned int dest_width
-) {
- (void) dest_width;
-
- duck_memcpy(dest, source, source_width);
-}
-/****************************************************************************
- *
- * ROUTINE : null_scale
- *
- * INPUTS : None
- *
- *
- * OUTPUTS : None.
- *
- * RETURNS : None
- *
- * FUNCTION : 1 to 1 scaling up for a vertical band
- *
- * SPECIAL NOTES : None.
- *
- * ERRORS : None.
- *
- ****************************************************************************/
-static
-void null_scale(
- unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width
-) {
- (void) dest;
- (void) dest_pitch;
- (void) dest_width;
-
- return;
-}
-
-/****************************************************************************
- *
* ROUTINE : scale1d_2t1_i
*
* INPUTS : const unsigned char *source : Pointer to data to be scaled.
@@ -493,7 +432,7 @@ void Scale2D
temp_area + i * dest_pitch, 1, hratio, dest_width);
} else { /* Duplicate the last row */
/* copy temp_area row 0 over from last row in the past */
- duck_memcpy(temp_area + i * dest_pitch, temp_area + (i - 1)*dest_pitch, dest_pitch);
+ vpx_memcpy(temp_area + i * dest_pitch, temp_area + (i - 1)*dest_pitch, dest_pitch);
}
}
@@ -504,7 +443,7 @@ void Scale2D
}
/* copy temp_area row 0 over from last row in the past */
- duck_memcpy(temp_area, temp_area + source_band_height * dest_pitch, dest_pitch);
+ vpx_memcpy(temp_area, temp_area + source_band_height * dest_pitch, dest_pitch);
/* move to the next band */
source += source_band_height * source_pitch;
@@ -514,7 +453,7 @@ void Scale2D
/****************************************************************************
*
- * ROUTINE :
+ * ROUTINE : vpx_scale_frame
*
* INPUTS : YV12_BUFFER_CONFIG *src : Pointer to frame to be scaled.
* YV12_BUFFER_CONFIG *dst : Pointer to buffer to hold scaled frame.
@@ -536,7 +475,7 @@ void Scale2D
* caching.
*
****************************************************************************/
-void vp8_scale_frame
+void vpx_scale_frame
(
YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst,
@@ -559,11 +498,11 @@ void vp8_scale_frame
if (dw < (int)dst->y_width)
for (i = 0; i < dh; i++)
- duck_memset(dst->y_buffer + i * dst->y_stride + dw - 1, dst->y_buffer[i * dst->y_stride + dw - 2], dst->y_width - dw + 1);
+ vpx_memset(dst->y_buffer + i * dst->y_stride + dw - 1, dst->y_buffer[i * dst->y_stride + dw - 2], dst->y_width - dw + 1);
if (dh < (int)dst->y_height)
for (i = dh - 1; i < (int)dst->y_height; i++)
- duck_memcpy(dst->y_buffer + i * dst->y_stride, dst->y_buffer + (dh - 2) * dst->y_stride, dst->y_width + 1);
+ vpx_memcpy(dst->y_buffer + i * dst->y_stride, dst->y_buffer + (dh - 2) * dst->y_stride, dst->y_width + 1);
Scale2D((unsigned char *) src->u_buffer, src->uv_stride, src->uv_width, src->uv_height,
(unsigned char *) dst->u_buffer, dst->uv_stride, dw / 2, dh / 2,
@@ -571,11 +510,11 @@ void vp8_scale_frame
if (dw / 2 < (int)dst->uv_width)
for (i = 0; i < dst->uv_height; i++)
- duck_memset(dst->u_buffer + i * dst->uv_stride + dw / 2 - 1, dst->u_buffer[i * dst->uv_stride + dw / 2 - 2], dst->uv_width - dw / 2 + 1);
+ vpx_memset(dst->u_buffer + i * dst->uv_stride + dw / 2 - 1, dst->u_buffer[i * dst->uv_stride + dw / 2 - 2], dst->uv_width - dw / 2 + 1);
if (dh / 2 < (int)dst->uv_height)
for (i = dh / 2 - 1; i < (int)dst->y_height / 2; i++)
- duck_memcpy(dst->u_buffer + i * dst->uv_stride, dst->u_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width);
+ vpx_memcpy(dst->u_buffer + i * dst->uv_stride, dst->u_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width);
Scale2D((unsigned char *) src->v_buffer, src->uv_stride, src->uv_width, src->uv_height,
(unsigned char *) dst->v_buffer, dst->uv_stride, dw / 2, dh / 2,
@@ -583,428 +522,9 @@ void vp8_scale_frame
if (dw / 2 < (int)dst->uv_width)
for (i = 0; i < dst->uv_height; i++)
- duck_memset(dst->v_buffer + i * dst->uv_stride + dw / 2 - 1, dst->v_buffer[i * dst->uv_stride + dw / 2 - 2], dst->uv_width - dw / 2 + 1);
+ vpx_memset(dst->v_buffer + i * dst->uv_stride + dw / 2 - 1, dst->v_buffer[i * dst->uv_stride + dw / 2 - 2], dst->uv_width - dw / 2 + 1);
if (dh / 2 < (int) dst->uv_height)
for (i = dh / 2 - 1; i < (int)dst->y_height / 2; i++)
- duck_memcpy(dst->v_buffer + i * dst->uv_stride, dst->v_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width);
-}
-/****************************************************************************
- *
- * ROUTINE : any_ratio_2d_scale
- *
- * INPUTS : SCALE_INSTANCE *si : Pointer to post-processor instance (NOT USED).
- * const unsigned char *source : Pointer to source image.
- * unsigned int source_pitch : Stride of source image.
- * unsigned int source_width : Width of source image.
- * unsigned int source_height : Height of source image (NOT USED).
- * unsigned char *dest : Pointer to destination image.
- * unsigned int dest_pitch : Stride of destination image.
- * unsigned int dest_width : Width of destination image.
- * unsigned int dest_height : Height of destination image.
- *
- * OUTPUTS : None.
- *
- * RETURNS : int: 1 if image scaled, 0 if image could not be scaled.
- *
- *  FUNCTION      : Scale the image with a changing aspect ratio.
- *
- *  SPECIAL NOTES : This scaling is bilinear. The whole function needs
- *                  re-working for a new scaling algorithm.
- *
- ****************************************************************************/
-static
-int any_ratio_2d_scale
-(
- SCALE_VARS *si,
- const unsigned char *source,
- int source_pitch,
- unsigned int source_width,
- unsigned int source_height,
- unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width,
- unsigned int dest_height
-) {
- unsigned int i, k;
- unsigned int src_band_height = 0;
- unsigned int dest_band_height = 0;
-
- /* suggested scale factors */
- int hs = si->HScale;
- int hr = si->HRatio;
- int vs = si->VScale;
- int vr = si->VRatio;
-
-  /* assume the ratios are scalable rather than needing to be centered */
- int ratio_scalable = 1;
-
- const unsigned char *source_base = ((source_pitch >= 0) ? source : (source + ((source_height - 1) * source_pitch)));
- const unsigned char *line_src;
-
- void (*horiz_line_scale)(const unsigned char *, unsigned int, unsigned char *, unsigned int) = NULL;
- void (*vert_band_scale)(unsigned char *, unsigned int, unsigned int) = NULL;
- void (*last_vert_band_scale)(unsigned char *, unsigned int, unsigned int) = NULL;
-
- (void) si;
-
- /* find out the ratio for each direction */
- switch (hr * 30 / hs) {
- case 24:
- /* 4-5 Scale in Width direction */
- horiz_line_scale = vp8_horizontal_line_4_5_scale;
- break;
- case 22:
- /* 3-4 Scale in Width direction */
- horiz_line_scale = vp8_horizontal_line_3_4_scale;
- break;
-
- case 20:
-      /* 2-3 Scale in Width direction */
- horiz_line_scale = vp8_horizontal_line_2_3_scale;
- break;
- case 18:
- /* 3-5 Scale in Width direction */
- horiz_line_scale = vp8_horizontal_line_3_5_scale;
- break;
- case 15:
- /* 1-2 Scale in Width direction */
- horiz_line_scale = vp8_horizontal_line_1_2_scale;
- break;
- case 30:
- /* no scale in Width direction */
- horiz_line_scale = horizontal_line_copy;
- break;
- default:
- /* The ratio is not acceptable now */
- /* throw("The ratio is not acceptable for now!"); */
- ratio_scalable = 0;
- break;
- }
-
- switch (vr * 30 / vs) {
- case 24:
- /* 4-5 Scale in vertical direction */
- vert_band_scale = vp8_vertical_band_4_5_scale;
- last_vert_band_scale = vp8_last_vertical_band_4_5_scale;
- src_band_height = 4;
- dest_band_height = 5;
- break;
- case 22:
- /* 3-4 Scale in vertical direction */
- vert_band_scale = vp8_vertical_band_3_4_scale;
- last_vert_band_scale = vp8_last_vertical_band_3_4_scale;
- src_band_height = 3;
- dest_band_height = 4;
- break;
- case 20:
- /* 2-3 Scale in vertical direction */
- vert_band_scale = vp8_vertical_band_2_3_scale;
- last_vert_band_scale = vp8_last_vertical_band_2_3_scale;
- src_band_height = 2;
- dest_band_height = 3;
- break;
- case 18:
- /* 3-5 Scale in vertical direction */
- vert_band_scale = vp8_vertical_band_3_5_scale;
- last_vert_band_scale = vp8_last_vertical_band_3_5_scale;
- src_band_height = 3;
- dest_band_height = 5;
- break;
- case 15:
- /* 1-2 Scale in vertical direction */
- vert_band_scale = vp8_vertical_band_1_2_scale;
- last_vert_band_scale = vp8_last_vertical_band_1_2_scale;
- src_band_height = 1;
- dest_band_height = 2;
- break;
- case 30:
-      /* no scale in vertical direction */
- vert_band_scale = null_scale;
- last_vert_band_scale = null_scale;
- src_band_height = 4;
- dest_band_height = 4;
- break;
- default:
- /* The ratio is not acceptable now */
- /* throw("The ratio is not acceptable for now!"); */
- ratio_scalable = 0;
- break;
- }
-
- if (ratio_scalable == 0)
- return ratio_scalable;
-
- horiz_line_scale(source, source_width, dest, dest_width);
-
- /* except last band */
- for (k = 0; k < (dest_height + dest_band_height - 1) / dest_band_height - 1; k++) {
- /* scale one band horizontally */
- for (i = 1; i < src_band_height; i++) {
- /* Trap case where we could read off the base of the source buffer */
- line_src = source + i * source_pitch;
-
- if (line_src < source_base)
- line_src = source_base;
-
- horiz_line_scale(line_src, source_width,
- dest + i * dest_pitch, dest_width);
- }
-
- /* first line of next band */
- /* Trap case where we could read off the base of the source buffer */
- line_src = source + src_band_height * source_pitch;
-
- if (line_src < source_base)
- line_src = source_base;
-
- horiz_line_scale(line_src, source_width,
- dest + dest_band_height * dest_pitch,
- dest_width);
-
- /* Vertical scaling is in place */
- vert_band_scale(dest, dest_pitch, dest_width);
-
- /* Next band... */
- source += src_band_height * source_pitch;
- dest += dest_band_height * dest_pitch;
- }
-
- /* scale one band horizontally */
- for (i = 1; i < src_band_height; i++) {
- /* Trap case where we could read off the base of the source buffer */
- line_src = source + i * source_pitch;
-
- if (line_src < source_base)
- line_src = source_base;
-
- horiz_line_scale(line_src, source_width,
- dest + i * dest_pitch,
- dest_width);
- }
-
- /* Vertical scaling is in place */
- last_vert_band_scale(dest, dest_pitch, dest_width);
-
- return ratio_scalable;
-}
-
-/****************************************************************************
- *
- * ROUTINE : any_ratio_frame_scale
- *
- * INPUTS : SCALE_INSTANCE *si : Pointer to post-processor instance (NOT USED).
- * unsigned char *frame_buffer : Pointer to source image.
- * int YOffset : Offset from start of buffer to Y samples.
- * int UVOffset : Offset from start of buffer to UV samples.
- *
- * OUTPUTS : None.
- *
- * RETURNS : int: 1 if image scaled, 0 if image could not be scaled.
- *
- *  FUNCTION      : Scale the image with a changing aspect ratio.
- *
- * SPECIAL NOTES : None.
- *
- ****************************************************************************/
-static
-int any_ratio_frame_scale(SCALE_VARS *scale_vars, int YOffset, int UVOffset) {
- int i;
- int ew;
- int eh;
-
- /* suggested scale factors */
- int hs = scale_vars->HScale;
- int hr = scale_vars->HRatio;
- int vs = scale_vars->VScale;
- int vr = scale_vars->VRatio;
-
- int ratio_scalable = 1;
-
- int sw = (scale_vars->expanded_frame_width * hr + hs - 1) / hs;
- int sh = (scale_vars->expanded_frame_height * vr + vs - 1) / vs;
- int dw = scale_vars->expanded_frame_width;
- int dh = scale_vars->expanded_frame_height;
- YV12_BUFFER_CONFIG *src_yuv_config = scale_vars->src_yuv_config;
- YV12_BUFFER_CONFIG *dst_yuv_config = scale_vars->dst_yuv_config;
-
- if (hr == 3)
- ew = (sw + 2) / 3 * 3 * hs / hr;
- else
- ew = (sw + 7) / 8 * 8 * hs / hr;
-
- if (vr == 3)
- eh = (sh + 2) / 3 * 3 * vs / vr;
- else
- eh = (sh + 7) / 8 * 8 * vs / vr;
-
- ratio_scalable = any_ratio_2d_scale(scale_vars,
- (const unsigned char *)src_yuv_config->y_buffer,
- src_yuv_config->y_stride, sw, sh,
- (unsigned char *) dst_yuv_config->y_buffer + YOffset,
- dst_yuv_config->y_stride, dw, dh);
-
- for (i = 0; i < eh; i++)
- duck_memset(dst_yuv_config->y_buffer + YOffset + i * dst_yuv_config->y_stride + dw, 0, ew - dw);
-
- for (i = dh; i < eh; i++)
- duck_memset(dst_yuv_config->y_buffer + YOffset + i * dst_yuv_config->y_stride, 0, ew);
-
- if (ratio_scalable == 0)
- return ratio_scalable;
-
- sw = (sw + 1) >> 1;
- sh = (sh + 1) >> 1;
- dw = (dw + 1) >> 1;
- dh = (dh + 1) >> 1;
-
- any_ratio_2d_scale(scale_vars,
- (const unsigned char *)src_yuv_config->u_buffer,
- src_yuv_config->y_stride / 2, sw, sh,
- (unsigned char *)dst_yuv_config->u_buffer + UVOffset,
- dst_yuv_config->uv_stride, dw, dh);
-
- any_ratio_2d_scale(scale_vars,
- (const unsigned char *)src_yuv_config->v_buffer,
- src_yuv_config->y_stride / 2, sw, sh,
- (unsigned char *)dst_yuv_config->v_buffer + UVOffset,
- dst_yuv_config->uv_stride, dw, dh);
-
- return ratio_scalable;
-}
-
-/****************************************************************************
- *
- * ROUTINE : center_image
- *
- * INPUTS : SCALE_INSTANCE *si : Pointer to post-processor instance.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Centers the image without scaling in the output buffer.
- *
- * SPECIAL NOTES : None.
- *
- ****************************************************************************/
-static void
-center_image(YV12_BUFFER_CONFIG *src_yuv_config, YV12_BUFFER_CONFIG *dst_yuv_config) {
- int i;
- int row_offset, col_offset;
- unsigned char *src_data_pointer;
- unsigned char *dst_data_pointer;
-
- /* center values */
- row_offset = (dst_yuv_config->y_height - src_yuv_config->y_height) / 2;
- col_offset = (dst_yuv_config->y_width - src_yuv_config->y_width) / 2;
-
- /* Y's */
- src_data_pointer = src_yuv_config->y_buffer;
- dst_data_pointer = (unsigned char *)dst_yuv_config->y_buffer + (row_offset * dst_yuv_config->y_stride) + col_offset;
-
- for (i = 0; i < src_yuv_config->y_height; i++) {
- duck_memcpy(dst_data_pointer, src_data_pointer, src_yuv_config->y_width);
- dst_data_pointer += dst_yuv_config->y_stride;
- src_data_pointer += src_yuv_config->y_stride;
- }
-
- row_offset /= 2;
- col_offset /= 2;
-
- /* U's */
- src_data_pointer = src_yuv_config->u_buffer;
- dst_data_pointer = (unsigned char *)dst_yuv_config->u_buffer + (row_offset * dst_yuv_config->uv_stride) + col_offset;
-
- for (i = 0; i < src_yuv_config->uv_height; i++) {
- duck_memcpy(dst_data_pointer, src_data_pointer, src_yuv_config->uv_width);
- dst_data_pointer += dst_yuv_config->uv_stride;
- src_data_pointer += src_yuv_config->uv_stride;
- }
-
- /* V's */
- src_data_pointer = src_yuv_config->v_buffer;
- dst_data_pointer = (unsigned char *)dst_yuv_config->v_buffer + (row_offset * dst_yuv_config->uv_stride) + col_offset;
-
- for (i = 0; i < src_yuv_config->uv_height; i++) {
- duck_memcpy(dst_data_pointer, src_data_pointer, src_yuv_config->uv_width);
- dst_data_pointer += dst_yuv_config->uv_stride;
- src_data_pointer += src_yuv_config->uv_stride;
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : scale_or_center
- *
- * INPUTS : SCALE_INSTANCE *si : Pointer to post-processor instance.
- *
- *
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- *  FUNCTION      : Decides whether to scale or center the image in the scale buffer for the blit
- *
- * SPECIAL NOTES : None.
- *
- ****************************************************************************/
-void
-vp8_yv12_scale_or_center
-(
- YV12_BUFFER_CONFIG *src_yuv_config,
- YV12_BUFFER_CONFIG *dst_yuv_config,
- int expanded_frame_width,
- int expanded_frame_height,
- int scaling_mode,
- int HScale,
- int HRatio,
- int VScale,
- int VRatio
-) {
- /*if ( ppi->post_processing_level )
- update_umvborder ( ppi, frame_buffer );*/
-
-
- switch (scaling_mode) {
- case SCALE_TO_FIT:
- case MAINTAIN_ASPECT_RATIO: {
- SCALE_VARS scale_vars;
- /* center values */
-#if 1
- int row = (dst_yuv_config->y_height - expanded_frame_height) / 2;
- int col = (dst_yuv_config->y_width - expanded_frame_width) / 2;
- /*int YOffset = row * dst_yuv_config->y_width + col;
- int UVOffset = (row>>1) * dst_yuv_config->uv_width + (col>>1);*/
- int YOffset = row * dst_yuv_config->y_stride + col;
- int UVOffset = (row >> 1) * dst_yuv_config->uv_stride + (col >> 1);
-#else
- int row = (src_yuv_config->y_height - expanded_frame_height) / 2;
- int col = (src_yuv_config->y_width - expanded_frame_width) / 2;
- int YOffset = row * src_yuv_config->y_width + col;
- int UVOffset = (row >> 1) * src_yuv_config->uv_width + (col >> 1);
-#endif
-
- scale_vars.dst_yuv_config = dst_yuv_config;
- scale_vars.src_yuv_config = src_yuv_config;
- scale_vars.HScale = HScale;
- scale_vars.HRatio = HRatio;
- scale_vars.VScale = VScale;
- scale_vars.VRatio = VRatio;
- scale_vars.expanded_frame_width = expanded_frame_width;
- scale_vars.expanded_frame_height = expanded_frame_height;
-
- /* perform center and scale */
- any_ratio_frame_scale(&scale_vars, YOffset, UVOffset);
-
- break;
- }
- case CENTER:
- center_image(src_yuv_config, dst_yuv_config);
- break;
-
- default:
- break;
- }
+ vpx_memcpy(dst->v_buffer + i * dst->uv_stride, dst->v_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width);
}
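
The scaler selection in the deleted any_ratio_2d_scale() hinges on reducing each ratio/scale pair to the integer key ratio * 30 / scale, which maps every supported ratio to a distinct case label. A quick standalone check that the keys match the switch statements above:

#include <stdio.h>

int main(void) {
  /* hr/hs pairs supported by the deleted code: 4/5, 3/4, 2/3, 3/5, 1/2, 1/1 */
  const int pairs[6][2] = {{4, 5}, {3, 4}, {2, 3}, {3, 5}, {1, 2}, {1, 1}};
  int i;

  for (i = 0; i < 6; i++) {
    int hr = pairs[i][0], hs = pairs[i][1];
    /* Integer division gives the case labels 24, 22, 20, 18, 15, 30. */
    printf("%d/%d -> key %d\n", hr, hs, hr * 30 / hs);
  }
  return 0;
}
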
diff --git a/libvpx/vpx_scale/generic/yv12config.c b/libvpx/vpx_scale/generic/yv12config.c
index 4cb2a41..754a615 100644
--- a/libvpx/vpx_scale/generic/yv12config.c
+++ b/libvpx/vpx_scale/generic/yv12config.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
+#include "./vpx_config.h"
#include "vpx_scale/yv12config.h"
#include "vpx_mem/vpx_mem.h"
@@ -35,58 +35,174 @@ vp8_yv12_de_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf) {
return 0;
}
-/****************************************************************************
- *
- ****************************************************************************/
-int
-vp8_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height, int border) {
- /*NOTE:*/
-
+int vp8_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
+ int width, int height, int border) {
if (ybf) {
- int y_stride = ((width + 2 * border) + 31) & ~31;
- int yplane_size = (height + 2 * border) * y_stride;
- int uv_width = width >> 1;
- int uv_height = height >> 1;
+ int aligned_width = (width + 15) & ~15;
+ int aligned_height = (height + 15) & ~15;
+ int y_stride = ((aligned_width + 2 * border) + 31) & ~31;
+ int yplane_size = (aligned_height + 2 * border) * y_stride;
+ int uv_width = aligned_width >> 1;
+ int uv_height = aligned_height >> 1;
/** There is currently a bunch of code which assumes
* uv_stride == y_stride/2, so enforce this here. */
int uv_stride = y_stride >> 1;
int uvplane_size = (uv_height + border) * uv_stride;
+ const int frame_size = yplane_size + 2 * uvplane_size;
- vp8_yv12_de_alloc_frame_buffer(ybf);
+ if (!ybf->buffer_alloc) {
+ ybf->buffer_alloc = vpx_memalign(32, frame_size);
+ ybf->buffer_alloc_sz = frame_size;
+ }
- /** Only support allocating buffers that have a height and width that
- * are multiples of 16, and a border that's a multiple of 32.
- * The border restriction is required to get 16-byte alignment of the
-     * start of the chroma rows without introducing an arbitrary gap
- * between planes, which would break the semantics of things like
- * vpx_img_set_rect(). */
- if ((width & 0xf) | (height & 0xf) | (border & 0x1f))
+ if (!ybf->buffer_alloc || ybf->buffer_alloc_sz < frame_size)
+ return -1;
+
+ /* Only support allocating buffers that have a border that's a multiple
+ * of 32. The border restriction is required to get 16-byte alignment of
+     * the start of the chroma rows without introducing an arbitrary gap
+ * between planes, which would break the semantics of things like
+ * vpx_img_set_rect(). */
+ if (border & 0x1f)
return -3;
- ybf->y_width = width;
- ybf->y_height = height;
+ ybf->y_crop_width = width;
+ ybf->y_crop_height = height;
+ ybf->y_width = aligned_width;
+ ybf->y_height = aligned_height;
ybf->y_stride = y_stride;
ybf->uv_width = uv_width;
ybf->uv_height = uv_height;
ybf->uv_stride = uv_stride;
- ybf->border = border;
- ybf->frame_size = yplane_size + 2 * uvplane_size;
-
- ybf->buffer_alloc = (unsigned char *) vpx_memalign(32, ybf->frame_size);
+ ybf->alpha_width = 0;
+ ybf->alpha_height = 0;
+ ybf->alpha_stride = 0;
- if (ybf->buffer_alloc == NULL)
- return -1;
+ ybf->border = border;
+ ybf->frame_size = frame_size;
ybf->y_buffer = ybf->buffer_alloc + (border * y_stride) + border;
ybf->u_buffer = ybf->buffer_alloc + yplane_size + (border / 2 * uv_stride) + border / 2;
ybf->v_buffer = ybf->buffer_alloc + yplane_size + uvplane_size + (border / 2 * uv_stride) + border / 2;
+ ybf->alpha_buffer = NULL;
    ybf->corrupted = 0; /* assume not corrupted by errors */
+ return 0;
+ }
+ return -2;
+}
+
+int vp8_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
+ int width, int height, int border) {
+ if (ybf) {
+ vp8_yv12_de_alloc_frame_buffer(ybf);
+ return vp8_yv12_realloc_frame_buffer(ybf, width, height, border);
+ }
+ return -2;
+}
+
+#if CONFIG_VP9
+// TODO(jkoleszar): Maybe replace this with struct vpx_image
+
+int vp9_free_frame_buffer(YV12_BUFFER_CONFIG *ybf) {
+ if (ybf) {
+ vpx_free(ybf->buffer_alloc);
+
+    /* buffer_alloc isn't accessed by most functions. Rather, y_buffer,
+       u_buffer, and v_buffer point into buffer_alloc and are used. Clear
+       all of this out so that a freed pointer isn't inadvertently used. */
+ vpx_memset(ybf, 0, sizeof(YV12_BUFFER_CONFIG));
} else {
- return -2;
+ return -1;
}
return 0;
}
+
+int vp9_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
+ int width, int height,
+ int ss_x, int ss_y, int border) {
+ if (ybf) {
+ const int aligned_width = (width + 7) & ~7;
+ const int aligned_height = (height + 7) & ~7;
+ const int y_stride = ((aligned_width + 2 * border) + 31) & ~31;
+ const int yplane_size = (aligned_height + 2 * border) * y_stride;
+ const int uv_width = aligned_width >> ss_x;
+ const int uv_height = aligned_height >> ss_y;
+ const int uv_stride = y_stride >> ss_x;
+ const int uv_border_w = border >> ss_x;
+ const int uv_border_h = border >> ss_y;
+ const int uvplane_size = (uv_height + 2 * uv_border_h) * uv_stride;
+#if CONFIG_ALPHA
+ const int alpha_width = aligned_width;
+ const int alpha_height = aligned_height;
+ const int alpha_stride = y_stride;
+ const int alpha_border_w = border;
+ const int alpha_border_h = border;
+ const int alpha_plane_size = (alpha_height + 2 * alpha_border_h) *
+ alpha_stride;
+ const int frame_size = yplane_size + 2 * uvplane_size +
+ alpha_plane_size;
+#else
+ const int frame_size = yplane_size + 2 * uvplane_size;
+#endif
+ if (!ybf->buffer_alloc) {
+ ybf->buffer_alloc = vpx_memalign(32, frame_size);
+ ybf->buffer_alloc_sz = frame_size;
+ }
+
+ if (!ybf->buffer_alloc || ybf->buffer_alloc_sz < frame_size)
+ return -1;
+
+ /* Only support allocating buffers that have a border that's a multiple
+ * of 32. The border restriction is required to get 16-byte alignment of
+     * the start of the chroma rows without introducing an arbitrary gap
+ * between planes, which would break the semantics of things like
+ * vpx_img_set_rect(). */
+ if (border & 0x1f)
+ return -3;
+
+ ybf->y_crop_width = width;
+ ybf->y_crop_height = height;
+ ybf->y_width = aligned_width;
+ ybf->y_height = aligned_height;
+ ybf->y_stride = y_stride;
+
+ ybf->uv_width = uv_width;
+ ybf->uv_height = uv_height;
+ ybf->uv_stride = uv_stride;
+
+ ybf->border = border;
+ ybf->frame_size = frame_size;
+
+ ybf->y_buffer = ybf->buffer_alloc + (border * y_stride) + border;
+ ybf->u_buffer = ybf->buffer_alloc + yplane_size +
+ (uv_border_h * uv_stride) + uv_border_w;
+ ybf->v_buffer = ybf->buffer_alloc + yplane_size + uvplane_size +
+ (uv_border_h * uv_stride) + uv_border_w;
+
+#if CONFIG_ALPHA
+ ybf->alpha_width = alpha_width;
+ ybf->alpha_height = alpha_height;
+ ybf->alpha_stride = alpha_stride;
+ ybf->alpha_buffer = ybf->buffer_alloc + yplane_size + 2 * uvplane_size +
+ (alpha_border_h * alpha_stride) + alpha_border_w;
+#endif
+    ybf->corrupted = 0; /* assume not corrupted by errors */
+ return 0;
+ }
+ return -2;
+}
+
+int vp9_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
+ int width, int height,
+ int ss_x, int ss_y, int border) {
+ if (ybf) {
+ vp9_free_frame_buffer(ybf);
+ return vp9_realloc_frame_buffer(ybf, width, height, ss_x, ss_y, border);
+ }
+ return -2;
+}
+#endif
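
The allocation math in the new vp9_realloc_frame_buffer() is easy to sanity-check in isolation: dimensions round up to a multiple of 8, the luma stride rounds up to a multiple of 32, and the chroma plane is derived by shifting with the subsampling factors. A standalone sketch for a 352x288 frame with a 32-pixel border and 4:2:0 subsampling (ss_x = ss_y = 1), ignoring the CONFIG_ALPHA plane:

#include <stdio.h>

int main(void) {
  int width = 352, height = 288, border = 32, ss_x = 1, ss_y = 1;

  int aligned_width = (width + 7) & ~7;    /* round up to a multiple of 8 */
  int aligned_height = (height + 7) & ~7;
  int y_stride = ((aligned_width + 2 * border) + 31) & ~31;  /* 32-aligned */
  int yplane_size = (aligned_height + 2 * border) * y_stride;

  int uv_height = aligned_height >> ss_y;
  int uv_stride = y_stride >> ss_x;
  int uv_border_h = border >> ss_y;
  int uvplane_size = (uv_height + 2 * uv_border_h) * uv_stride;

  printf("y_stride=%d frame_size=%d\n",
         y_stride, yplane_size + 2 * uvplane_size);  /* 416, 219648 */
  return 0;
}
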
diff --git a/libvpx/vpx_scale/generic/yv12extend.c b/libvpx/vpx_scale/generic/yv12extend.c
index 247078c..c38fb80 100644
--- a/libvpx/vpx_scale/generic/yv12extend.c
+++ b/libvpx/vpx_scale/generic/yv12extend.c
@@ -8,10 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
+#include <assert.h>
+#include "./vpx_config.h"
#include "vpx_scale/yv12config.h"
#include "vpx_mem/vpx_mem.h"
-#include "vpx_scale/vpxscale.h"
+#include "vpx_scale/vpx_scale.h"
/****************************************************************************
* Exports
@@ -20,179 +21,110 @@
/****************************************************************************
*
****************************************************************************/
-void
-vp8_yv12_extend_frame_borders_c(YV12_BUFFER_CONFIG *ybf) {
+static void extend_plane(uint8_t *s, /* source */
+ int sp, /* source pitch */
+ int w, /* width */
+ int h, /* height */
+ int et, /* extend top border */
+ int el, /* extend left border */
+ int eb, /* extend bottom border */
+ int er) { /* extend right border */
int i;
- unsigned char *src_ptr1, *src_ptr2;
- unsigned char *dest_ptr1, *dest_ptr2;
-
- unsigned int Border;
- int plane_stride;
- int plane_height;
- int plane_width;
-
- /***********/
- /* Y Plane */
- /***********/
- Border = ybf->border;
- plane_stride = ybf->y_stride;
- plane_height = ybf->y_height;
- plane_width = ybf->y_width;
-
- /* copy the left and right most columns out */
- src_ptr1 = ybf->y_buffer;
- src_ptr2 = src_ptr1 + plane_width - 1;
- dest_ptr1 = src_ptr1 - Border;
- dest_ptr2 = src_ptr2 + 1;
-
- for (i = 0; i < plane_height; i++) {
- vpx_memset(dest_ptr1, src_ptr1[0], Border);
- vpx_memset(dest_ptr2, src_ptr2[0], Border);
- src_ptr1 += plane_stride;
- src_ptr2 += plane_stride;
- dest_ptr1 += plane_stride;
- dest_ptr2 += plane_stride;
- }
-
- /* Now copy the top and bottom source lines into each line of the respective borders */
- src_ptr1 = ybf->y_buffer - Border;
- src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
- dest_ptr1 = src_ptr1 - (Border * plane_stride);
- dest_ptr2 = src_ptr2 + plane_stride;
-
- for (i = 0; i < (int)Border; i++) {
- vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
- vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
- dest_ptr1 += plane_stride;
- dest_ptr2 += plane_stride;
- }
-
-
- /***********/
- /* U Plane */
- /***********/
- plane_stride = ybf->uv_stride;
- plane_height = ybf->uv_height;
- plane_width = ybf->uv_width;
- Border /= 2;
+ uint8_t *src_ptr1, *src_ptr2;
+ uint8_t *dest_ptr1, *dest_ptr2;
+ int linesize;
/* copy the left and right most columns out */
- src_ptr1 = ybf->u_buffer;
- src_ptr2 = src_ptr1 + plane_width - 1;
- dest_ptr1 = src_ptr1 - Border;
- dest_ptr2 = src_ptr2 + 1;
-
- for (i = 0; i < plane_height; i++) {
- vpx_memset(dest_ptr1, src_ptr1[0], Border);
- vpx_memset(dest_ptr2, src_ptr2[0], Border);
- src_ptr1 += plane_stride;
- src_ptr2 += plane_stride;
- dest_ptr1 += plane_stride;
- dest_ptr2 += plane_stride;
+ src_ptr1 = s;
+ src_ptr2 = s + w - 1;
+ dest_ptr1 = s - el;
+ dest_ptr2 = s + w;
+
+ for (i = 0; i < h; i++) {
+ vpx_memset(dest_ptr1, src_ptr1[0], el);
+ vpx_memset(dest_ptr2, src_ptr2[0], er);
+ src_ptr1 += sp;
+ src_ptr2 += sp;
+ dest_ptr1 += sp;
+ dest_ptr2 += sp;
}
- /* Now copy the top and bottom source lines into each line of the respective borders */
- src_ptr1 = ybf->u_buffer - Border;
- src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
- dest_ptr1 = src_ptr1 - (Border * plane_stride);
- dest_ptr2 = src_ptr2 + plane_stride;
-
- for (i = 0; i < (int)(Border); i++) {
- vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
- vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
- dest_ptr1 += plane_stride;
- dest_ptr2 += plane_stride;
- }
-
- /***********/
- /* V Plane */
- /***********/
-
- /* copy the left and right most columns out */
- src_ptr1 = ybf->v_buffer;
- src_ptr2 = src_ptr1 + plane_width - 1;
- dest_ptr1 = src_ptr1 - Border;
- dest_ptr2 = src_ptr2 + 1;
-
- for (i = 0; i < plane_height; i++) {
- vpx_memset(dest_ptr1, src_ptr1[0], Border);
- vpx_memset(dest_ptr2, src_ptr2[0], Border);
- src_ptr1 += plane_stride;
- src_ptr2 += plane_stride;
- dest_ptr1 += plane_stride;
- dest_ptr2 += plane_stride;
+ /* Now copy the top and bottom lines into each line of the respective
+ * borders
+ */
+ src_ptr1 = s - el;
+ src_ptr2 = s + sp * (h - 1) - el;
+ dest_ptr1 = s + sp * (-et) - el;
+ dest_ptr2 = s + sp * (h) - el;
+ linesize = el + er + w;
+
+ for (i = 0; i < et; i++) {
+ vpx_memcpy(dest_ptr1, src_ptr1, linesize);
+ dest_ptr1 += sp;
}
- /* Now copy the top and bottom source lines into each line of the respective borders */
- src_ptr1 = ybf->v_buffer - Border;
- src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
- dest_ptr1 = src_ptr1 - (Border * plane_stride);
- dest_ptr2 = src_ptr2 + plane_stride;
-
- for (i = 0; i < (int)(Border); i++) {
- vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
- vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
- dest_ptr1 += plane_stride;
- dest_ptr2 += plane_stride;
+ for (i = 0; i < eb; i++) {
+ vpx_memcpy(dest_ptr2, src_ptr2, linesize);
+ dest_ptr2 += sp;
}
}
-
-static void
-extend_frame_borders_yonly_c(YV12_BUFFER_CONFIG *ybf) {
- int i;
- unsigned char *src_ptr1, *src_ptr2;
- unsigned char *dest_ptr1, *dest_ptr2;
-
- unsigned int Border;
- int plane_stride;
- int plane_height;
- int plane_width;
-
- /***********/
- /* Y Plane */
- /***********/
- Border = ybf->border;
- plane_stride = ybf->y_stride;
- plane_height = ybf->y_height;
- plane_width = ybf->y_width;
-
- /* copy the left and right most columns out */
- src_ptr1 = ybf->y_buffer;
- src_ptr2 = src_ptr1 + plane_width - 1;
- dest_ptr1 = src_ptr1 - Border;
- dest_ptr2 = src_ptr2 + 1;
-
- for (i = 0; i < plane_height; i++) {
- vpx_memset(dest_ptr1, src_ptr1[0], Border);
- vpx_memset(dest_ptr2, src_ptr2[0], Border);
- src_ptr1 += plane_stride;
- src_ptr2 += plane_stride;
- dest_ptr1 += plane_stride;
- dest_ptr2 += plane_stride;
- }
-
- /* Now copy the top and bottom source lines into each line of the respective borders */
- src_ptr1 = ybf->y_buffer - Border;
- src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
- dest_ptr1 = src_ptr1 - (Border * plane_stride);
- dest_ptr2 = src_ptr2 + plane_stride;
-
- for (i = 0; i < (int)Border; i++) {
- vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
- vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
- dest_ptr1 += plane_stride;
- dest_ptr2 += plane_stride;
- }
-
- plane_stride /= 2;
- plane_height /= 2;
- plane_width /= 2;
- Border /= 2;
-
+void
+vp8_yv12_extend_frame_borders_c(YV12_BUFFER_CONFIG *ybf) {
+ assert(ybf->y_height - ybf->y_crop_height < 16);
+ assert(ybf->y_width - ybf->y_crop_width < 16);
+ assert(ybf->y_height - ybf->y_crop_height >= 0);
+ assert(ybf->y_width - ybf->y_crop_width >= 0);
+
+ extend_plane(ybf->y_buffer, ybf->y_stride,
+ ybf->y_crop_width, ybf->y_crop_height,
+ ybf->border, ybf->border,
+ ybf->border + ybf->y_height - ybf->y_crop_height,
+ ybf->border + ybf->y_width - ybf->y_crop_width);
+
+ extend_plane(ybf->u_buffer, ybf->uv_stride,
+ (ybf->y_crop_width + 1) / 2, (ybf->y_crop_height + 1) / 2,
+ ybf->border / 2, ybf->border / 2,
+ (ybf->border + ybf->y_height - ybf->y_crop_height + 1) / 2,
+ (ybf->border + ybf->y_width - ybf->y_crop_width + 1) / 2);
+
+ extend_plane(ybf->v_buffer, ybf->uv_stride,
+ (ybf->y_crop_width + 1) / 2, (ybf->y_crop_height + 1) / 2,
+ ybf->border / 2, ybf->border / 2,
+ (ybf->border + ybf->y_height - ybf->y_crop_height + 1) / 2,
+ (ybf->border + ybf->y_width - ybf->y_crop_width + 1) / 2);
}
-
+#if CONFIG_VP9
+void vp9_extend_frame_borders_c(YV12_BUFFER_CONFIG *ybf,
+ int subsampling_x, int subsampling_y) {
+ const int c_w = (ybf->y_crop_width + subsampling_x) >> subsampling_x;
+ const int c_h = (ybf->y_crop_height + subsampling_y) >> subsampling_y;
+ const int c_et = ybf->border >> subsampling_y;
+ const int c_el = ybf->border >> subsampling_x;
+ const int c_eb = (ybf->border + ybf->y_height - ybf->y_crop_height +
+ subsampling_y) >> subsampling_y;
+ const int c_er = (ybf->border + ybf->y_width - ybf->y_crop_width +
+ subsampling_x) >> subsampling_x;
+
+ assert(ybf->y_height - ybf->y_crop_height < 16);
+ assert(ybf->y_width - ybf->y_crop_width < 16);
+ assert(ybf->y_height - ybf->y_crop_height >= 0);
+ assert(ybf->y_width - ybf->y_crop_width >= 0);
+
+ extend_plane(ybf->y_buffer, ybf->y_stride,
+ ybf->y_crop_width, ybf->y_crop_height,
+ ybf->border, ybf->border,
+ ybf->border + ybf->y_height - ybf->y_crop_height,
+ ybf->border + ybf->y_width - ybf->y_crop_width);
+
+ extend_plane(ybf->u_buffer, ybf->uv_stride,
+ c_w, c_h, c_et, c_el, c_eb, c_er);
+
+ extend_plane(ybf->v_buffer, ybf->uv_stride,
+ c_w, c_h, c_et, c_el, c_eb, c_er);
+}
+#endif
/****************************************************************************
*
@@ -216,6 +148,14 @@ vp8_yv12_copy_frame_c(YV12_BUFFER_CONFIG *src_ybc,
int row;
unsigned char *source, *dest;
+#if 0
+ /* These assertions are valid in the codec, but the libvpx-tester uses
+ * this code slightly differently.
+ */
+ assert(src_ybc->y_width == dst_ybc->y_width);
+ assert(src_ybc->y_height == dst_ybc->y_height);
+#endif
+
source = src_ybc->y_buffer;
dest = dst_ybc->y_buffer;
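The two entry points above now delegate all border replication to a single extend_plane() helper. A minimal sketch of such a helper, with the parameter order (buffer, stride, cropped width/height, then top/left/bottom/right extension amounts) inferred from the call sites rather than quoted from libvpx:

/* Sketch only: replicate a plane's edge pixels into its border.
 * Parameter order inferred from the extend_plane() calls above. */
static void extend_plane(uint8_t *const src, int src_stride,
                         int width, int height,
                         int extend_top, int extend_left,
                         int extend_bottom, int extend_right) {
  int i;
  const int linesize = extend_left + extend_right + width;
  uint8_t *src_ptr1 = src;
  uint8_t *src_ptr2 = src + width - 1;
  uint8_t *dst_ptr1 = src - extend_left;
  uint8_t *dst_ptr2 = src + width;

  /* left and right: replicate the first/last column of every row */
  for (i = 0; i < height; ++i) {
    vpx_memset(dst_ptr1, src_ptr1[0], extend_left);
    vpx_memset(dst_ptr2, src_ptr2[0], extend_right);
    src_ptr1 += src_stride;
    src_ptr2 += src_stride;
    dst_ptr1 += src_stride;
    dst_ptr2 += src_stride;
  }

  /* top and bottom: replicate the first/last full row, including the
   * freshly written left/right borders */
  src_ptr1 = src - extend_left;
  src_ptr2 = src + src_stride * (height - 1) - extend_left;
  dst_ptr1 = src_ptr1 - src_stride * extend_top;
  dst_ptr2 = src_ptr2 + src_stride;
  for (i = 0; i < extend_top; ++i) {
    vpx_memcpy(dst_ptr1, src_ptr1, linesize);
    dst_ptr1 += src_stride;
  }
  for (i = 0; i < extend_bottom; ++i) {
    vpx_memcpy(dst_ptr2, src_ptr2, linesize);
    dst_ptr2 += src_stride;
  }
}

Extending left and right first lets each top and bottom border row be filled with a single memcpy, border included, which is exactly what the eb/linesize loop visible at the top of this hunk does.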
diff --git a/libvpx/vpx_scale/generic/yv12extend_generic.h b/libvpx/vpx_scale/generic/yv12extend_generic.h
deleted file mode 100644
index cc2a554..0000000
--- a/libvpx/vpx_scale/generic/yv12extend_generic.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#ifndef YV12_EXTEND_GENERIC_H
-#define YV12_EXTEND_GENERIC_H
-
-#include "vpx_scale/yv12config.h"
-
- void vp8_yv12_extend_frame_borders(YV12_BUFFER_CONFIG *ybf);
-
- /* Copy Y,U,V buffer data from src to dst, filling border of dst as well. */
- void vp8_yv12_copy_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
-
- /* Copy Y buffer data from src_ybc to dst_ybc without filling border data */
- void vp8_yv12_copy_y_c(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
-
-#endif /* YV12_EXTEND_GENERIC_H */
diff --git a/libvpx/vpx_scale/include/generic/vpxscale_arbitrary.h b/libvpx/vpx_scale/include/generic/vpxscale_arbitrary.h
deleted file mode 100644
index c535252..0000000
--- a/libvpx/vpx_scale/include/generic/vpxscale_arbitrary.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#ifndef __VPX_SCALE_ARBITRARY_H__
-#define __VPX_SCALE_ARBITRARY_H__
-
-#include "vpx_scale/yv12config.h"
-
-typedef struct {
- int in_width;
- int in_height;
-
- int out_width;
- int out_height;
- int max_usable_out_width;
-
- // numerator for the width and height
- int nw;
- int nh;
- int nh_uv;
-
- // output to input correspondence array
- short *l_w;
- short *l_h;
- short *l_h_uv;
-
- // polyphase coefficients
- short *c_w;
- short *c_h;
- short *c_h_uv;
-
- // buffer for horizontal filtering.
- unsigned char *hbuf;
- unsigned char *hbuf_uv;
-} BICUBIC_SCALER_STRUCT;
-
-int bicubic_coefficient_setup(int in_width, int in_height, int out_width, int out_height);
-int bicubic_scale(int in_width, int in_height, int in_stride,
- int out_width, int out_height, int out_stride,
- unsigned char *input_image, unsigned char *output_image);
-void bicubic_scale_frame_reset();
-void bicubic_scale_frame(YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst,
- int new_width, int new_height);
-void bicubic_coefficient_init();
-void bicubic_coefficient_destroy();
-
-#endif /* __VPX_SCALE_ARBITRARY_H__ */
diff --git a/libvpx/vpx_scale/include/generic/vpxscale_depricated.h b/libvpx/vpx_scale/include/generic/vpxscale_depricated.h
deleted file mode 100644
index 3f7fe0f..0000000
--- a/libvpx/vpx_scale/include/generic/vpxscale_depricated.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/****************************************************************************
-*
-* Module Title : vpxscale_depricated.h
-*
-* Description : Deprecated scaler function-pointer interface
-*
-****************************************************************************/
-#ifndef VPXSCALE_H
-#define VPXSCALE_H
-
-extern void (*vp8_vertical_band_4_5_scale)(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-extern void (*vp8_last_vertical_band_4_5_scale)(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-extern void (*vp8_vertical_band_3_5_scale)(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-extern void (*vp8_last_vertical_band_3_5_scale)(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-extern void (*vp8_horizontal_line_1_2_scale)(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-extern void (*vp8_horizontal_line_3_5_scale)(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-extern void (*vp8_horizontal_line_4_5_scale)(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-extern void (*vp8_vertical_band_1_2_scale)(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-extern void (*vp8_last_vertical_band_1_2_scale)(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-
-extern void dmachine_specific_config(int mmx_enabled, int xmm_enabled, int wmt_enabled);
-
-#endif
diff --git a/libvpx/vpx_scale/vpxscale.h b/libvpx/vpx_scale/vpx_scale.h
index 3c2194d..9ddf62e 100644
--- a/libvpx/vpx_scale/vpxscale.h
+++ b/libvpx/vpx_scale/vpx_scale.h
@@ -14,17 +14,7 @@
#include "vpx_scale/yv12config.h"
-extern void vp8_yv12_scale_or_center(YV12_BUFFER_CONFIG *src_yuv_config,
- YV12_BUFFER_CONFIG *dst_yuv_config,
- int expanded_frame_width,
- int expanded_frame_height,
- int scaling_mode,
- int HScale,
- int HRatio,
- int VScale,
- int VRatio);
-
-extern void vp8_scale_frame(YV12_BUFFER_CONFIG *src,
+extern void vpx_scale_frame(YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst,
unsigned char *temp_area,
unsigned char temp_height,
diff --git a/libvpx/vpx_scale/vpx_scale.mk b/libvpx/vpx_scale/vpx_scale.mk
index dc89478..76c11e7 100644
--- a/libvpx/vpx_scale/vpx_scale.mk
+++ b/libvpx/vpx_scale/vpx_scale.mk
@@ -1,12 +1,13 @@
SCALE_SRCS-yes += vpx_scale.mk
-SCALE_SRCS-yes += scale_mode.h
SCALE_SRCS-yes += yv12config.h
-SCALE_SRCS-yes += vpxscale.h
-SCALE_SRCS-yes += generic/vpxscale.c
+SCALE_SRCS-yes += vpx_scale.h
+SCALE_SRCS-yes += generic/vpx_scale.c
SCALE_SRCS-yes += generic/yv12config.c
SCALE_SRCS-yes += generic/yv12extend.c
-SCALE_SRCS-yes += generic/yv12extend_generic.h
SCALE_SRCS-$(CONFIG_SPATIAL_RESAMPLING) += generic/gen_scalers.c
+SCALE_SRCS-yes += vpx_scale_asm_offsets.c
+SCALE_SRCS-yes += vpx_scale_rtcd.c
+SCALE_SRCS-yes += vpx_scale_rtcd.sh
#neon
SCALE_SRCS-$(HAVE_NEON) += arm/neon/vp8_vpxyv12_copyframe_func_neon$(ASM)
@@ -16,3 +17,8 @@ SCALE_SRCS-$(HAVE_NEON) += arm/neon/vp8_vpxyv12_extendframeborders_neon$(ASM)
SCALE_SRCS-$(HAVE_NEON) += arm/neon/yv12extend_arm.c
SCALE_SRCS-no += $(SCALE_SRCS_REMOVE-yes)
+
+$(eval $(call asm_offsets_template,\
+ vpx_scale_asm_offsets.asm, vpx_scale/vpx_scale_asm_offsets.c))
+
+$(eval $(call rtcd_h_template,vpx_scale_rtcd,vpx_scale/vpx_scale_rtcd.sh))
diff --git a/libvpx/vp8/common/asm_com_offsets.c b/libvpx/vpx_scale/vpx_scale_asm_offsets.c
index ae22b5f..caa9e80 100644
--- a/libvpx/vp8/common/asm_com_offsets.c
+++ b/libvpx/vpx_scale/vpx_scale_asm_offsets.c
@@ -9,15 +9,10 @@
*/
-#include "vpx_config.h"
+#include "./vpx_config.h"
#include "vpx/vpx_codec.h"
#include "vpx_ports/asm_offsets.h"
#include "vpx_scale/yv12config.h"
-#include "vp8/common/blockd.h"
-
-#if CONFIG_POSTPROC
-#include "postproc.h"
-#endif /* CONFIG_POSTPROC */
BEGIN
@@ -34,38 +29,12 @@ DEFINE(yv12_buffer_config_v_buffer, offsetof(YV12_BUFFER_CONFIG, v_b
DEFINE(yv12_buffer_config_border, offsetof(YV12_BUFFER_CONFIG, border));
DEFINE(VP8BORDERINPIXELS_VAL, VP8BORDERINPIXELS);
-#if CONFIG_POSTPROC
-/* mfqe.c / filter_by_weight */
-DEFINE(MFQE_PRECISION_VAL, MFQE_PRECISION);
-#endif /* CONFIG_POSTPROC */
-
END
/* add asserts for any offset that is not supported by assembly code */
/* add asserts for any size that is not supported by assembly code */
-#if HAVE_MEDIA
-/* switch case in vp8_intra4x4_predict_armv6 is based on these enumerated values */
-ct_assert(B_DC_PRED, B_DC_PRED == 0);
-ct_assert(B_TM_PRED, B_TM_PRED == 1);
-ct_assert(B_VE_PRED, B_VE_PRED == 2);
-ct_assert(B_HE_PRED, B_HE_PRED == 3);
-ct_assert(B_LD_PRED, B_LD_PRED == 4);
-ct_assert(B_RD_PRED, B_RD_PRED == 5);
-ct_assert(B_VR_PRED, B_VR_PRED == 6);
-ct_assert(B_VL_PRED, B_VL_PRED == 7);
-ct_assert(B_HD_PRED, B_HD_PRED == 8);
-ct_assert(B_HU_PRED, B_HU_PRED == 9);
-#endif
-
#if HAVE_NEON
/* vp8_yv12_extend_frame_borders_neon makes several assumptions based on this */
ct_assert(VP8BORDERINPIXELS_VAL, VP8BORDERINPIXELS == 32)
#endif
-
-#if HAVE_SSE2
-#if CONFIG_POSTPROC
-/* vp8_filter_by_weight16x16 and 8x8 */
-ct_assert(MFQE_PRECISION_VAL, MFQE_PRECISION == 4)
-#endif /* CONFIG_POSTPROC */
-#endif /* HAVE_SSE2 */
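DEFINE() emits each structure offset into the compiled object file, from which obj_int_extract generates an include file for the assembly sources; ct_assert() turns a condition into a compile-time check so the NEON code's assumptions fail loudly at build time. A rough sketch of the ct_assert idiom (the real macro lives in vpx_ports/asm_offsets.h; this rendering is illustrative):

/* If cond is false, !!(cond) evaluates to 0 and the two case labels
 * collide, so compilation fails at this line. */
#define ct_assert(name, cond) \
  static void assert_##name(void) { switch (0) { case 0: case !!(cond):; } }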
diff --git a/libvpx/vpx_scale/vpx_scale_rtcd.c b/libvpx/vpx_scale/vpx_scale_rtcd.c
new file mode 100644
index 0000000..656a22f
--- /dev/null
+++ b/libvpx/vpx_scale/vpx_scale_rtcd.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "vpx_config.h"
+#define RTCD_C
+#include "vpx_scale_rtcd.h"
+#include "vpx_ports/vpx_once.h"
+
+void vpx_scale_rtcd()
+{
+ once(setup_rtcd_internal);
+}
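once() from vpx_ports/vpx_once.h guarantees setup_rtcd_internal() runs exactly once even if several codec instances initialize concurrently; the header picks pthread_once(), a Win32 interlocked flag, or a plain guard depending on the build. The single-threaded fallback amounts to:

/* Minimal sketch of the non-threaded once() fallback. */
static void once(void (*func)(void)) {
  static int done;
  if (!done) {
    func();
    done = 1;
  }
}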
diff --git a/libvpx/vpx_scale/vpx_scale_rtcd.sh b/libvpx/vpx_scale/vpx_scale_rtcd.sh
new file mode 100644
index 0000000..b4f8907
--- /dev/null
+++ b/libvpx/vpx_scale/vpx_scale_rtcd.sh
@@ -0,0 +1,31 @@
+vpx_scale_forward_decls() {
+cat <<EOF
+struct yv12_buffer_config;
+EOF
+}
+forward_decls vpx_scale_forward_decls
+
+# Scaler functions
+if [ "CONFIG_SPATIAL_RESAMPLING" != "yes" ]; then
+ prototype void vp8_horizontal_line_5_4_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
+ prototype void vp8_vertical_band_5_4_scale "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
+ prototype void vp8_horizontal_line_5_3_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
+ prototype void vp8_vertical_band_5_3_scale "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
+ prototype void vp8_horizontal_line_2_1_scale "const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width"
+ prototype void vp8_vertical_band_2_1_scale "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
+ prototype void vp8_vertical_band_2_1_scale_i "unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width"
+fi
+
+prototype void vp8_yv12_extend_frame_borders "struct yv12_buffer_config *ybf"
+specialize vp8_yv12_extend_frame_borders neon
+
+prototype void vp8_yv12_copy_frame "struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc"
+specialize vp8_yv12_copy_frame neon
+
+prototype void vp8_yv12_copy_y "struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc"
+specialize vp8_yv12_copy_y neon
+
+if [ "$CONFIG_VP9" = "yes" ]; then
+ prototype void vp9_extend_frame_borders "struct yv12_buffer_config *ybf, int subsampling_x, int subsampling_y"
+ specialize vp9_extend_frame_borders
+fi
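Each prototype/specialize pair is expanded by build/make/rtcd.sh into the generated vpx_scale_rtcd.h. Roughly — the exact output depends on the configured extensions, so the following is illustrative, not quoted — a function specialized for NEON becomes a pointer that setup_rtcd_internal() retargets after probing the CPU:

/* Illustrative excerpt of a generated vpx_scale_rtcd.h for an ARM build. */
void vp8_yv12_copy_frame_c(struct yv12_buffer_config *src_ybc,
                           struct yv12_buffer_config *dst_ybc);
void vp8_yv12_copy_frame_neon(struct yv12_buffer_config *src_ybc,
                              struct yv12_buffer_config *dst_ybc);
RTCD_EXTERN void (*vp8_yv12_copy_frame)(struct yv12_buffer_config *src_ybc,
                                        struct yv12_buffer_config *dst_ybc);

static void setup_rtcd_internal(void) {
  int flags = arm_cpu_caps();  /* runtime CPU feature probe */
  vp8_yv12_copy_frame = vp8_yv12_copy_frame_c;
  if (flags & HAS_NEON) vp8_yv12_copy_frame = vp8_yv12_copy_frame_neon;
}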
diff --git a/libvpx/vpx_scale/win32/scaleopt.c b/libvpx/vpx_scale/win32/scaleopt.c
index 2d96cc7..4336ece 100644
--- a/libvpx/vpx_scale/win32/scaleopt.c
+++ b/libvpx/vpx_scale/win32/scaleopt.c
@@ -18,1184 +18,14 @@
****************************************************************************/
#include "pragmas.h"
-
-
/****************************************************************************
* Module Statics
****************************************************************************/
-__declspec(align(16)) const static unsigned short one_fifth[] = { 51, 51, 51, 51 };
-__declspec(align(16)) const static unsigned short two_fifths[] = { 102, 102, 102, 102 };
-__declspec(align(16)) const static unsigned short three_fifths[] = { 154, 154, 154, 154 };
-__declspec(align(16)) const static unsigned short four_fifths[] = { 205, 205, 205, 205 };
__declspec(align(16)) const static unsigned short round_values[] = { 128, 128, 128, 128 };
-__declspec(align(16)) const static unsigned short four_ones[] = { 1, 1, 1, 1};
-__declspec(align(16)) const static unsigned short const45_2[] = {205, 154, 102, 51 };
-__declspec(align(16)) const static unsigned short const45_1[] = { 51, 102, 154, 205 };
-__declspec(align(16)) const static unsigned char mask45[] = { 0, 0, 0, 0, 0, 0, 255, 0};
-__declspec(align(16)) const static unsigned short const35_2[] = { 154, 51, 205, 102 };
-__declspec(align(16)) const static unsigned short const35_1[] = { 102, 205, 51, 154 };
-
-
-#include "vpx_scale/vpxscale.h"
+#include "vpx_scale/vpx_scale.h"
#include "vpx_mem/vpx_mem.h"
-/****************************************************************************
- *
- * ROUTINE : horizontal_line_3_5_scale_mmx
- *
- * INPUTS : const unsigned char *source :
- * unsigned int source_width :
- * unsigned char *dest :
- * unsigned int dest_width :
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : 3 to 5 up-scaling of a horizontal line of pixels.
- *
- * SPECIAL NOTES : None.
- *
- ****************************************************************************/
-static
-void horizontal_line_3_5_scale_mmx
-(
- const unsigned char *source,
- unsigned int source_width,
- unsigned char *dest,
- unsigned int dest_width
-) {
- (void) dest_width;
-
- __asm {
-
- push ebx
-
- mov esi, source
- mov edi, dest
-
- mov ecx, source_width
- lea edx, [esi+ecx-3];
-
- movq mm5, const35_1 // mm5 = 66 xx cd xx 33 xx 9a xx
- movq mm6, const35_2 // mm6 = 9a xx 33 xx cd xx 66 xx
-
- movq mm4, round_values // mm4 = 80 xx 80 xx 80 xx 80 xx
- pxor mm7, mm7 // clear mm7
-
- horiz_line_3_5_loop:
-
- mov eax, DWORD PTR [esi] // eax = 00 01 02 03
- mov ebx, eax
-
- and ebx, 0xffff00 // ebx = xx 01 02 xx
- mov ecx, eax // ecx = 00 01 02 03
-
- and eax, 0xffff0000 // eax = xx xx 02 03
- xor ecx, eax // ecx = 00 01 xx xx
-
- shr ebx, 8 // ebx = 01 02 xx xx
- or eax, ebx // eax = 01 02 02 03
-
- shl ebx, 16 // ebx = xx xx 01 02
- movd mm1, eax // mm1 = 01 02 02 03 xx xx xx xx
-
- or ebx, ecx // ebx = 00 01 01 02
- punpcklbw mm1, mm7 // mm1 = 01 xx 02 xx 02 xx 03 xx
-
- movd mm0, ebx // mm0 = 00 01 01 02
- pmullw mm1, mm6 //
-
- punpcklbw mm0, mm7 // mm0 = 00 xx 01 xx 01 xx 02 xx
- pmullw mm0, mm5 //
-
- mov [edi], ebx // write output 00 xx xx xx
- add esi, 3
-
- add edi, 5
- paddw mm0, mm1
-
- paddw mm0, mm4
- psrlw mm0, 8
-
- cmp esi, edx
- packuswb mm0, mm7
-
- movd DWORD Ptr [edi-4], mm0
- jl horiz_line_3_5_loop
-
-// Exit:
- mov eax, DWORD PTR [esi] // eax = 00 01 02 03
- mov ebx, eax
-
- and ebx, 0xffff00 // ebx = xx 01 02 xx
- mov ecx, eax // ecx = 00 01 02 03
-
- and eax, 0xffff0000 // eax = xx xx 02 03
- xor ecx, eax // ecx = 00 01 xx xx
-
- shr ebx, 8 // ebx = 01 02 xx xx
- or eax, ebx // eax = 01 02 02 03
-
- shl eax, 8 // eax = xx 01 02 02
- and eax, 0xffff0000 // eax = xx xx 02 02
-
- or eax, ebx // eax = 01 02 02 02
-
- shl ebx, 16 // ebx = xx xx 01 02
- movd mm1, eax // mm1 = 01 02 02 02 xx xx xx xx
-
- or ebx, ecx // ebx = 00 01 01 02
- punpcklbw mm1, mm7 // mm1 = 01 xx 02 xx 02 xx 02 xx
-
- movd mm0, ebx // mm0 = 00 01 01 02
- pmullw mm1, mm6 //
-
- punpcklbw mm0, mm7 // mm0 = 00 xx 01 xx 01 xx 02 xx
- pmullw mm0, mm5 //
-
- mov [edi], ebx // write output 00 xx xx xx
- paddw mm0, mm1
-
- paddw mm0, mm4
- psrlw mm0, 8
-
- packuswb mm0, mm7
- movd DWORD Ptr [edi+1], mm0
-
- pop ebx
-
- }
-
-}
-
-
-/****************************************************************************
- *
- * ROUTINE : horizontal_line_4_5_scale_mmx
- *
- * INPUTS : const unsigned char *source :
- * unsigned int source_width :
- * unsigned char *dest :
- * unsigned int dest_width :
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : 4 to 5 up-scaling of a horizontal line of pixels.
- *
- * SPECIAL NOTES : None.
- *
- ****************************************************************************/
-static
-void horizontal_line_4_5_scale_mmx
-(
- const unsigned char *source,
- unsigned int source_width,
- unsigned char *dest,
- unsigned int dest_width
-) {
- (void)dest_width;
-
- __asm {
-
- mov esi, source
- mov edi, dest
-
- mov ecx, source_width
- lea edx, [esi+ecx-8];
-
- movq mm5, const45_1 // mm5 = 33 xx 66 xx 9a xx cd xx
- movq mm6, const45_2 // mm6 = cd xx 9a xx 66 xx 33 xx
-
- movq mm4, round_values // mm4 = 80 xx 80 xx 80 xx 80 xx
- pxor mm7, mm7 // clear mm7
-
- horiz_line_4_5_loop:
-
- movq mm0, QWORD PTR [esi] // mm0 = 00 01 02 03 04 05 06 07
- movq mm1, QWORD PTR [esi+1]; // mm1 = 01 02 03 04 05 06 07 08
-
- movq mm2, mm0 // mm2 = 00 01 02 03 04 05 06 07
- movq mm3, mm1 // mm3 = 01 02 03 04 05 06 07 08
-
- movd DWORD PTR [edi], mm0 // write output 00 xx xx xx
- punpcklbw mm0, mm7 // mm0 = 00 xx 01 xx 02 xx 03 xx
-
- punpcklbw mm1, mm7 // mm1 = 01 xx 02 xx 03 xx 04 xx
- pmullw mm0, mm5 // 00* 51 01*102 02*154 03*205
-
- pmullw mm1, mm6 // 01*205 02*154 03*102 04* 51
- punpckhbw mm2, mm7 // mm2 = 04 xx 05 xx 06 xx 07 xx
-
- movd DWORD PTR [edi+5], mm2 // write output 05 xx xx xx
- pmullw mm2, mm5 // 04* 51 05*102 06*154 07*205
-
- punpckhbw mm3, mm7 // mm3 = 05 xx 06 xx 07 xx 08 xx
- pmullw mm3, mm6 // 05*205 06*154 07*102 08* 51
-
- paddw mm0, mm1 // added round values
- paddw mm0, mm4
-
- psrlw mm0, 8 // output: 01 xx 02 xx 03 xx 04 xx
- packuswb mm0, mm7
-
- movd DWORD PTR [edi+1], mm0 // write output 01 02 03 04
- add edi, 10
-
- add esi, 8
- paddw mm2, mm3 //
-
- paddw mm2, mm4 // added round values
- cmp esi, edx
-
- psrlw mm2, 8
- packuswb mm2, mm7
-
- movd DWORD PTR [edi-4], mm2 // write output 06 07 08 09
- jl horiz_line_4_5_loop
-
-// Exit:
- movq mm0, [esi] // mm0 = 00 01 02 03 04 05 06 07
- movq mm1, mm0 // mm1 = 00 01 02 03 04 05 06 07
-
- movq mm2, mm0 // mm2 = 00 01 02 03 04 05 06 07
- psrlq mm1, 8 // mm1 = 01 02 03 04 05 06 07 00
-
- movq mm3, mask45 // mm3 = 00 00 00 00 00 00 ff 00
- pand mm3, mm1 // mm3 = 00 00 00 00 00 00 07 00
-
- psllq mm3, 8 // mm3 = 00 00 00 00 00 00 00 07
- por mm1, mm3 // mm1 = 01 02 03 04 05 06 07 07
-
- movq mm3, mm1
-
- movd DWORD PTR [edi], mm0 // write output 00 xx xx xx
- punpcklbw mm0, mm7 // mm0 = 00 xx 01 xx 02 xx 03 xx
-
- punpcklbw mm1, mm7 // mm1 = 01 xx 02 xx 03 xx 04 xx
- pmullw mm0, mm5 // 00* 51 01*102 02*154 03*205
-
- pmullw mm1, mm6 // 01*205 02*154 03*102 04* 51
- punpckhbw mm2, mm7 // mm2 = 04 xx 05 xx 06 xx 07 xx
-
- movd DWORD PTR [edi+5], mm2 // write output 05 xx xx xx
- pmullw mm2, mm5 // 04* 51 05*102 06*154 07*205
-
- punpckhbw mm3, mm7 // mm3 = 05 xx 06 xx 07 xx 08 xx
- pmullw mm3, mm6 // 05*205 06*154 07*102 07* 51
-
- paddw mm0, mm1 // added round values
- paddw mm0, mm4
-
- psrlw mm0, 8 // output: 01 xx 02 xx 03 xx 04 xx
- packuswb mm0, mm7 // 01 02 03 04 xx xx xx xx
-
- movd DWORD PTR [edi+1], mm0 // write output 01 02 03 04
- paddw mm2, mm3 //
-
- paddw mm2, mm4 // added round values
- psrlw mm2, 8
-
- packuswb mm2, mm7
- movd DWORD PTR [edi+6], mm2 // write output 06 07 08 09
-
-
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vertical_band_4_5_scale_mmx
- *
- * INPUTS : unsigned char *dest :
- * unsigned int dest_pitch :
- * unsigned int dest_width :
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : 4 to 5 up-scaling of a 4 pixel high band of pixels.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band. The function also has a "C" only
- * version.
- *
- ****************************************************************************/
-static
-void vertical_band_4_5_scale_mmx
-(
- unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width
-) {
- __asm {
-
- mov esi, dest // Get the source and destination pointer
- mov ecx, dest_pitch // Get the pitch size
-
- lea edi, [esi+ecx*2] // two lines below
- add edi, ecx // three lines below
-
- pxor mm7, mm7 // clear out mm7
- mov edx, dest_width // Loop counter
-
- vs_4_5_loop:
-
- movq mm0, QWORD ptr [esi] // src[0];
- movq mm1, QWORD ptr [esi+ecx] // src[1];
-
- movq mm2, mm0 // Make a copy
- punpcklbw mm0, mm7 // unpack low to word
-
- movq mm5, one_fifth
- punpckhbw mm2, mm7 // unpack high to word
-
- pmullw mm0, mm5 // a * 1/5
-
- movq mm3, mm1 // make a copy
- punpcklbw mm1, mm7 // unpack low to word
-
- pmullw mm2, mm5 // a * 1/5
- movq mm6, four_fifths // constant
-
- movq mm4, mm1 // copy of low b
- pmullw mm4, mm6 // b * 4/5
-
- punpckhbw mm3, mm7 // unpack high to word
- movq mm5, mm3 // copy of high b
-
- pmullw mm5, mm6 // b * 4/5
- paddw mm0, mm4 // a * 1/5 + b * 4/5
-
- paddw mm2, mm5 // a * 1/5 + b * 4/5
- paddw mm0, round_values // + 128
-
- paddw mm2, round_values // + 128
- psrlw mm0, 8
-
- psrlw mm2, 8
- packuswb mm0, mm2 // des [1]
-
- movq QWORD ptr [esi+ecx], mm0 // write des[1]
- movq mm0, [esi+ecx*2] // mm0 = src[2]
-
- // mm1, mm3 --- Src[1]
- // mm0 --- Src[2]
- // mm7 for unpacking
-
- movq mm5, two_fifths
- movq mm2, mm0 // make a copy
-
- pmullw mm1, mm5 // b * 2/5
- movq mm6, three_fifths
-
-
- punpcklbw mm0, mm7 // unpack low to word
- pmullw mm3, mm5 // b * 2/5
-
- movq mm4, mm0 // make copy of c
- punpckhbw mm2, mm7 // unpack high to word
-
- pmullw mm4, mm6 // c * 3/5
- movq mm5, mm2
-
- pmullw mm5, mm6 // c * 3/5
- paddw mm1, mm4 // b * 2/5 + c * 3/5
-
- paddw mm3, mm5 // b * 2/5 + c * 3/5
- paddw mm1, round_values // + 128
-
- paddw mm3, round_values // + 128
- psrlw mm1, 8
-
- psrlw mm3, 8
- packuswb mm1, mm3 // des[2]
-
- movq QWORD ptr [esi+ecx*2], mm1 // write des[2]
- movq mm1, [edi] // mm1=Src[3];
-
- // mm0, mm2 --- Src[2]
- // mm1 --- Src[3]
- // mm6 --- 3/5
- // mm7 for unpacking
-
- pmullw mm0, mm6 // c * 3/5
- movq mm5, two_fifths // mm5 = 2/5
-
- movq mm3, mm1 // make a copy
- pmullw mm2, mm6 // c * 3/5
-
- punpcklbw mm1, mm7 // unpack low
- movq mm4, mm1 // make a copy
-
- punpckhbw mm3, mm7 // unpack high
- pmullw mm4, mm5 // d * 2/5
-
- movq mm6, mm3 // make a copy
- pmullw mm6, mm5 // d * 2/5
-
- paddw mm0, mm4 // c * 3/5 + d * 2/5
- paddw mm2, mm6 // c * 3/5 + d * 2/5
-
- paddw mm0, round_values // + 128
- paddw mm2, round_values // + 128
-
- psrlw mm0, 8
- psrlw mm2, 8
-
- packuswb mm0, mm2 // des[3]
- movq QWORD ptr [edi], mm0 // write des[3]
-
- // mm1, mm3 --- Src[3]
- // mm7 -- cleared for unpacking
-
- movq mm0, [edi+ecx*2] // mm0, Src[0] of the next group
-
- movq mm5, four_fifths // mm5 = 4/5
- pmullw mm1, mm5 // d * 4/5
-
- movq mm6, one_fifth // mm6 = 1/5
- movq mm2, mm0 // make a copy
-
- pmullw mm3, mm5 // d * 4/5
- punpcklbw mm0, mm7 // unpack low
-
- pmullw mm0, mm6 // an * 1/5
- punpckhbw mm2, mm7 // unpack high
-
- paddw mm1, mm0 // d * 4/5 + an * 1/5
- pmullw mm2, mm6 // an * 1/5
-
- paddw mm3, mm2 // d * 4/5 + an * 1/5
- paddw mm1, round_values // + 128
-
- paddw mm3, round_values // + 128
- psrlw mm1, 8
-
- psrlw mm3, 8
- packuswb mm1, mm3 // des[4]
-
- movq QWORD ptr [edi+ecx], mm1 // write des[4]
-
- add edi, 8
- add esi, 8
-
- sub edx, 8
- jg vs_4_5_loop
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : last_vertical_band_4_5_scale_mmx
- *
- * INPUTS : unsigned char *dest :
- * unsigned int dest_pitch :
- * unsigned int dest_width :
- *
- * OUTPUTS : None.
- *
- * RETURNS : None
- *
- * FUNCTION : 4 to 5 up-scaling of the last 4-pixel high band in an image.
- *
- * SPECIAL NOTES : The last band has no band below it, so the final
- * output line duplicates the last source line. The
- * function also has a "C" only version.
- *
- ****************************************************************************/
-static
-void last_vertical_band_4_5_scale_mmx
-(
- unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width
-) {
- __asm {
- mov esi, dest // Get the source and destination pointer
- mov ecx, dest_pitch // Get the pitch size
-
- lea edi, [esi+ecx*2] // two lines below
- add edi, ecx // three lines below
-
- pxor mm7, mm7 // clear out mm7
- mov edx, dest_width // Loop counter
-
- last_vs_4_5_loop:
-
- movq mm0, QWORD ptr [esi] // src[0];
- movq mm1, QWORD ptr [esi+ecx] // src[1];
-
- movq mm2, mm0 // Make a copy
- punpcklbw mm0, mm7 // unpack low to word
-
- movq mm5, one_fifth
- punpckhbw mm2, mm7 // unpack high to word
-
- pmullw mm0, mm5 // a * 1/5
-
- movq mm3, mm1 // make a copy
- punpcklbw mm1, mm7 // unpack low to word
-
- pmullw mm2, mm5 // a * 1/5
- movq mm6, four_fifths // constant
-
- movq mm4, mm1 // copy of low b
- pmullw mm4, mm6 // b * 4/5
-
- punpckhbw mm3, mm7 // unpack high to word
- movq mm5, mm3 // copy of high b
-
- pmullw mm5, mm6 // b * 4/5
- paddw mm0, mm4 // a * 1/5 + b * 4/5
-
- paddw mm2, mm5 // a * 1/5 + b * 4/5
- paddw mm0, round_values // + 128
-
- paddw mm2, round_values // + 128
- psrlw mm0, 8
-
- psrlw mm2, 8
- packuswb mm0, mm2 // des [1]
-
- movq QWORD ptr [esi+ecx], mm0 // write des[1]
- movq mm0, [esi+ecx*2] // mm0 = src[2]
-
- // mm1, mm3 --- Src[1]
- // mm0 --- Src[2]
- // mm7 for unpacking
-
- movq mm5, two_fifths
- movq mm2, mm0 // make a copy
-
- pmullw mm1, mm5 // b * 2/5
- movq mm6, three_fifths
-
-
- punpcklbw mm0, mm7 // unpack low to word
- pmullw mm3, mm5 // b * 2/5
-
- movq mm4, mm0 // make copy of c
- punpckhbw mm2, mm7 // unpack high to word
-
- pmullw mm4, mm6 // c * 3/5
- movq mm5, mm2
-
- pmullw mm5, mm6 // c * 3/5
- paddw mm1, mm4 // b * 2/5 + c * 3/5
-
- paddw mm3, mm5 // b * 2/5 + c * 3/5
- paddw mm1, round_values // + 128
-
- paddw mm3, round_values // + 128
- psrlw mm1, 8
-
- psrlw mm3, 8
- packuswb mm1, mm3 // des[2]
-
- movq QWORD ptr [esi+ecx*2], mm1 // write des[2]
- movq mm1, [edi] // mm1=Src[3];
-
- movq QWORD ptr [edi+ecx], mm1 // write des[4];
-
- // mm0, mm2 --- Src[2]
- // mm1 --- Src[3]
- // mm6 --- 3/5
- // mm7 for unpacking
-
- pmullw mm0, mm6 // c * 3/5
- movq mm5, two_fifths // mm5 = 2/5
-
- movq mm3, mm1 // make a copy
- pmullw mm2, mm6 // c * 3/5
-
- punpcklbw mm1, mm7 // unpack low
- movq mm4, mm1 // make a copy
-
- punpckhbw mm3, mm7 // unpack high
- pmullw mm4, mm5 // d * 2/5
-
- movq mm6, mm3 // make a copy
- pmullw mm6, mm5 // d * 2/5
-
- paddw mm0, mm4 // c * 3/5 + d * 2/5
- paddw mm2, mm6 // c * 3/5 + d * 2/5
-
- paddw mm0, round_values // + 128
- paddw mm2, round_values // + 128
-
- psrlw mm0, 8
- psrlw mm2, 8
-
- packuswb mm0, mm2 // des[3]
- movq QWORD ptr [edi], mm0 // write des[3]
-
- // mm1, mm3 --- Src[3]
- // mm7 -- cleared for unpacking
- add edi, 8
- add esi, 8
-
- sub edx, 8
- jg last_vs_4_5_loop
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vertical_band_3_5_scale_mmx
- *
- * INPUTS : unsigned char *dest :
- * unsigned int dest_pitch :
- * unsigned int dest_width :
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : 3 to 5 up-scaling of a 3-pixel high band of pixels.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band. The function also has a "C" only
- * version.
- *
- ****************************************************************************/
-static
-void vertical_band_3_5_scale_mmx
-(
- unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width
-) {
- __asm {
- mov esi, dest // Get the source and destination pointer
- mov ecx, dest_pitch // Get the pitch size
-
- lea edi, [esi+ecx*2] // two lines below
- add edi, ecx // three lines below
-
- pxor mm7, mm7 // clear out mm7
- mov edx, dest_width // Loop counter
-
- vs_3_5_loop:
-
- movq mm0, QWORD ptr [esi] // src[0];
- movq mm1, QWORD ptr [esi+ecx] // src[1];
-
- movq mm2, mm0 // Make a copy
- punpcklbw mm0, mm7 // unpack low to word
-
- movq mm5, two_fifths // mm5 = 2/5
- punpckhbw mm2, mm7 // unpack high to word
-
- pmullw mm0, mm5 // a * 2/5
-
- movq mm3, mm1 // make a copy
- punpcklbw mm1, mm7 // unpack low to word
-
- pmullw mm2, mm5 // a * 2/5
- movq mm6, three_fifths // mm6 = 3/5
-
- movq mm4, mm1 // copy of low b
- pmullw mm4, mm6 // b * 3/5
-
- punpckhbw mm3, mm7 // unpack high to word
- movq mm5, mm3 // copy of high b
-
- pmullw mm5, mm6 // b * 3/5
- paddw mm0, mm4 // a * 2/5 + b * 3/5
-
- paddw mm2, mm5 // a * 2/5 + b * 3/5
- paddw mm0, round_values // + 128
-
- paddw mm2, round_values // + 128
- psrlw mm0, 8
-
- psrlw mm2, 8
- packuswb mm0, mm2 // des [1]
-
- movq QWORD ptr [esi+ecx], mm0 // write des[1]
- movq mm0, [esi+ecx*2] // mm0 = src[2]
-
- // mm1, mm3 --- Src[1]
- // mm0 --- Src[2]
- // mm7 for unpacking
-
- movq mm4, mm1 // b low
- pmullw mm1, four_fifths // b * 4/5 low
-
- movq mm5, mm3 // b high
- pmullw mm3, four_fifths // b * 4/5 high
-
- movq mm2, mm0 // c
- pmullw mm4, one_fifth // b * 1/5
-
- punpcklbw mm0, mm7 // c low
- pmullw mm5, one_fifth // b * 1/5
-
- movq mm6, mm0 // make copy of c low
- punpckhbw mm2, mm7 // c high
-
- pmullw mm6, one_fifth // c * 1/5 low
- movq mm7, mm2 // make copy of c high
-
- pmullw mm7, one_fifth // c * 1/5 high
- paddw mm1, mm6 // b * 4/5 + c * 1/5 low
-
- paddw mm3, mm7 // b * 4/5 + c * 1/5 high
- movq mm6, mm0 // make copy of c low
-
- pmullw mm6, four_fifths // c * 4/5 low
- movq mm7, mm2 // make copy of c high
-
- pmullw mm7, four_fifths // c * 4/5 high
-
- paddw mm4, mm6 // b * 1/5 + c * 4/5 low
- paddw mm5, mm7 // b * 1/5 + c * 4/5 high
-
- paddw mm1, round_values // + 128
- paddw mm3, round_values // + 128
-
- psrlw mm1, 8
- psrlw mm3, 8
-
- packuswb mm1, mm3 // des[2]
- movq QWORD ptr [esi+ecx*2], mm1 // write des[2]
-
- paddw mm4, round_values // + 128
- paddw mm5, round_values // + 128
-
- psrlw mm4, 8
- psrlw mm5, 8
-
- packuswb mm4, mm5 // des[3]
- movq QWORD ptr [edi], mm4 // write des[3]
-
- // mm0, mm2 --- Src[3]
-
- pxor mm7, mm7 // clear mm7 for unpacking
- movq mm1, [edi+ecx*2] // mm1 = Src[0] of the next group
-
- movq mm5, three_fifths // mm5 = 3/5
- pmullw mm0, mm5 // d * 3/5
-
- movq mm6, two_fifths // mm6 = 2/5
- movq mm3, mm1 // make a copy
-
- pmullw mm2, mm5 // d * 3/5
- punpcklbw mm1, mm7 // unpack low
-
- pmullw mm1, mm6 // an * 2/5
- punpckhbw mm3, mm7 // unpack high
-
- paddw mm0, mm1 // d * 3/5 + an * 2/5
- pmullw mm3, mm6 // an * 2/5
-
- paddw mm2, mm3 // d * 3/5 + an * 2/5
- paddw mm0, round_values // + 128
-
- paddw mm2, round_values // + 128
- psrlw mm0, 8
-
- psrlw mm2, 8
- packuswb mm0, mm2 // des[4]
-
- movq QWORD ptr [edi+ecx], mm0 // write des[4]
-
- add edi, 8
- add esi, 8
-
- sub edx, 8
- jg vs_3_5_loop
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : last_vertical_band_3_5_scale_mmx
- *
- * INPUTS : unsigned char *dest :
- * unsigned int dest_pitch :
- * unsigned int dest_width :
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : 3 to 5 up-scaling of the last 3-pixel high band in an image.
- *
- * SPECIAL NOTES : The last band has no band below it, so the final
- * output line duplicates the last source line. The
- * function also has a "C" only version.
- *
- ****************************************************************************/
-static
-void last_vertical_band_3_5_scale_mmx
-(
- unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width
-) {
- __asm {
- mov esi, dest // Get the source and destination pointer
- mov ecx, dest_pitch // Get the pitch size
-
- lea edi, [esi+ecx*2] // two lines below
- add edi, ecx // three lines below
-
- pxor mm7, mm7 // clear out mm7
- mov edx, dest_width // Loop counter
-
-
- last_vs_3_5_loop:
-
- movq mm0, QWORD ptr [esi] // src[0];
- movq mm1, QWORD ptr [esi+ecx] // src[1];
-
- movq mm2, mm0 // Make a copy
- punpcklbw mm0, mm7 // unpack low to word
-
- movq mm5, two_fifths // mm5 = 2/5
- punpckhbw mm2, mm7 // unpack high to word
-
- pmullw mm0, mm5 // a * 2/5
-
- movq mm3, mm1 // make a copy
- punpcklbw mm1, mm7 // unpack low to word
-
- pmullw mm2, mm5 // a * 2/5
- movq mm6, three_fifths // mm6 = 3/5
-
- movq mm4, mm1 // copy of low b
- pmullw mm4, mm6 // b * 3/5
-
- punpckhbw mm3, mm7 // unpack high to word
- movq mm5, mm3 // copy of high b
-
- pmullw mm5, mm6 // b * 3/5
- paddw mm0, mm4 // a * 2/5 + b * 3/5
-
- paddw mm2, mm5 // a * 2/5 + b * 3/5
- paddw mm0, round_values // + 128
-
- paddw mm2, round_values // + 128
- psrlw mm0, 8
-
- psrlw mm2, 8
- packuswb mm0, mm2 // des [1]
-
- movq QWORD ptr [esi+ecx], mm0 // write des[1]
- movq mm0, [esi+ecx*2] // mm0 = src[2]
-
-
-
- // mm1, mm3 --- Src[1]
- // mm0 --- Src[2]
- // mm7 for unpacking
-
- movq mm4, mm1 // b low
- pmullw mm1, four_fifths // b * 4/5 low
-
- movq QWORD ptr [edi+ecx], mm0 // write des[4]
-
- movq mm5, mm3 // b high
- pmullw mm3, four_fifths // b * 4/5 high
-
- movq mm2, mm0 // c
- pmullw mm4, one_fifth // b * 1/5
-
- punpcklbw mm0, mm7 // c low
- pmullw mm5, one_fifth // b * 1/5
-
- movq mm6, mm0 // make copy of c low
- punpckhbw mm2, mm7 // c high
-
- pmullw mm6, one_fifth // c * 1/5 low
- movq mm7, mm2 // make copy of c high
-
- pmullw mm7, one_fifth // c * 1/5 high
- paddw mm1, mm6 // b * 4/5 + c * 1/5 low
-
- paddw mm3, mm7 // b * 4/5 + c * 1/5 high
- movq mm6, mm0 // make copy of c low
-
- pmullw mm6, four_fifths // c * 4/5 low
- movq mm7, mm2 // make copy of c high
-
- pmullw mm7, four_fifths // c * 4/5 high
-
- paddw mm4, mm6 // b * 1/5 + c * 4/5 low
- paddw mm5, mm7 // b * 1/5 + c * 4/5 high
-
- paddw mm1, round_values // + 128
- paddw mm3, round_values // + 128
-
- psrlw mm1, 8
- psrlw mm3, 8
-
- packuswb mm1, mm3 // des[2]
- movq QWORD ptr [esi+ecx*2], mm1 // write des[2]
-
- paddw mm4, round_values // + 128
- paddw mm5, round_values // + 128
-
- psrlw mm4, 8
- psrlw mm5, 8
-
- packuswb mm4, mm5 // des[3]
- movq QWORD ptr [edi], mm4 // write des[3]
-
- // mm0, mm2 --- Src[3]
-
- add edi, 8
- add esi, 8
-
- sub edx, 8
- jg last_vs_3_5_loop
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : vertical_band_1_2_scale_mmx
- *
- * INPUTS : unsigned char *dest :
- * unsigned int dest_pitch :
- * unsigned int dest_width :
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : 1 to 2 up-scaling of a band of pixels.
- *
- * SPECIAL NOTES : The routine uses the first line of the band below
- * the current band. The function also has a "C" only
- * version.
- *
- ****************************************************************************/
-static
-void vertical_band_1_2_scale_mmx
-(
- unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width
-) {
- __asm {
-
- mov esi, dest // Get the source and destination pointer
- mov ecx, dest_pitch // Get the pitch size
-
- pxor mm7, mm7 // clear out mm7
- mov edx, dest_width // Loop counter
-
- vs_1_2_loop:
-
- movq mm0, [esi] // get Src[0]
- movq mm1, [esi + ecx * 2] // get Src[1]
-
- movq mm2, mm0 // make copy before unpack
- movq mm3, mm1 // make copy before unpack
-
- punpcklbw mm0, mm7 // low Src[0]
- movq mm6, four_ones // mm6= 1, 1, 1, 1
-
- punpcklbw mm1, mm7 // low Src[1]
- paddw mm0, mm1 // low (a + b)
-
- punpckhbw mm2, mm7 // high Src[0]
- paddw mm0, mm6 // low (a + b + 1)
-
- punpckhbw mm3, mm7
- paddw mm2, mm3 // high (a + b )
-
- psraw mm0, 1 // low (a + b +1 )/2
- paddw mm2, mm6 // high (a + b + 1)
-
- psraw mm2, 1 // high (a + b + 1)/2
- packuswb mm0, mm2 // pack results
-
- movq [esi+ecx], mm0 // write out eight bytes
- add esi, 8
-
- sub edx, 8
- jg vs_1_2_loop
- }
-
-}
-
-/****************************************************************************
- *
- * ROUTINE : last_vertical_band_1_2_scale_mmx
- *
- * INPUTS : unsigned char *dest :
- * unsigned int dest_pitch :
- * unsigned int dest_width :
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : 1 to 2 up-scaling of the last band of pixels in an image.
- *
- * SPECIAL NOTES : The last band has no band below it, so the final
- * output line duplicates the last source line. The
- * function also has a "C" only version.
- *
- ****************************************************************************/
-static
-void last_vertical_band_1_2_scale_mmx
-(
- unsigned char *dest,
- unsigned int dest_pitch,
- unsigned int dest_width
-) {
- __asm {
- mov esi, dest // Get the source and destination pointer
- mov ecx, dest_pitch // Get the pitch size
-
- mov edx, dest_width // Loop counter
-
- last_vs_1_2_loop:
-
- movq mm0, [esi] // get Src[0]
- movq [esi+ecx], mm0 // write out eight bytes
-
- add esi, 8
- sub edx, 8
-
- jg last_vs_1_2_loop
- }
-}
-
-/****************************************************************************
- *
- * ROUTINE : horizontal_line_1_2_scale_mmx
- *
- * INPUTS : const unsigned char *source :
- * unsigned int source_width :
- * unsigned char *dest :
- * unsigned int dest_width :
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : 1 to 2 up-scaling of a horizontal line of pixels.
- *
- * SPECIAL NOTES : None.
- *
- ****************************************************************************/
-static
-void horizontal_line_1_2_scale_mmx
-(
- const unsigned char *source,
- unsigned int source_width,
- unsigned char *dest,
- unsigned int dest_width
-) {
- (void) dest_width;
-
- __asm {
- mov esi, source
- mov edi, dest
-
- pxor mm7, mm7
- movq mm6, four_ones
-
- mov ecx, source_width
-
- hs_1_2_loop:
-
- movq mm0, [esi]
- movq mm1, [esi+1]
-
- movq mm2, mm0
- movq mm3, mm1
-
- movq mm4, mm0
- punpcklbw mm0, mm7
-
- punpcklbw mm1, mm7
- paddw mm0, mm1
-
- paddw mm0, mm6
- punpckhbw mm2, mm7
-
- punpckhbw mm3, mm7
- paddw mm2, mm3
-
- paddw mm2, mm6
- psraw mm0, 1
-
- psraw mm2, 1
- packuswb mm0, mm2
-
- movq mm2, mm4
- punpcklbw mm2, mm0
-
- movq [edi], mm2
- punpckhbw mm4, mm0
-
- movq [edi+8], mm4
- add esi, 8
-
- add edi, 16
- sub ecx, 8
-
- cmp ecx, 8
- jg hs_1_2_loop
-
-// last eight pixels
-
- movq mm0, [esi]
- movq mm1, mm0
-
- movq mm2, mm0
- movq mm3, mm1
-
- psrlq mm1, 8
- psrlq mm3, 56
-
- psllq mm3, 56
- por mm1, mm3
-
- movq mm3, mm1
- movq mm4, mm0
-
- punpcklbw mm0, mm7
- punpcklbw mm1, mm7
-
- paddw mm0, mm1
- paddw mm0, mm6
-
- punpckhbw mm2, mm7
- punpckhbw mm3, mm7
-
- paddw mm2, mm3
- paddw mm2, mm6
-
- psraw mm0, 1
- psraw mm2, 1
-
- packuswb mm0, mm2
- movq mm2, mm4
-
- punpcklbw mm2, mm0
- movq [edi], mm2
-
- punpckhbw mm4, mm0
- movq [edi+8], mm4
- }
-}
-
-
-
-
-
__declspec(align(16)) const static unsigned short const54_2[] = { 0, 64, 128, 192 };
__declspec(align(16)) const static unsigned short const54_1[] = {256, 192, 128, 64 };
@@ -1685,25 +515,6 @@ void vertical_band_2_1_scale_i_mmx(unsigned char *source, unsigned int src_pitch
void
register_mmxscalers(void) {
- vp8_horizontal_line_1_2_scale = horizontal_line_1_2_scale_mmx;
- vp8_vertical_band_1_2_scale = vertical_band_1_2_scale_mmx;
- vp8_last_vertical_band_1_2_scale = last_vertical_band_1_2_scale_mmx;
- vp8_horizontal_line_3_5_scale = horizontal_line_3_5_scale_mmx;
- vp8_vertical_band_3_5_scale = vertical_band_3_5_scale_mmx;
- vp8_last_vertical_band_3_5_scale = last_vertical_band_3_5_scale_mmx;
- vp8_horizontal_line_4_5_scale = horizontal_line_4_5_scale_mmx;
- vp8_vertical_band_4_5_scale = vertical_band_4_5_scale_mmx;
- vp8_last_vertical_band_4_5_scale = last_vertical_band_4_5_scale_mmx;
-
- vp8_horizontal_line_3_4_scale = vp8cx_horizontal_line_3_4_scale_c;
- vp8_vertical_band_3_4_scale = vp8cx_vertical_band_3_4_scale_c;
- vp8_last_vertical_band_3_4_scale = vp8cx_last_vertical_band_3_4_scale_c;
- vp8_horizontal_line_2_3_scale = vp8cx_horizontal_line_2_3_scale_c;
- vp8_vertical_band_2_3_scale = vp8cx_vertical_band_2_3_scale_c;
- vp8_last_vertical_band_2_3_scale = vp8cx_last_vertical_band_2_3_scale_c;
-
-
-
vp8_vertical_band_5_4_scale = vertical_band_5_4_scale_mmx;
vp8_vertical_band_5_3_scale = vertical_band_5_3_scale_mmx;
vp8_vertical_band_2_1_scale = vertical_band_2_1_scale_mmx;
@@ -1711,8 +522,4 @@ register_mmxscalers(void) {
vp8_horizontal_line_2_1_scale = horizontal_line_2_1_scale_mmx;
vp8_horizontal_line_5_3_scale = horizontal_line_5_3_scale_mmx;
vp8_horizontal_line_5_4_scale = horizontal_line_5_4_scale_mmx;
-
-
-
-
}
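The fixed-point weights the deleted scalers loaded (51, 102, 154, 205) are k/5 scaled by 256, so each output row of the 4-to-5 vertical scaler is an 8-bit blend of two source rows with +128 rounding. A plain-C rendering of that arithmetic — assuming, as the deleted MMX code did, that the four source rows already sit at rows 0-3 of the 5-row output band and the next band's first row at row 5:

/* Sketch of the arithmetic in the deleted vertical_band_4_5_scale_mmx. */
static void vertical_band_4_5_scale_sketch(unsigned char *dest,
                                           unsigned int dest_pitch,
                                           unsigned int dest_width) {
  unsigned int i;
  for (i = 0; i < dest_width; ++i) {
    const unsigned char a = dest[i];                  /* src[0] */
    const unsigned char b = dest[dest_pitch + i];     /* src[1] */
    const unsigned char c = dest[dest_pitch * 2 + i]; /* src[2] */
    const unsigned char d = dest[dest_pitch * 3 + i]; /* src[3] */
    const unsigned char e = dest[dest_pitch * 5 + i]; /* next band src[0] */
    dest[dest_pitch + i]     = (a *  51 + b * 205 + 128) >> 8; /* 1/5, 4/5 */
    dest[dest_pitch * 2 + i] = (b * 102 + c * 154 + 128) >> 8; /* 2/5, 3/5 */
    dest[dest_pitch * 3 + i] = (c * 154 + d * 102 + 128) >> 8; /* 3/5, 2/5 */
    dest[dest_pitch * 4 + i] = (d * 205 + e *  51 + 128) >> 8; /* 4/5, 1/5 */
  }
}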
diff --git a/libvpx/vpx_scale/win32/scalesystemdependent.c b/libvpx/vpx_scale/win32/scalesystemdependent.c
deleted file mode 100644
index 98913d1..0000000
--- a/libvpx/vpx_scale/win32/scalesystemdependent.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-/****************************************************************************
-*
-* Module Title : system_dependent.c
-*
-* Description : Miscellaneous system dependent functions
-*
-****************************************************************************/
-
-/****************************************************************************
-* Header Files
-****************************************************************************/
-#include "vpx_scale/vpxscale.h"
-#include "cpuidlib.h"
-
-/****************************************************************************
-* Imports
-*****************************************************************************/
-extern void register_generic_scalers(void);
-extern void register_mmxscalers(void);
-
-/****************************************************************************
- *
- * ROUTINE : vp8_scale_machine_specific_config
- *
- * INPUTS : None.
- *
- * OUTPUTS : None.
- *
- * RETURNS : void
- *
- * FUNCTION : Checks for machine specific features such as MMX support
- * and sets appropriate flags and function pointers.
- *
- * SPECIAL NOTES : None.
- *
- ****************************************************************************/
-void
-vp8_scale_machine_specific_config(void) {
- // If MMX is supported, use the MMX versions of these functions;
- // otherwise, use the original 'C' versions.
- int mmx_enabled;
- int xmm_enabled;
- int wmt_enabled;
-
- vpx_get_processor_flags(&mmx_enabled, &xmm_enabled, &wmt_enabled);
-
- if (mmx_enabled || xmm_enabled || wmt_enabled) {
- register_mmxscalers();
- } else {
- vp8_horizontal_line_1_2_scale = vp8cx_horizontal_line_1_2_scale_c;
- vp8_vertical_band_1_2_scale = vp8cx_vertical_band_1_2_scale_c;
- vp8_last_vertical_band_1_2_scale = vp8cx_last_vertical_band_1_2_scale_c;
- vp8_horizontal_line_3_5_scale = vp8cx_horizontal_line_3_5_scale_c;
- vp8_vertical_band_3_5_scale = vp8cx_vertical_band_3_5_scale_c;
- vp8_last_vertical_band_3_5_scale = vp8cx_last_vertical_band_3_5_scale_c;
- vp8_horizontal_line_3_4_scale = vp8cx_horizontal_line_3_4_scale_c;
- vp8_vertical_band_3_4_scale = vp8cx_vertical_band_3_4_scale_c;
- vp8_last_vertical_band_3_4_scale = vp8cx_last_vertical_band_3_4_scale_c;
- vp8_horizontal_line_2_3_scale = vp8cx_horizontal_line_2_3_scale_c;
- vp8_vertical_band_2_3_scale = vp8cx_vertical_band_2_3_scale_c;
- vp8_last_vertical_band_2_3_scale = vp8cx_last_vertical_band_2_3_scale_c;
- vp8_horizontal_line_4_5_scale = vp8cx_horizontal_line_4_5_scale_c;
- vp8_vertical_band_4_5_scale = vp8cx_vertical_band_4_5_scale_c;
- vp8_last_vertical_band_4_5_scale = vp8cx_last_vertical_band_4_5_scale_c;
-
-
- vp8_vertical_band_5_4_scale = vp8cx_vertical_band_5_4_scale_c;
- vp8_vertical_band_5_3_scale = vp8cx_vertical_band_5_3_scale_c;
- vp8_vertical_band_2_1_scale = vp8cx_vertical_band_2_1_scale_c;
- vp8_vertical_band_2_1_scale_i = vp8cx_vertical_band_2_1_scale_i_c;
- vp8_horizontal_line_2_1_scale = vp8cx_horizontal_line_2_1_scale_c;
- vp8_horizontal_line_5_3_scale = vp8cx_horizontal_line_5_3_scale_c;
- vp8_horizontal_line_5_4_scale = vp8cx_horizontal_line_5_4_scale_c;
-
- }
-}
diff --git a/libvpx/vpx_scale/yv12config.h b/libvpx/vpx_scale/yv12config.h
index 6a8a1fc..7b8bd85 100644
--- a/libvpx/vpx_scale/yv12config.h
+++ b/libvpx/vpx_scale/yv12config.h
@@ -8,16 +8,17 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef YV12_CONFIG_H
#define YV12_CONFIG_H
+
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
+#include "vpx/vpx_integer.h"
+
#define VP8BORDERINPIXELS 32
-#define VP9BORDERINPIXELS 64
+#define VP9BORDERINPIXELS 96
#define VP9_INTERP_EXTEND 4
/*************************************
@@ -41,6 +42,8 @@ extern "C"
typedef struct yv12_buffer_config {
int y_width;
int y_height;
+ int y_crop_width;
+ int y_crop_height;
int y_stride;
/* int yinternal_width; */
@@ -49,11 +52,17 @@ extern "C"
int uv_stride;
/* int uvinternal_width; */
- unsigned char *y_buffer;
- unsigned char *u_buffer;
- unsigned char *v_buffer;
+ int alpha_width;
+ int alpha_height;
+ int alpha_stride;
- unsigned char *buffer_alloc;
+ uint8_t *y_buffer;
+ uint8_t *u_buffer;
+ uint8_t *v_buffer;
+ uint8_t *alpha_buffer;
+
+ uint8_t *buffer_alloc;
+ int buffer_alloc_sz;
int border;
int frame_size;
YUV_TYPE clrtype;
@@ -62,12 +71,22 @@ extern "C"
int flags;
} YV12_BUFFER_CONFIG;
- int vp8_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height, int border);
+ int vp8_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
+ int width, int height, int border);
+ int vp8_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
+ int width, int height, int border);
int vp8_yv12_de_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf);
+ int vp9_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
+ int width, int height, int ss_x, int ss_y,
+ int border);
+ int vp9_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
+ int width, int height, int ss_x, int ss_y,
+ int border);
+ int vp9_free_frame_buffer(YV12_BUFFER_CONFIG *ybf);
+
#ifdef __cplusplus
}
#endif
-
-#endif /*YV12_CONFIG_H*/
+#endif // YV12_CONFIG_H
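The new VP9 entry points take the chroma subsampling factors explicitly. An illustrative use of the allocator together with the border extension (the dimensions, include paths, and error handling here are placeholders, not taken from the patch):

#include <string.h>
#include "vpx_scale/yv12config.h"
#include "./vpx_scale_rtcd.h"

static int demo_alloc_and_extend(void) {
  YV12_BUFFER_CONFIG buf;
  memset(&buf, 0, sizeof(buf));
  /* 4:2:0: chroma halved in both directions, so ss_x = ss_y = 1 */
  if (vp9_alloc_frame_buffer(&buf, 352, 288, 1, 1, VP9BORDERINPIXELS) < 0)
    return -1;                          /* allocation failed */
  /* ... decode into buf ... */
  vp9_extend_frame_borders(&buf, 1, 1); /* replicate edges into the border */
  return vp9_free_frame_buffer(&buf);
}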
diff --git a/libvpx/vpxdec.c b/libvpx/vpxdec.c
index 9b728bf..513d7bd 100644
--- a/libvpx/vpxdec.c
+++ b/libvpx/vpxdec.c
@@ -12,6 +12,7 @@
/* This is a simple program that reads ivf files and decodes them
* using the new interface. Decoded frames are output as YV12 raw.
*/
+#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
@@ -22,7 +23,7 @@
#include "vpx_config.h"
#include "vpx/vpx_decoder.h"
#include "vpx_ports/vpx_timer.h"
-#if CONFIG_VP8_DECODER
+#if CONFIG_VP8_DECODER || CONFIG_VP9_DECODER
#include "vpx/vp8dx.h"
#endif
#if CONFIG_MD5
@@ -30,6 +31,7 @@
#endif
#include "tools_common.h"
#include "nestegg/include/nestegg/nestegg.h"
+#include "third_party/libyuv/include/libyuv/scale.h"
#if CONFIG_OS_SUPPORT
#if defined(_MSC_VER)
@@ -49,175 +51,175 @@
static const char *exec_name;
#define VP8_FOURCC (0x00385056)
-static const struct
-{
- char const *name;
- vpx_codec_iface_t *iface;
- unsigned int fourcc;
- unsigned int fourcc_mask;
-} ifaces[] =
-{
+#define VP9_FOURCC (0x00395056)
+static const struct {
+ char const *name;
+ const vpx_codec_iface_t *(*iface)(void);
+ unsigned int fourcc;
+ unsigned int fourcc_mask;
+} ifaces[] = {
#if CONFIG_VP8_DECODER
- {"vp8", &vpx_codec_vp8_dx_algo, VP8_FOURCC, 0x00FFFFFF},
+ {"vp8", vpx_codec_vp8_dx, VP8_FOURCC, 0x00FFFFFF},
+#endif
+#if CONFIG_VP9_DECODER
+ {"vp9", vpx_codec_vp9_dx, VP9_FOURCC, 0x00FFFFFF},
#endif
};
#include "args.h"
+static const arg_def_t looparg = ARG_DEF(NULL, "loops", 1,
+ "Number of times to decode the file");
static const arg_def_t codecarg = ARG_DEF(NULL, "codec", 1,
- "Codec to use");
+ "Codec to use");
static const arg_def_t use_yv12 = ARG_DEF(NULL, "yv12", 0,
- "Output raw YV12 frames");
+ "Output raw YV12 frames");
static const arg_def_t use_i420 = ARG_DEF(NULL, "i420", 0,
- "Output raw I420 frames");
+ "Output raw I420 frames");
static const arg_def_t flipuvarg = ARG_DEF(NULL, "flipuv", 0,
- "Flip the chroma planes in the output");
+ "Flip the chroma planes in the output");
static const arg_def_t noblitarg = ARG_DEF(NULL, "noblit", 0,
- "Don't process the decoded frames");
+ "Don't process the decoded frames");
static const arg_def_t progressarg = ARG_DEF(NULL, "progress", 0,
- "Show progress after each frame decodes");
+ "Show progress after each frame decodes");
static const arg_def_t limitarg = ARG_DEF(NULL, "limit", 1,
- "Stop decoding after n frames");
+ "Stop decoding after n frames");
+static const arg_def_t skiparg = ARG_DEF(NULL, "skip", 1,
+ "Skip the first n input frames");
static const arg_def_t postprocarg = ARG_DEF(NULL, "postproc", 0,
- "Postprocess decoded frames");
+ "Postprocess decoded frames");
static const arg_def_t summaryarg = ARG_DEF(NULL, "summary", 0,
- "Show timing summary");
+ "Show timing summary");
static const arg_def_t outputfile = ARG_DEF("o", "output", 1,
- "Output file name pattern (see below)");
+ "Output file name pattern (see below)");
static const arg_def_t threadsarg = ARG_DEF("t", "threads", 1,
- "Max threads to use");
+ "Max threads to use");
static const arg_def_t verbosearg = ARG_DEF("v", "verbose", 0,
- "Show version string");
+ "Show version string");
static const arg_def_t error_concealment = ARG_DEF(NULL, "error-concealment", 0,
- "Enable decoder error-concealment");
+ "Enable decoder error-concealment");
+static const arg_def_t scalearg = ARG_DEF("S", "scale", 0,
+ "Scale output frames uniformly");
#if CONFIG_MD5
static const arg_def_t md5arg = ARG_DEF(NULL, "md5", 0,
"Compute the MD5 sum of the decoded frame");
#endif
-static const arg_def_t *all_args[] =
-{
- &codecarg, &use_yv12, &use_i420, &flipuvarg, &noblitarg,
- &progressarg, &limitarg, &postprocarg, &summaryarg, &outputfile,
- &threadsarg, &verbosearg,
+static const arg_def_t *all_args[] = {
+ &codecarg, &use_yv12, &use_i420, &flipuvarg, &noblitarg,
+ &progressarg, &limitarg, &skiparg, &postprocarg, &summaryarg, &outputfile,
+ &threadsarg, &verbosearg, &scalearg,
#if CONFIG_MD5
- &md5arg,
+ &md5arg,
#endif
- &error_concealment,
- NULL
+ &error_concealment,
+ NULL
};
#if CONFIG_VP8_DECODER
static const arg_def_t addnoise_level = ARG_DEF(NULL, "noise-level", 1,
- "Enable VP8 postproc add noise");
+ "Enable VP8 postproc add noise");
static const arg_def_t deblock = ARG_DEF(NULL, "deblock", 0,
- "Enable VP8 deblocking");
+ "Enable VP8 deblocking");
static const arg_def_t demacroblock_level = ARG_DEF(NULL, "demacroblock-level", 1,
- "Enable VP8 demacroblocking, w/ level");
+ "Enable VP8 demacroblocking, w/ level");
static const arg_def_t pp_debug_info = ARG_DEF(NULL, "pp-debug-info", 1,
- "Enable VP8 visible debug info");
+ "Enable VP8 visible debug info");
static const arg_def_t pp_disp_ref_frame = ARG_DEF(NULL, "pp-dbg-ref-frame", 1,
- "Display only selected reference frame per macro block");
+ "Display only selected reference frame per macro block");
static const arg_def_t pp_disp_mb_modes = ARG_DEF(NULL, "pp-dbg-mb-modes", 1,
- "Display only selected macro block modes");
+ "Display only selected macro block modes");
static const arg_def_t pp_disp_b_modes = ARG_DEF(NULL, "pp-dbg-b-modes", 1,
- "Display only selected block modes");
+ "Display only selected block modes");
static const arg_def_t pp_disp_mvs = ARG_DEF(NULL, "pp-dbg-mvs", 1,
- "Draw only selected motion vectors");
+ "Draw only selected motion vectors");
static const arg_def_t mfqe = ARG_DEF(NULL, "mfqe", 0,
- "Enable multiframe quality enhancement");
+ "Enable multiframe quality enhancement");
-static const arg_def_t *vp8_pp_args[] =
-{
- &addnoise_level, &deblock, &demacroblock_level, &pp_debug_info,
- &pp_disp_ref_frame, &pp_disp_mb_modes, &pp_disp_b_modes, &pp_disp_mvs, &mfqe,
- NULL
+static const arg_def_t *vp8_pp_args[] = {
+ &addnoise_level, &deblock, &demacroblock_level, &pp_debug_info,
+ &pp_disp_ref_frame, &pp_disp_mb_modes, &pp_disp_b_modes, &pp_disp_mvs, &mfqe,
+ NULL
};
#endif
-static void usage_exit()
-{
- int i;
+static void usage_exit() {
+ int i;
- fprintf(stderr, "Usage: %s <options> filename\n\n"
- "Options:\n", exec_name);
- arg_show_usage(stderr, all_args);
+ fprintf(stderr, "Usage: %s <options> filename\n\n"
+ "Options:\n", exec_name);
+ arg_show_usage(stderr, all_args);
#if CONFIG_VP8_DECODER
- fprintf(stderr, "\nVP8 Postprocessing Options:\n");
- arg_show_usage(stderr, vp8_pp_args);
+ fprintf(stderr, "\nVP8 Postprocessing Options:\n");
+ arg_show_usage(stderr, vp8_pp_args);
#endif
- fprintf(stderr,
- "\nOutput File Patterns:\n\n"
- " The -o argument specifies the name of the file(s) to "
- "write to. If the\n argument does not include any escape "
- "characters, the output will be\n written to a single file. "
- "Otherwise, the filename will be calculated by\n expanding "
- "the following escape characters:\n");
- fprintf(stderr,
- "\n\t%%w - Frame width"
- "\n\t%%h - Frame height"
- "\n\t%%<n> - Frame number, zero padded to <n> places (1..9)"
- "\n\n Pattern arguments are only supported in conjunction "
- "with the --yv12 and\n --i420 options. If the -o option is "
- "not specified, the output will be\n directed to stdout.\n"
- );
- fprintf(stderr, "\nIncluded decoders:\n\n");
-
- for (i = 0; i < sizeof(ifaces) / sizeof(ifaces[0]); i++)
- fprintf(stderr, " %-6s - %s\n",
- ifaces[i].name,
- vpx_codec_iface_name(ifaces[i].iface));
-
- exit(EXIT_FAILURE);
+ fprintf(stderr,
+ "\nOutput File Patterns:\n\n"
+ " The -o argument specifies the name of the file(s) to "
+ "write to. If the\n argument does not include any escape "
+ "characters, the output will be\n written to a single file. "
+ "Otherwise, the filename will be calculated by\n expanding "
+ "the following escape characters:\n");
+ fprintf(stderr,
+ "\n\t%%w - Frame width"
+ "\n\t%%h - Frame height"
+ "\n\t%%<n> - Frame number, zero padded to <n> places (1..9)"
+ "\n\n Pattern arguments are only supported in conjunction "
+ "with the --yv12 and\n --i420 options. If the -o option is "
+ "not specified, the output will be\n directed to stdout.\n"
+ );
+ fprintf(stderr, "\nIncluded decoders:\n\n");
+
+ for (i = 0; i < sizeof(ifaces) / sizeof(ifaces[0]); i++)
+ fprintf(stderr, " %-6s - %s\n",
+ ifaces[i].name,
+ vpx_codec_iface_name(ifaces[i].iface()));
+
+ exit(EXIT_FAILURE);
}
-void die(const char *fmt, ...)
-{
- va_list ap;
- va_start(ap, fmt);
- vfprintf(stderr, fmt, ap);
- fprintf(stderr, "\n");
- usage_exit();
+void die(const char *fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ fprintf(stderr, "\n");
+ usage_exit();
}
-static unsigned int mem_get_le16(const void *vmem)
-{
- unsigned int val;
- const unsigned char *mem = (const unsigned char *)vmem;
+static unsigned int mem_get_le16(const void *vmem) {
+ unsigned int val;
+ const unsigned char *mem = (const unsigned char *)vmem;
- val = mem[1] << 8;
- val |= mem[0];
- return val;
+ val = mem[1] << 8;
+ val |= mem[0];
+ return val;
}
-static unsigned int mem_get_le32(const void *vmem)
-{
- unsigned int val;
- const unsigned char *mem = (const unsigned char *)vmem;
+static unsigned int mem_get_le32(const void *vmem) {
+ unsigned int val;
+ const unsigned char *mem = (const unsigned char *)vmem;
- val = mem[3] << 24;
- val |= mem[2] << 16;
- val |= mem[1] << 8;
- val |= mem[0];
- return val;
+ val = mem[3] << 24;
+ val |= mem[2] << 16;
+ val |= mem[1] << 8;
+ val |= mem[0];
+ return val;
}
-enum file_kind
-{
- RAW_FILE,
- IVF_FILE,
- WEBM_FILE
+enum file_kind {
+ RAW_FILE,
+ IVF_FILE,
+ WEBM_FILE
};
-struct input_ctx
-{
- enum file_kind kind;
- FILE *infile;
- nestegg *nestegg_ctx;
- nestegg_packet *pkt;
- unsigned int chunk;
- unsigned int chunks;
- unsigned int video_track;
+struct input_ctx {
+ enum file_kind kind;
+ FILE *infile;
+ nestegg *nestegg_ctx;
+ nestegg_packet *pkt;
+ unsigned int chunk;
+ unsigned int chunks;
+ unsigned int video_track;
};
#define IVF_FRAME_HDR_SZ (sizeof(uint32_t) + sizeof(uint64_t))
@@ -225,163 +227,136 @@ struct input_ctx
static int read_frame(struct input_ctx *input,
uint8_t **buf,
size_t *buf_sz,
- size_t *buf_alloc_sz)
-{
- char raw_hdr[IVF_FRAME_HDR_SZ];
- size_t new_buf_sz;
- FILE *infile = input->infile;
- enum file_kind kind = input->kind;
- if(kind == WEBM_FILE)
- {
- if(input->chunk >= input->chunks)
- {
- unsigned int track;
-
- do
- {
- /* End of this packet, get another. */
- if(input->pkt)
- nestegg_free_packet(input->pkt);
-
- if(nestegg_read_packet(input->nestegg_ctx, &input->pkt) <= 0
- || nestegg_packet_track(input->pkt, &track))
- return 1;
-
- } while(track != input->video_track);
-
- if(nestegg_packet_count(input->pkt, &input->chunks))
- return 1;
- input->chunk = 0;
- }
+ size_t *buf_alloc_sz) {
+ char raw_hdr[IVF_FRAME_HDR_SZ];
+ size_t new_buf_sz;
+ FILE *infile = input->infile;
+ enum file_kind kind = input->kind;
+ if (kind == WEBM_FILE) {
+ if (input->chunk >= input->chunks) {
+ unsigned int track;
+
+ do {
+ /* End of this packet, get another. */
+ if (input->pkt)
+ nestegg_free_packet(input->pkt);
+
+ if (nestegg_read_packet(input->nestegg_ctx, &input->pkt) <= 0
+ || nestegg_packet_track(input->pkt, &track))
+ return 1;
+
+ } while (track != input->video_track);
+
+ if (nestegg_packet_count(input->pkt, &input->chunks))
+ return 1;
+ input->chunk = 0;
+ }
- if(nestegg_packet_data(input->pkt, input->chunk, buf, buf_sz))
- return 1;
- input->chunk++;
+ if (nestegg_packet_data(input->pkt, input->chunk, buf, buf_sz))
+ return 1;
+ input->chunk++;
- return 0;
- }
- /* For both the raw and ivf formats, the frame size is the first 4 bytes
- * of the frame header. We just need to special case on the header
- * size.
- */
- else if (fread(raw_hdr, kind==IVF_FILE
- ? IVF_FRAME_HDR_SZ : RAW_FRAME_HDR_SZ, 1, infile) != 1)
- {
- if (!feof(infile))
- fprintf(stderr, "Failed to read frame size\n");
+ return 0;
+ }
+ /* For both the raw and ivf formats, the frame size is the first 4 bytes
+ * of the frame header. We just need to special case on the header
+ * size.
+ */
+ else if (fread(raw_hdr, kind == IVF_FILE
+ ? IVF_FRAME_HDR_SZ : RAW_FRAME_HDR_SZ, 1, infile) != 1) {
+ if (!feof(infile))
+ fprintf(stderr, "Failed to read frame size\n");
- new_buf_sz = 0;
- }
- else
- {
- new_buf_sz = mem_get_le32(raw_hdr);
-
- if (new_buf_sz > 256 * 1024 * 1024)
- {
- fprintf(stderr, "Error: Read invalid frame size (%u)\n",
- (unsigned int)new_buf_sz);
- new_buf_sz = 0;
- }
+ new_buf_sz = 0;
+ } else {
+ new_buf_sz = mem_get_le32(raw_hdr);
- if (kind == RAW_FILE && new_buf_sz > 256 * 1024)
- fprintf(stderr, "Warning: Read invalid frame size (%u)"
- " - not a raw file?\n", (unsigned int)new_buf_sz);
-
- if (new_buf_sz > *buf_alloc_sz)
- {
- uint8_t *new_buf = realloc(*buf, 2 * new_buf_sz);
-
- if (new_buf)
- {
- *buf = new_buf;
- *buf_alloc_sz = 2 * new_buf_sz;
- }
- else
- {
- fprintf(stderr, "Failed to allocate compressed data buffer\n");
- new_buf_sz = 0;
- }
- }
+ if (new_buf_sz > 256 * 1024 * 1024) {
+ fprintf(stderr, "Error: Read invalid frame size (%u)\n",
+ (unsigned int)new_buf_sz);
+ new_buf_sz = 0;
}
- *buf_sz = new_buf_sz;
+ if (kind == RAW_FILE && new_buf_sz > 256 * 1024)
+ fprintf(stderr, "Warning: Read invalid frame size (%u)"
+ " - not a raw file?\n", (unsigned int)new_buf_sz);
- if (!feof(infile))
- {
- if (fread(*buf, 1, *buf_sz, infile) != *buf_sz)
- {
- fprintf(stderr, "Failed to read full frame\n");
- return 1;
- }
+ if (new_buf_sz > *buf_alloc_sz) {
+ uint8_t *new_buf = realloc(*buf, 2 * new_buf_sz);
- return 0;
+ if (new_buf) {
+ *buf = new_buf;
+ *buf_alloc_sz = 2 * new_buf_sz;
+ } else {
+ fprintf(stderr, "Failed to allocate compressed data buffer\n");
+ new_buf_sz = 0;
+ }
}
+ }
- return 1;
+ *buf_sz = new_buf_sz;
+
+ if (!feof(infile)) {
+ if (fread(*buf, 1, *buf_sz, infile) != *buf_sz) {
+ fprintf(stderr, "Failed to read full frame\n");
+ return 1;
+ }
+
+ return 0;
+ }
+
+ return 1;
}
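read_frame() above grows the compressed-data buffer to twice the incoming frame size, so later frames of similar size usually fit without another realloc, and it treats allocation failure as a zero-length frame. A minimal sketch of that growth policy in isolation; grow_buffer is a hypothetical name:

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical helper isolating read_frame()'s growth policy:
 * allocate 2x the required size so subsequent frames fit in place. */
static int grow_buffer(uint8_t **buf, size_t *alloc_sz, size_t needed) {
  uint8_t *p;
  if (needed <= *alloc_sz)
    return 0;               /* already large enough */
  p = realloc(*buf, 2 * needed);
  if (!p)
    return 1;               /* caller maps this to a zero-size frame */
  *buf = p;
  *alloc_sz = 2 * needed;
  return 0;
}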
-void *out_open(const char *out_fn, int do_md5)
-{
- void *out = NULL;
+void *out_open(const char *out_fn, int do_md5) {
+ void *out = NULL;
- if (do_md5)
- {
+ if (do_md5) {
#if CONFIG_MD5
- MD5Context *md5_ctx = out = malloc(sizeof(MD5Context));
- (void)out_fn;
- MD5Init(md5_ctx);
+ MD5Context *md5_ctx = out = malloc(sizeof(MD5Context));
+ (void)out_fn;
+ MD5Init(md5_ctx);
#endif
+ } else {
+ FILE *outfile = out = strcmp("-", out_fn) ? fopen(out_fn, "wb")
+ : set_binary_mode(stdout);
+
+ if (!outfile) {
+ fprintf(stderr, "Failed to output file");
+ exit(EXIT_FAILURE);
}
- else
- {
- FILE *outfile = out = strcmp("-", out_fn) ? fopen(out_fn, "wb")
- : set_binary_mode(stdout);
-
- if (!outfile)
- {
- fprintf(stderr, "Failed to output file");
- exit(EXIT_FAILURE);
- }
- }
+ }
- return out;
+ return out;
}
-void out_put(void *out, const uint8_t *buf, unsigned int len, int do_md5)
-{
- if (do_md5)
- {
+void out_put(void *out, const uint8_t *buf, unsigned int len, int do_md5) {
+ if (do_md5) {
#if CONFIG_MD5
- MD5Update(out, buf, len);
+ MD5Update(out, buf, len);
#endif
- }
- else
- {
- (void) fwrite(buf, 1, len, out);
- }
+ } else {
+ (void) fwrite(buf, 1, len, out);
+ }
}
-void out_close(void *out, const char *out_fn, int do_md5)
-{
- if (do_md5)
- {
+void out_close(void *out, const char *out_fn, int do_md5) {
+ if (do_md5) {
#if CONFIG_MD5
- uint8_t md5[16];
- int i;
+ uint8_t md5[16];
+ int i;
- MD5Final(md5, out);
- free(out);
+ MD5Final(md5, out);
+ free(out);
- for (i = 0; i < 16; i++)
- printf("%02x", md5[i]);
+ for (i = 0; i < 16; i++)
+ printf("%02x", md5[i]);
- printf(" %s\n", out_fn);
+ printf(" %s\n", out_fn);
#endif
- }
- else
- {
- fclose(out);
- }
+ } else {
+ fclose(out);
+ }
}
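out_open()/out_put()/out_close() hide whether bytes land in a file or in an MD5 accumulator behind a void pointer selected by do_md5, so the decode loop writes frames the same way in both modes. A minimal usage sketch under that convention (the filename is arbitrary, write_frame is a hypothetical wrapper, and the MD5 branch requires CONFIG_MD5):

/* Hypothetical wrapper: send len bytes either to "out.i420" or into
 * an MD5 digest, depending on do_md5 -- identical call sequence. */
static void write_frame(const uint8_t *data, unsigned int len, int do_md5) {
  void *out = out_open("out.i420", do_md5);  /* FILE* or MD5Context* */
  out_put(out, data, len, do_md5);
  out_close(out, "out.i420", do_md5);        /* prints the digest when do_md5 */
}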
unsigned int file_is_ivf(FILE *infile,
@@ -389,56 +364,50 @@ unsigned int file_is_ivf(FILE *infile,
unsigned int *width,
unsigned int *height,
unsigned int *fps_den,
- unsigned int *fps_num)
-{
- char raw_hdr[32];
- int is_ivf = 0;
-
- if (fread(raw_hdr, 1, 32, infile) == 32)
- {
- if (raw_hdr[0] == 'D' && raw_hdr[1] == 'K'
- && raw_hdr[2] == 'I' && raw_hdr[3] == 'F')
- {
- is_ivf = 1;
-
- if (mem_get_le16(raw_hdr + 4) != 0)
- fprintf(stderr, "Error: Unrecognized IVF version! This file may not"
- " decode properly.");
-
- *fourcc = mem_get_le32(raw_hdr + 8);
- *width = mem_get_le16(raw_hdr + 12);
- *height = mem_get_le16(raw_hdr + 14);
- *fps_num = mem_get_le32(raw_hdr + 16);
- *fps_den = mem_get_le32(raw_hdr + 20);
-
- /* Some versions of vpxenc used 1/(2*fps) for the timebase, so
- * we can guess the framerate using only the timebase in this
- * case. Other files would require reading ahead to guess the
- * timebase, like we do for webm.
- */
- if(*fps_num < 1000)
- {
- /* Correct for the factor of 2 applied to the timebase in the
- * encoder.
- */
- if(*fps_num&1)*fps_den<<=1;
- else *fps_num>>=1;
- }
- else
- {
- /* Don't know FPS for sure, and don't have readahead code
- * (yet?), so just default to 30fps.
- */
- *fps_num = 30;
- *fps_den = 1;
- }
- }
+ unsigned int *fps_num) {
+ char raw_hdr[32];
+ int is_ivf = 0;
+
+ if (fread(raw_hdr, 1, 32, infile) == 32) {
+ if (raw_hdr[0] == 'D' && raw_hdr[1] == 'K'
+ && raw_hdr[2] == 'I' && raw_hdr[3] == 'F') {
+ is_ivf = 1;
+
+ if (mem_get_le16(raw_hdr + 4) != 0)
+ fprintf(stderr, "Error: Unrecognized IVF version! This file may not"
+ " decode properly.");
+
+ *fourcc = mem_get_le32(raw_hdr + 8);
+ *width = mem_get_le16(raw_hdr + 12);
+ *height = mem_get_le16(raw_hdr + 14);
+ *fps_num = mem_get_le32(raw_hdr + 16);
+ *fps_den = mem_get_le32(raw_hdr + 20);
+
+ /* Some versions of vpxenc used 1/(2*fps) for the timebase, so
+ * we can guess the framerate using only the timebase in this
+ * case. Other files would require reading ahead to guess the
+ * timebase, like we do for webm.
+ */
+ if (*fps_num < 1000) {
+ /* Correct for the factor of 2 applied to the timebase in the
+ * encoder.
+ */
+ if (*fps_num & 1)*fps_den <<= 1;
+ else *fps_num >>= 1;
+ } else {
+ /* Don't know FPS for sure, and don't have readahead code
+ * (yet?), so just default to 30fps.
+ */
+ *fps_num = 30;
+ *fps_den = 1;
+ }
}
+ }
- if (!is_ivf)
- rewind(infile);
+ if (!is_ivf)
+ rewind(infile);
- return is_ivf;
+ return is_ivf;
}
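Worked example of the timebase correction above: a header with rate 60 and scale 1 came from an encoder running at 30 fps with a 1/(2*30) s timebase; 60 is even, so fps_num is halved, giving 30/1. With rate 15 and scale 1 the numerator is odd, so the denominator is doubled instead, giving 15/2 = 7.5 fps. Either branch yields rate/(2*scale) without integer truncation.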
@@ -447,126 +416,121 @@ unsigned int file_is_raw(FILE *infile,
unsigned int *width,
unsigned int *height,
unsigned int *fps_den,
- unsigned int *fps_num)
-{
- unsigned char buf[32];
- int is_raw = 0;
- vpx_codec_stream_info_t si;
-
- si.sz = sizeof(si);
-
- if (fread(buf, 1, 32, infile) == 32)
- {
- int i;
-
- if(mem_get_le32(buf) < 256 * 1024 * 1024)
- for (i = 0; i < sizeof(ifaces) / sizeof(ifaces[0]); i++)
- if(!vpx_codec_peek_stream_info(ifaces[i].iface,
- buf + 4, 32 - 4, &si))
- {
- is_raw = 1;
- *fourcc = ifaces[i].fourcc;
- *width = si.w;
- *height = si.h;
- *fps_num = 30;
- *fps_den = 1;
- break;
- }
- }
+ unsigned int *fps_num) {
+ unsigned char buf[32];
+ int is_raw = 0;
+ vpx_codec_stream_info_t si;
- rewind(infile);
- return is_raw;
+ si.sz = sizeof(si);
+
+ if (fread(buf, 1, 32, infile) == 32) {
+ int i;
+
+ if (mem_get_le32(buf) < 256 * 1024 * 1024)
+ for (i = 0; i < sizeof(ifaces) / sizeof(ifaces[0]); i++)
+ if (!vpx_codec_peek_stream_info(ifaces[i].iface(),
+ buf + 4, 32 - 4, &si)) {
+ is_raw = 1;
+ *fourcc = ifaces[i].fourcc;
+ *width = si.w;
+ *height = si.h;
+ *fps_num = 30;
+ *fps_den = 1;
+ break;
+ }
+ }
+
+ rewind(infile);
+ return is_raw;
}
static int
-nestegg_read_cb(void *buffer, size_t length, void *userdata)
-{
- FILE *f = userdata;
-
- if(fread(buffer, 1, length, f) < length)
- {
- if (ferror(f))
- return -1;
- if (feof(f))
- return 0;
- }
- return 1;
+nestegg_read_cb(void *buffer, size_t length, void *userdata) {
+ FILE *f = userdata;
+
+ if (fread(buffer, 1, length, f) < length) {
+ if (ferror(f))
+ return -1;
+ if (feof(f))
+ return 0;
+ }
+ return 1;
}
static int
-nestegg_seek_cb(int64_t offset, int whence, void * userdata)
-{
- switch(whence) {
- case NESTEGG_SEEK_SET: whence = SEEK_SET; break;
- case NESTEGG_SEEK_CUR: whence = SEEK_CUR; break;
- case NESTEGG_SEEK_END: whence = SEEK_END; break;
- };
- return fseek(userdata, (long)offset, whence)? -1 : 0;
+nestegg_seek_cb(int64_t offset, int whence, void *userdata) {
+ switch (whence) {
+ case NESTEGG_SEEK_SET:
+ whence = SEEK_SET;
+ break;
+ case NESTEGG_SEEK_CUR:
+ whence = SEEK_CUR;
+ break;
+ case NESTEGG_SEEK_END:
+ whence = SEEK_END;
+ break;
+  }
+ return fseek(userdata, (long)offset, whence) ? -1 : 0;
}
static int64_t
-nestegg_tell_cb(void * userdata)
-{
- return ftell(userdata);
+nestegg_tell_cb(void *userdata) {
+ return ftell(userdata);
}
static void
-nestegg_log_cb(nestegg * context, unsigned int severity, char const * format,
- ...)
-{
- va_list ap;
-
- va_start(ap, format);
- vfprintf(stderr, format, ap);
- fprintf(stderr, "\n");
- va_end(ap);
+nestegg_log_cb(nestegg *context, unsigned int severity, char const *format,
+ ...) {
+ va_list ap;
+
+ va_start(ap, format);
+ vfprintf(stderr, format, ap);
+ fprintf(stderr, "\n");
+ va_end(ap);
}
static int
webm_guess_framerate(struct input_ctx *input,
unsigned int *fps_den,
- unsigned int *fps_num)
-{
- unsigned int i;
- uint64_t tstamp=0;
-
- /* Guess the framerate. Read up to 1 second, or 50 video packets,
- * whichever comes first.
- */
- for(i=0; tstamp < 1000000000 && i < 50;)
- {
- nestegg_packet * pkt;
- unsigned int track;
-
- if(nestegg_read_packet(input->nestegg_ctx, &pkt) <= 0)
- break;
-
- nestegg_packet_track(pkt, &track);
- if(track == input->video_track)
- {
- nestegg_packet_tstamp(pkt, &tstamp);
- i++;
- }
+ unsigned int *fps_num) {
+ unsigned int i;
+ uint64_t tstamp = 0;
+
+ /* Guess the framerate. Read up to 1 second, or 50 video packets,
+ * whichever comes first.
+ */
+ for (i = 0; tstamp < 1000000000 && i < 50;) {
+ nestegg_packet *pkt;
+ unsigned int track;
+
+ if (nestegg_read_packet(input->nestegg_ctx, &pkt) <= 0)
+ break;
- nestegg_free_packet(pkt);
+ nestegg_packet_track(pkt, &track);
+ if (track == input->video_track) {
+ nestegg_packet_tstamp(pkt, &tstamp);
+ i++;
}
- if(nestegg_track_seek(input->nestegg_ctx, input->video_track, 0))
- goto fail;
+ nestegg_free_packet(pkt);
+ }
- *fps_num = (i - 1) * 1000000;
- *fps_den = (unsigned int)(tstamp / 1000);
- return 0;
+ if (nestegg_track_seek(input->nestegg_ctx, input->video_track, 0))
+ goto fail;
+
+ *fps_num = (i - 1) * 1000000;
+ *fps_den = (unsigned int)(tstamp / 1000);
+ return 0;
fail:
- nestegg_destroy(input->nestegg_ctx);
- input->nestegg_ctx = NULL;
- rewind(input->infile);
- return 1;
+ nestegg_destroy(input->nestegg_ctx);
+ input->nestegg_ctx = NULL;
+ rewind(input->infile);
+ return 1;
}
@@ -576,586 +540,641 @@ file_is_webm(struct input_ctx *input,
unsigned int *width,
unsigned int *height,
unsigned int *fps_den,
- unsigned int *fps_num)
-{
- unsigned int i, n;
- int track_type = -1;
+ unsigned int *fps_num) {
+ unsigned int i, n;
+ int track_type = -1;
+ int codec_id;
- nestegg_io io = {nestegg_read_cb, nestegg_seek_cb, nestegg_tell_cb, 0};
- nestegg_video_params params;
+ nestegg_io io = {nestegg_read_cb, nestegg_seek_cb, nestegg_tell_cb, 0};
+ nestegg_video_params params;
- io.userdata = input->infile;
- if(nestegg_init(&input->nestegg_ctx, io, NULL))
- goto fail;
+ io.userdata = input->infile;
+ if (nestegg_init(&input->nestegg_ctx, io, NULL))
+ goto fail;
- if(nestegg_track_count(input->nestegg_ctx, &n))
- goto fail;
+ if (nestegg_track_count(input->nestegg_ctx, &n))
+ goto fail;
- for(i=0; i<n; i++)
- {
- track_type = nestegg_track_type(input->nestegg_ctx, i);
+ for (i = 0; i < n; i++) {
+ track_type = nestegg_track_type(input->nestegg_ctx, i);
- if(track_type == NESTEGG_TRACK_VIDEO)
- break;
- else if(track_type < 0)
- goto fail;
- }
-
- if(nestegg_track_codec_id(input->nestegg_ctx, i) != NESTEGG_CODEC_VP8)
- {
- fprintf(stderr, "Not VP8 video, quitting.\n");
- exit(1);
- }
+ if (track_type == NESTEGG_TRACK_VIDEO)
+ break;
+ else if (track_type < 0)
+ goto fail;
+ }
- input->video_track = i;
-
- if(nestegg_track_video_params(input->nestegg_ctx, i, &params))
- goto fail;
-
- *fps_den = 0;
- *fps_num = 0;
+ codec_id = nestegg_track_codec_id(input->nestegg_ctx, i);
+ if (codec_id == NESTEGG_CODEC_VP8) {
*fourcc = VP8_FOURCC;
- *width = params.width;
- *height = params.height;
- return 1;
+ } else if (codec_id == NESTEGG_CODEC_VP9) {
+ *fourcc = VP9_FOURCC;
+ } else {
+ fprintf(stderr, "Not VPx video, quitting.\n");
+ exit(1);
+ }
+
+ input->video_track = i;
+
+ if (nestegg_track_video_params(input->nestegg_ctx, i, &params))
+ goto fail;
+
+ *fps_den = 0;
+ *fps_num = 0;
+ *width = params.width;
+ *height = params.height;
+ return 1;
fail:
- input->nestegg_ctx = NULL;
- rewind(input->infile);
- return 0;
+ input->nestegg_ctx = NULL;
+ rewind(input->infile);
+ return 0;
}
-void show_progress(int frame_in, int frame_out, unsigned long dx_time)
-{
- fprintf(stderr, "%d decoded frames/%d showed frames in %lu us (%.2f fps)\r",
- frame_in, frame_out, dx_time,
- (float)frame_out * 1000000.0 / (float)dx_time);
+void show_progress(int frame_in, int frame_out, unsigned long dx_time) {
+ fprintf(stderr, "%d decoded frames/%d showed frames in %lu us (%.2f fps)\r",
+ frame_in, frame_out, dx_time,
+ (float)frame_out * 1000000.0 / (float)dx_time);
}
void generate_filename(const char *pattern, char *out, size_t q_len,
unsigned int d_w, unsigned int d_h,
- unsigned int frame_in)
-{
- const char *p = pattern;
- char *q = out;
-
- do
- {
- char *next_pat = strchr(p, '%');
-
- if(p == next_pat)
- {
- size_t pat_len;
-
- /* parse the pattern */
- q[q_len - 1] = '\0';
- switch(p[1])
- {
- case 'w': snprintf(q, q_len - 1, "%d", d_w); break;
- case 'h': snprintf(q, q_len - 1, "%d", d_h); break;
- case '1': snprintf(q, q_len - 1, "%d", frame_in); break;
- case '2': snprintf(q, q_len - 1, "%02d", frame_in); break;
- case '3': snprintf(q, q_len - 1, "%03d", frame_in); break;
- case '4': snprintf(q, q_len - 1, "%04d", frame_in); break;
- case '5': snprintf(q, q_len - 1, "%05d", frame_in); break;
- case '6': snprintf(q, q_len - 1, "%06d", frame_in); break;
- case '7': snprintf(q, q_len - 1, "%07d", frame_in); break;
- case '8': snprintf(q, q_len - 1, "%08d", frame_in); break;
- case '9': snprintf(q, q_len - 1, "%09d", frame_in); break;
- default:
- die("Unrecognized pattern %%%c\n", p[1]);
- }
-
- pat_len = strlen(q);
- if(pat_len >= q_len - 1)
- die("Output filename too long.\n");
- q += pat_len;
- p += 2;
- q_len -= pat_len;
- }
- else
- {
- size_t copy_len;
-
- /* copy the next segment */
- if(!next_pat)
- copy_len = strlen(p);
- else
- copy_len = next_pat - p;
-
- if(copy_len >= q_len - 1)
- die("Output filename too long.\n");
-
- memcpy(q, p, copy_len);
- q[copy_len] = '\0';
- q += copy_len;
- p += copy_len;
- q_len -= copy_len;
- }
- } while(*p);
+ unsigned int frame_in) {
+ const char *p = pattern;
+ char *q = out;
+
+ do {
+ char *next_pat = strchr(p, '%');
+
+ if (p == next_pat) {
+ size_t pat_len;
+
+ /* parse the pattern */
+ q[q_len - 1] = '\0';
+ switch (p[1]) {
+ case 'w':
+ snprintf(q, q_len - 1, "%d", d_w);
+ break;
+ case 'h':
+ snprintf(q, q_len - 1, "%d", d_h);
+ break;
+ case '1':
+ snprintf(q, q_len - 1, "%d", frame_in);
+ break;
+ case '2':
+ snprintf(q, q_len - 1, "%02d", frame_in);
+ break;
+ case '3':
+ snprintf(q, q_len - 1, "%03d", frame_in);
+ break;
+ case '4':
+ snprintf(q, q_len - 1, "%04d", frame_in);
+ break;
+ case '5':
+ snprintf(q, q_len - 1, "%05d", frame_in);
+ break;
+ case '6':
+ snprintf(q, q_len - 1, "%06d", frame_in);
+ break;
+ case '7':
+ snprintf(q, q_len - 1, "%07d", frame_in);
+ break;
+ case '8':
+ snprintf(q, q_len - 1, "%08d", frame_in);
+ break;
+ case '9':
+ snprintf(q, q_len - 1, "%09d", frame_in);
+ break;
+ default:
+ die("Unrecognized pattern %%%c\n", p[1]);
+ }
+
+ pat_len = strlen(q);
+ if (pat_len >= q_len - 1)
+ die("Output filename too long.\n");
+ q += pat_len;
+ p += 2;
+ q_len -= pat_len;
+ } else {
+ size_t copy_len;
+
+ /* copy the next segment */
+ if (!next_pat)
+ copy_len = strlen(p);
+ else
+ copy_len = next_pat - p;
+
+ if (copy_len >= q_len - 1)
+ die("Output filename too long.\n");
+
+ memcpy(q, p, copy_len);
+ q[copy_len] = '\0';
+ q += copy_len;
+ p += copy_len;
+ q_len -= copy_len;
+ }
+ } while (*p);
}
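For reference, generate_filename() expands %w, %h and %<n> against the frame geometry and index; a usage sketch (the buffer name and pattern here are arbitrary):

char name[PATH_MAX];

/* "%wx%h-%4.yuv" with a 640x480 frame numbered 7 -> "640x480-0007.yuv" */
generate_filename("%wx%h-%4.yuv", name, sizeof(name) - 1, 640, 480, 7);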
-int main(int argc, const char **argv_)
-{
- vpx_codec_ctx_t decoder;
- char *fn = NULL;
- int i;
- uint8_t *buf = NULL;
- size_t buf_sz = 0, buf_alloc_sz = 0;
- FILE *infile;
- int frame_in = 0, frame_out = 0, flipuv = 0, noblit = 0, do_md5 = 0, progress = 0;
- int stop_after = 0, postproc = 0, summary = 0, quiet = 1;
- int ec_enabled = 0;
- vpx_codec_iface_t *iface = NULL;
- unsigned int fourcc;
- unsigned long dx_time = 0;
- struct arg arg;
- char **argv, **argi, **argj;
- const char *outfile_pattern = 0;
- char outfile[PATH_MAX];
- int single_file;
- int use_y4m = 1;
- unsigned int width;
- unsigned int height;
- unsigned int fps_den;
- unsigned int fps_num;
- void *out = NULL;
- vpx_codec_dec_cfg_t cfg = {0};
+int main_loop(int argc, const char **argv_) {
+ vpx_codec_ctx_t decoder;
+ char *fn = NULL;
+ int i;
+ uint8_t *buf = NULL;
+ size_t buf_sz = 0, buf_alloc_sz = 0;
+ FILE *infile;
+ int frame_in = 0, frame_out = 0, flipuv = 0, noblit = 0, do_md5 = 0, progress = 0;
+ int stop_after = 0, postproc = 0, summary = 0, quiet = 1;
+ int arg_skip = 0;
+ int ec_enabled = 0;
+ vpx_codec_iface_t *iface = NULL;
+ unsigned int fourcc;
+ unsigned long dx_time = 0;
+ struct arg arg;
+ char **argv, **argi, **argj;
+ const char *outfile_pattern = 0;
+ char outfile[PATH_MAX];
+ int single_file;
+ int use_y4m = 1;
+ unsigned int width;
+ unsigned int height;
+ unsigned int fps_den;
+ unsigned int fps_num;
+ void *out = NULL;
+ vpx_codec_dec_cfg_t cfg = {0};
#if CONFIG_VP8_DECODER
- vp8_postproc_cfg_t vp8_pp_cfg = {0};
- int vp8_dbg_color_ref_frame = 0;
- int vp8_dbg_color_mb_modes = 0;
- int vp8_dbg_color_b_modes = 0;
- int vp8_dbg_display_mv = 0;
+ vp8_postproc_cfg_t vp8_pp_cfg = {0};
+ int vp8_dbg_color_ref_frame = 0;
+ int vp8_dbg_color_mb_modes = 0;
+ int vp8_dbg_color_b_modes = 0;
+ int vp8_dbg_display_mv = 0;
#endif
- struct input_ctx input = {0};
- int frames_corrupted = 0;
- int dec_flags = 0;
-
- /* Parse command line */
- exec_name = argv_[0];
- argv = argv_dup(argc - 1, argv_ + 1);
-
- for (argi = argj = argv; (*argj = *argi); argi += arg.argv_step)
- {
- memset(&arg, 0, sizeof(arg));
- arg.argv_step = 1;
-
- if (arg_match(&arg, &codecarg, argi))
- {
- int j, k = -1;
-
- for (j = 0; j < sizeof(ifaces) / sizeof(ifaces[0]); j++)
- if (!strcmp(ifaces[j].name, arg.val))
- k = j;
-
- if (k >= 0)
- iface = ifaces[k].iface;
- else
- die("Error: Unrecognized argument (%s) to --codec\n",
- arg.val);
- }
- else if (arg_match(&arg, &outputfile, argi))
- outfile_pattern = arg.val;
- else if (arg_match(&arg, &use_yv12, argi))
- {
- use_y4m = 0;
- flipuv = 1;
- }
- else if (arg_match(&arg, &use_i420, argi))
- {
- use_y4m = 0;
- flipuv = 0;
- }
- else if (arg_match(&arg, &flipuvarg, argi))
- flipuv = 1;
- else if (arg_match(&arg, &noblitarg, argi))
- noblit = 1;
- else if (arg_match(&arg, &progressarg, argi))
- progress = 1;
- else if (arg_match(&arg, &limitarg, argi))
- stop_after = arg_parse_uint(&arg);
- else if (arg_match(&arg, &postprocarg, argi))
- postproc = 1;
- else if (arg_match(&arg, &md5arg, argi))
- do_md5 = 1;
- else if (arg_match(&arg, &summaryarg, argi))
- summary = 1;
- else if (arg_match(&arg, &threadsarg, argi))
- cfg.threads = arg_parse_uint(&arg);
- else if (arg_match(&arg, &verbosearg, argi))
- quiet = 0;
+ struct input_ctx input = {0};
+ int frames_corrupted = 0;
+ int dec_flags = 0;
+ int do_scale = 0;
+ int stream_w = 0, stream_h = 0;
+ vpx_image_t *scaled_img = NULL;
+ int frame_avail, got_data;
+
+ /* Parse command line */
+ exec_name = argv_[0];
+ argv = argv_dup(argc - 1, argv_ + 1);
+
+ for (argi = argj = argv; (*argj = *argi); argi += arg.argv_step) {
+ memset(&arg, 0, sizeof(arg));
+ arg.argv_step = 1;
+
+ if (arg_match(&arg, &codecarg, argi)) {
+ int j, k = -1;
+
+ for (j = 0; j < sizeof(ifaces) / sizeof(ifaces[0]); j++)
+ if (!strcmp(ifaces[j].name, arg.val))
+ k = j;
+
+ if (k >= 0)
+ iface = ifaces[k].iface();
+ else
+ die("Error: Unrecognized argument (%s) to --codec\n",
+ arg.val);
+ } else if (arg_match(&arg, &looparg, argi)) {
+ // no-op
+ } else if (arg_match(&arg, &outputfile, argi))
+ outfile_pattern = arg.val;
+ else if (arg_match(&arg, &use_yv12, argi)) {
+ use_y4m = 0;
+ flipuv = 1;
+ } else if (arg_match(&arg, &use_i420, argi)) {
+ use_y4m = 0;
+ flipuv = 0;
+ } else if (arg_match(&arg, &flipuvarg, argi))
+ flipuv = 1;
+ else if (arg_match(&arg, &noblitarg, argi))
+ noblit = 1;
+ else if (arg_match(&arg, &progressarg, argi))
+ progress = 1;
+ else if (arg_match(&arg, &limitarg, argi))
+ stop_after = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &skiparg, argi))
+ arg_skip = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &postprocarg, argi))
+ postproc = 1;
+ else if (arg_match(&arg, &md5arg, argi))
+ do_md5 = 1;
+ else if (arg_match(&arg, &summaryarg, argi))
+ summary = 1;
+ else if (arg_match(&arg, &threadsarg, argi))
+ cfg.threads = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &verbosearg, argi))
+ quiet = 0;
+ else if (arg_match(&arg, &scalearg, argi))
+ do_scale = 1;
#if CONFIG_VP8_DECODER
- else if (arg_match(&arg, &addnoise_level, argi))
- {
- postproc = 1;
- vp8_pp_cfg.post_proc_flag |= VP8_ADDNOISE;
- vp8_pp_cfg.noise_level = arg_parse_uint(&arg);
- }
- else if (arg_match(&arg, &demacroblock_level, argi))
- {
- postproc = 1;
- vp8_pp_cfg.post_proc_flag |= VP8_DEMACROBLOCK;
- vp8_pp_cfg.deblocking_level = arg_parse_uint(&arg);
- }
- else if (arg_match(&arg, &deblock, argi))
- {
- postproc = 1;
- vp8_pp_cfg.post_proc_flag |= VP8_DEBLOCK;
- }
- else if (arg_match(&arg, &mfqe, argi))
- {
- postproc = 1;
- vp8_pp_cfg.post_proc_flag |= VP8_MFQE;
- }
- else if (arg_match(&arg, &pp_debug_info, argi))
- {
- unsigned int level = arg_parse_uint(&arg);
-
- postproc = 1;
- vp8_pp_cfg.post_proc_flag &= ~0x7;
-
- if (level)
- vp8_pp_cfg.post_proc_flag |= level;
- }
- else if (arg_match(&arg, &pp_disp_ref_frame, argi))
- {
- unsigned int flags = arg_parse_int(&arg);
- if (flags)
- {
- postproc = 1;
- vp8_dbg_color_ref_frame = flags;
- }
- }
- else if (arg_match(&arg, &pp_disp_mb_modes, argi))
- {
- unsigned int flags = arg_parse_int(&arg);
- if (flags)
- {
- postproc = 1;
- vp8_dbg_color_mb_modes = flags;
- }
- }
- else if (arg_match(&arg, &pp_disp_b_modes, argi))
- {
- unsigned int flags = arg_parse_int(&arg);
- if (flags)
- {
- postproc = 1;
- vp8_dbg_color_b_modes = flags;
- }
- }
- else if (arg_match(&arg, &pp_disp_mvs, argi))
- {
- unsigned int flags = arg_parse_int(&arg);
- if (flags)
- {
- postproc = 1;
- vp8_dbg_display_mv = flags;
- }
- }
- else if (arg_match(&arg, &error_concealment, argi))
- {
- ec_enabled = 1;
- }
+ else if (arg_match(&arg, &addnoise_level, argi)) {
+ postproc = 1;
+ vp8_pp_cfg.post_proc_flag |= VP8_ADDNOISE;
+ vp8_pp_cfg.noise_level = arg_parse_uint(&arg);
+ } else if (arg_match(&arg, &demacroblock_level, argi)) {
+ postproc = 1;
+ vp8_pp_cfg.post_proc_flag |= VP8_DEMACROBLOCK;
+ vp8_pp_cfg.deblocking_level = arg_parse_uint(&arg);
+ } else if (arg_match(&arg, &deblock, argi)) {
+ postproc = 1;
+ vp8_pp_cfg.post_proc_flag |= VP8_DEBLOCK;
+ } else if (arg_match(&arg, &mfqe, argi)) {
+ postproc = 1;
+ vp8_pp_cfg.post_proc_flag |= VP8_MFQE;
+ } else if (arg_match(&arg, &pp_debug_info, argi)) {
+ unsigned int level = arg_parse_uint(&arg);
+
+ postproc = 1;
+ vp8_pp_cfg.post_proc_flag &= ~0x7;
+
+ if (level)
+ vp8_pp_cfg.post_proc_flag |= level;
+ } else if (arg_match(&arg, &pp_disp_ref_frame, argi)) {
+ unsigned int flags = arg_parse_int(&arg);
+ if (flags) {
+ postproc = 1;
+ vp8_dbg_color_ref_frame = flags;
+ }
+ } else if (arg_match(&arg, &pp_disp_mb_modes, argi)) {
+ unsigned int flags = arg_parse_int(&arg);
+ if (flags) {
+ postproc = 1;
+ vp8_dbg_color_mb_modes = flags;
+ }
+ } else if (arg_match(&arg, &pp_disp_b_modes, argi)) {
+ unsigned int flags = arg_parse_int(&arg);
+ if (flags) {
+ postproc = 1;
+ vp8_dbg_color_b_modes = flags;
+ }
+ } else if (arg_match(&arg, &pp_disp_mvs, argi)) {
+ unsigned int flags = arg_parse_int(&arg);
+ if (flags) {
+ postproc = 1;
+ vp8_dbg_display_mv = flags;
+ }
+ } else if (arg_match(&arg, &error_concealment, argi)) {
+ ec_enabled = 1;
+ }
#endif
- else
- argj++;
- }
+ else
+ argj++;
+ }
- /* Check for unrecognized options */
- for (argi = argv; *argi; argi++)
- if (argi[0][0] == '-' && strlen(argi[0]) > 1)
- die("Error: Unrecognized option %s\n", *argi);
+ /* Check for unrecognized options */
+ for (argi = argv; *argi; argi++)
+ if (argi[0][0] == '-' && strlen(argi[0]) > 1)
+ die("Error: Unrecognized option %s\n", *argi);
- /* Handle non-option arguments */
- fn = argv[0];
+ /* Handle non-option arguments */
+ fn = argv[0];
- if (!fn)
- usage_exit();
+ if (!fn)
+ usage_exit();
- /* Open file */
- infile = strcmp(fn, "-") ? fopen(fn, "rb") : set_binary_mode(stdin);
+ /* Open file */
+ infile = strcmp(fn, "-") ? fopen(fn, "rb") : set_binary_mode(stdin);
- if (!infile)
- {
- fprintf(stderr, "Failed to open file '%s'",
- strcmp(fn, "-") ? fn : "stdin");
- return EXIT_FAILURE;
- }
+ if (!infile) {
+ fprintf(stderr, "Failed to open file '%s'",
+ strcmp(fn, "-") ? fn : "stdin");
+ return EXIT_FAILURE;
+ }
#if CONFIG_OS_SUPPORT
- /* Make sure we don't dump to the terminal, unless forced to with -o - */
- if(!outfile_pattern && isatty(fileno(stdout)) && !do_md5 && !noblit)
- {
- fprintf(stderr,
- "Not dumping raw video to your terminal. Use '-o -' to "
- "override.\n");
- return EXIT_FAILURE;
- }
+ /* Make sure we don't dump to the terminal, unless forced to with -o - */
+ if (!outfile_pattern && isatty(fileno(stdout)) && !do_md5 && !noblit) {
+ fprintf(stderr,
+ "Not dumping raw video to your terminal. Use '-o -' to "
+ "override.\n");
+ return EXIT_FAILURE;
+ }
#endif
- input.infile = infile;
- if(file_is_ivf(infile, &fourcc, &width, &height, &fps_den,
- &fps_num))
- input.kind = IVF_FILE;
- else if(file_is_webm(&input, &fourcc, &width, &height, &fps_den, &fps_num))
- input.kind = WEBM_FILE;
- else if(file_is_raw(infile, &fourcc, &width, &height, &fps_den, &fps_num))
- input.kind = RAW_FILE;
- else
- {
- fprintf(stderr, "Unrecognized input file type.\n");
- return EXIT_FAILURE;
+ input.infile = infile;
+ if (file_is_ivf(infile, &fourcc, &width, &height, &fps_den,
+ &fps_num))
+ input.kind = IVF_FILE;
+ else if (file_is_webm(&input, &fourcc, &width, &height, &fps_den, &fps_num))
+ input.kind = WEBM_FILE;
+ else if (file_is_raw(infile, &fourcc, &width, &height, &fps_den, &fps_num))
+ input.kind = RAW_FILE;
+ else {
+ fprintf(stderr, "Unrecognized input file type.\n");
+ return EXIT_FAILURE;
+ }
+
+ /* If the output file is not set or doesn't have a sequence number in
+ * it, then we only open it once.
+ */
+ outfile_pattern = outfile_pattern ? outfile_pattern : "-";
+ single_file = 1;
+ {
+ const char *p = outfile_pattern;
+ do {
+ p = strchr(p, '%');
+ if (p && p[1] >= '1' && p[1] <= '9') {
+ /* pattern contains sequence number, so it's not unique. */
+ single_file = 0;
+ break;
+ }
+ if (p)
+ p++;
+ } while (p);
+ }
+
+ if (single_file && !noblit) {
+ generate_filename(outfile_pattern, outfile, sizeof(outfile) - 1,
+ width, height, 0);
+ out = out_open(outfile, do_md5);
+ }
+
+ if (use_y4m && !noblit) {
+ char buffer[128];
+
+ if (!single_file) {
+ fprintf(stderr, "YUV4MPEG2 not supported with output patterns,"
+ " try --i420 or --yv12.\n");
+ return EXIT_FAILURE;
}
- /* If the output file is not set or doesn't have a sequence number in
- * it, then we only open it once.
- */
- outfile_pattern = outfile_pattern ? outfile_pattern : "-";
- single_file = 1;
- {
- const char *p = outfile_pattern;
- do
- {
- p = strchr(p, '%');
- if(p && p[1] >= '1' && p[1] <= '9')
- {
- /* pattern contains sequence number, so it's not unique. */
- single_file = 0;
- break;
- }
- if(p)
- p++;
- } while(p);
- }
-
- if(single_file && !noblit)
- {
- generate_filename(outfile_pattern, outfile, sizeof(outfile)-1,
- width, height, 0);
- out = out_open(outfile, do_md5);
- }
-
- if (use_y4m && !noblit)
- {
- char buffer[128];
- if (!single_file)
- {
- fprintf(stderr, "YUV4MPEG2 not supported with output patterns,"
- " try --i420 or --yv12.\n");
- return EXIT_FAILURE;
- }
-
- if(input.kind == WEBM_FILE)
- if(webm_guess_framerate(&input, &fps_den, &fps_num))
- {
- fprintf(stderr, "Failed to guess framerate -- error parsing "
- "webm file?\n");
- return EXIT_FAILURE;
- }
-
-
- /*Note: We can't output an aspect ratio here because IVF doesn't
- store one, and neither does VP8.
- That will have to wait until these tools support WebM natively.*/
- sprintf(buffer, "YUV4MPEG2 C%s W%u H%u F%u:%u I%c\n",
- "420jpeg", width, height, fps_num, fps_den, 'p');
- out_put(out, (unsigned char *)buffer,
- (unsigned int)strlen(buffer), do_md5);
- }
-
- /* Try to determine the codec from the fourcc. */
- for (i = 0; i < sizeof(ifaces) / sizeof(ifaces[0]); i++)
- if ((fourcc & ifaces[i].fourcc_mask) == ifaces[i].fourcc)
- {
- vpx_codec_iface_t *ivf_iface = ifaces[i].iface;
-
- if (iface && iface != ivf_iface)
- fprintf(stderr, "Notice -- IVF header indicates codec: %s\n",
- ifaces[i].name);
- else
- iface = ivf_iface;
-
- break;
- }
-
- dec_flags = (postproc ? VPX_CODEC_USE_POSTPROC : 0) |
- (ec_enabled ? VPX_CODEC_USE_ERROR_CONCEALMENT : 0);
- if (vpx_codec_dec_init(&decoder, iface ? iface : ifaces[0].iface, &cfg,
- dec_flags))
- {
- fprintf(stderr, "Failed to initialize decoder: %s\n", vpx_codec_error(&decoder));
+ if (input.kind == WEBM_FILE)
+ if (webm_guess_framerate(&input, &fps_den, &fps_num)) {
+ fprintf(stderr, "Failed to guess framerate -- error parsing "
+ "webm file?\n");
return EXIT_FAILURE;
+ }
+
+
+ /*Note: We can't output an aspect ratio here because IVF doesn't
+ store one, and neither does VP8.
+ That will have to wait until these tools support WebM natively.*/
+ snprintf(buffer, sizeof(buffer), "YUV4MPEG2 W%u H%u F%u:%u I%c ",
+ width, height, fps_num, fps_den, 'p');
+ out_put(out, (unsigned char *)buffer,
+ (unsigned int)strlen(buffer), do_md5);
+ }
+
+ /* Try to determine the codec from the fourcc. */
+ for (i = 0; i < sizeof(ifaces) / sizeof(ifaces[0]); i++)
+ if ((fourcc & ifaces[i].fourcc_mask) == ifaces[i].fourcc) {
+ vpx_codec_iface_t *ivf_iface = ifaces[i].iface();
+
+ if (iface && iface != ivf_iface)
+ fprintf(stderr, "Notice -- IVF header indicates codec: %s\n",
+ ifaces[i].name);
+ else
+ iface = ivf_iface;
+
+ break;
}
- if (!quiet)
- fprintf(stderr, "%s\n", decoder.name);
+ dec_flags = (postproc ? VPX_CODEC_USE_POSTPROC : 0) |
+ (ec_enabled ? VPX_CODEC_USE_ERROR_CONCEALMENT : 0);
+ if (vpx_codec_dec_init(&decoder, iface ? iface : ifaces[0].iface(), &cfg,
+ dec_flags)) {
+ fprintf(stderr, "Failed to initialize decoder: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
+ }
+
+ if (!quiet)
+ fprintf(stderr, "%s\n", decoder.name);
#if CONFIG_VP8_DECODER
- if (vp8_pp_cfg.post_proc_flag
- && vpx_codec_control(&decoder, VP8_SET_POSTPROC, &vp8_pp_cfg))
- {
- fprintf(stderr, "Failed to configure postproc: %s\n", vpx_codec_error(&decoder));
- return EXIT_FAILURE;
- }
+ if (vp8_pp_cfg.post_proc_flag
+ && vpx_codec_control(&decoder, VP8_SET_POSTPROC, &vp8_pp_cfg)) {
+ fprintf(stderr, "Failed to configure postproc: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
+ }
+
+ if (vp8_dbg_color_ref_frame
+ && vpx_codec_control(&decoder, VP8_SET_DBG_COLOR_REF_FRAME, vp8_dbg_color_ref_frame)) {
+ fprintf(stderr, "Failed to configure reference block visualizer: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
+ }
+
+ if (vp8_dbg_color_mb_modes
+ && vpx_codec_control(&decoder, VP8_SET_DBG_COLOR_MB_MODES, vp8_dbg_color_mb_modes)) {
+ fprintf(stderr, "Failed to configure macro block visualizer: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
+ }
+
+ if (vp8_dbg_color_b_modes
+ && vpx_codec_control(&decoder, VP8_SET_DBG_COLOR_B_MODES, vp8_dbg_color_b_modes)) {
+ fprintf(stderr, "Failed to configure block visualizer: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
+ }
+
+ if (vp8_dbg_display_mv
+ && vpx_codec_control(&decoder, VP8_SET_DBG_DISPLAY_MV, vp8_dbg_display_mv)) {
+ fprintf(stderr, "Failed to configure motion vector visualizer: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
+ }
+#endif
- if (vp8_dbg_color_ref_frame
- && vpx_codec_control(&decoder, VP8_SET_DBG_COLOR_REF_FRAME, vp8_dbg_color_ref_frame))
- {
- fprintf(stderr, "Failed to configure reference block visualizer: %s\n", vpx_codec_error(&decoder));
- return EXIT_FAILURE;
- }
- if (vp8_dbg_color_mb_modes
- && vpx_codec_control(&decoder, VP8_SET_DBG_COLOR_MB_MODES, vp8_dbg_color_mb_modes))
- {
- fprintf(stderr, "Failed to configure macro block visualizer: %s\n", vpx_codec_error(&decoder));
- return EXIT_FAILURE;
- }
+  if (arg_skip)
+    fprintf(stderr, "Skipping first %d frames.\n", arg_skip);
+ while (arg_skip) {
+ if (read_frame(&input, &buf, &buf_sz, &buf_alloc_sz))
+ break;
+ arg_skip--;
+ }
- if (vp8_dbg_color_b_modes
- && vpx_codec_control(&decoder, VP8_SET_DBG_COLOR_B_MODES, vp8_dbg_color_b_modes))
- {
- fprintf(stderr, "Failed to configure block visualizer: %s\n", vpx_codec_error(&decoder));
- return EXIT_FAILURE;
- }
+ frame_avail = 1;
+ got_data = 0;
- if (vp8_dbg_display_mv
- && vpx_codec_control(&decoder, VP8_SET_DBG_DISPLAY_MV, vp8_dbg_display_mv))
- {
- fprintf(stderr, "Failed to configure motion vector visualizer: %s\n", vpx_codec_error(&decoder));
- return EXIT_FAILURE;
- }
-#endif
+ /* Decode file */
+ while (frame_avail || got_data) {
+ vpx_codec_iter_t iter = NULL;
+ vpx_image_t *img;
+ struct vpx_usec_timer timer;
+ int corrupted;
- /* Decode file */
- while (!read_frame(&input, &buf, &buf_sz, &buf_alloc_sz))
- {
- vpx_codec_iter_t iter = NULL;
- vpx_image_t *img;
- struct vpx_usec_timer timer;
- int corrupted;
+ frame_avail = 0;
+ if (!stop_after || frame_in < stop_after) {
+      if (!read_frame(&input, &buf, &buf_sz, &buf_alloc_sz)) {
+ frame_avail = 1;
+ frame_in++;
vpx_usec_timer_start(&timer);
- if (vpx_codec_decode(&decoder, buf, (unsigned int)buf_sz, NULL, 0))
- {
- const char *detail = vpx_codec_error_detail(&decoder);
- fprintf(stderr, "Failed to decode frame: %s\n", vpx_codec_error(&decoder));
+ if (vpx_codec_decode(&decoder, buf, (unsigned int)buf_sz, NULL, 0)) {
+ const char *detail = vpx_codec_error_detail(&decoder);
+ fprintf(stderr, "Failed to decode frame: %s\n",
+ vpx_codec_error(&decoder));
- if (detail)
- fprintf(stderr, " Additional information: %s\n", detail);
-
- goto fail;
+ if (detail)
+ fprintf(stderr, " Additional information: %s\n", detail);
+ goto fail;
}
vpx_usec_timer_mark(&timer);
dx_time += (unsigned int)vpx_usec_timer_elapsed(&timer);
+ }
+ }
- ++frame_in;
+ vpx_usec_timer_start(&timer);
- if (vpx_codec_control(&decoder, VP8D_GET_FRAME_CORRUPTED, &corrupted))
- {
- fprintf(stderr, "Failed VP8_GET_FRAME_CORRUPTED: %s\n",
- vpx_codec_error(&decoder));
- goto fail;
+ got_data = 0;
+ if ((img = vpx_codec_get_frame(&decoder, &iter))) {
+ ++frame_out;
+ got_data = 1;
+ }
+
+ vpx_usec_timer_mark(&timer);
+ dx_time += (unsigned int)vpx_usec_timer_elapsed(&timer);
+
+ if (vpx_codec_control(&decoder, VP8D_GET_FRAME_CORRUPTED, &corrupted)) {
+ fprintf(stderr, "Failed VP8_GET_FRAME_CORRUPTED: %s\n",
+ vpx_codec_error(&decoder));
+ goto fail;
+ }
+ frames_corrupted += corrupted;
+
+ if (progress)
+ show_progress(frame_in, frame_out, dx_time);
+
+ if (!noblit) {
+ if (frame_out == 1 && img && use_y4m) {
+ /* Write out the color format to terminate the header line */
+ const char *color =
+ img->fmt == VPX_IMG_FMT_444A ? "C444alpha\n" :
+ img->fmt == VPX_IMG_FMT_I444 ? "C444\n" :
+ img->fmt == VPX_IMG_FMT_I422 ? "C422\n" :
+ "C420jpeg\n";
+
+ out_put(out, (const unsigned char*)color, strlen(color), do_md5);
+ }
+
+ if (do_scale) {
+ if (img && frame_out == 1) {
+ stream_w = img->d_w;
+ stream_h = img->d_h;
+ scaled_img = vpx_img_alloc(NULL, VPX_IMG_FMT_I420,
+ stream_w, stream_h, 16);
+ }
+ if (img && (img->d_w != stream_w || img->d_h != stream_h)) {
+ assert(img->fmt == VPX_IMG_FMT_I420);
+ I420Scale(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
+ img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
+ img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
+ img->d_w, img->d_h,
+ scaled_img->planes[VPX_PLANE_Y],
+ scaled_img->stride[VPX_PLANE_Y],
+ scaled_img->planes[VPX_PLANE_U],
+ scaled_img->stride[VPX_PLANE_U],
+ scaled_img->planes[VPX_PLANE_V],
+ scaled_img->stride[VPX_PLANE_V],
+ stream_w, stream_h,
+ kFilterBox);
+ img = scaled_img;
+ }
+ }
+
+ if (img) {
+ unsigned int y;
+ char out_fn[PATH_MAX];
+ uint8_t *buf;
+ unsigned int c_w =
+ img->x_chroma_shift ? (1 + img->d_w) >> img->x_chroma_shift
+ : img->d_w;
+ unsigned int c_h =
+ img->y_chroma_shift ? (1 + img->d_h) >> img->y_chroma_shift
+ : img->d_h;
+
+ if (!single_file) {
+ size_t len = sizeof(out_fn) - 1;
+
+ out_fn[len] = '\0';
+ generate_filename(outfile_pattern, out_fn, len - 1,
+ img->d_w, img->d_h, frame_in);
+ out = out_open(out_fn, do_md5);
+ } else if (use_y4m)
+ out_put(out, (unsigned char *)"FRAME\n", 6, do_md5);
+
+ buf = img->planes[VPX_PLANE_Y];
+
+ for (y = 0; y < img->d_h; y++) {
+ out_put(out, buf, img->d_w, do_md5);
+ buf += img->stride[VPX_PLANE_Y];
}
- frames_corrupted += corrupted;
- vpx_usec_timer_start(&timer);
+ buf = img->planes[flipuv ? VPX_PLANE_V : VPX_PLANE_U];
- if ((img = vpx_codec_get_frame(&decoder, &iter)))
- ++frame_out;
+ for (y = 0; y < c_h; y++) {
+ out_put(out, buf, c_w, do_md5);
+ buf += img->stride[VPX_PLANE_U];
+ }
- vpx_usec_timer_mark(&timer);
- dx_time += (unsigned int)vpx_usec_timer_elapsed(&timer);
+ buf = img->planes[flipuv ? VPX_PLANE_U : VPX_PLANE_V];
- if (progress)
- show_progress(frame_in, frame_out, dx_time);
-
- if (!noblit)
- {
- if (img)
- {
- unsigned int y;
- char out_fn[PATH_MAX];
- uint8_t *buf;
-
- if (!single_file)
- {
- size_t len = sizeof(out_fn)-1;
-
- out_fn[len] = '\0';
- generate_filename(outfile_pattern, out_fn, len-1,
- img->d_w, img->d_h, frame_in);
- out = out_open(out_fn, do_md5);
- }
- else if(use_y4m)
- out_put(out, (unsigned char *)"FRAME\n", 6, do_md5);
-
- buf = img->planes[VPX_PLANE_Y];
-
- for (y = 0; y < img->d_h; y++)
- {
- out_put(out, buf, img->d_w, do_md5);
- buf += img->stride[VPX_PLANE_Y];
- }
-
- buf = img->planes[flipuv?VPX_PLANE_V:VPX_PLANE_U];
-
- for (y = 0; y < (1 + img->d_h) / 2; y++)
- {
- out_put(out, buf, (1 + img->d_w) / 2, do_md5);
- buf += img->stride[VPX_PLANE_U];
- }
-
- buf = img->planes[flipuv?VPX_PLANE_U:VPX_PLANE_V];
-
- for (y = 0; y < (1 + img->d_h) / 2; y++)
- {
- out_put(out, buf, (1 + img->d_w) / 2, do_md5);
- buf += img->stride[VPX_PLANE_V];
- }
-
- if (!single_file)
- out_close(out, out_fn, do_md5);
- }
+ for (y = 0; y < c_h; y++) {
+ out_put(out, buf, c_w, do_md5);
+ buf += img->stride[VPX_PLANE_V];
}
- if (stop_after && frame_in >= stop_after)
- break;
+ if (!single_file)
+ out_close(out, out_fn, do_md5);
+ }
}
- if (summary || progress)
- {
- show_progress(frame_in, frame_out, dx_time);
- fprintf(stderr, "\n");
- }
+ if (stop_after && frame_in >= stop_after)
+ break;
+ }
- if (frames_corrupted)
- fprintf(stderr, "WARNING: %d frames corrupted.\n",frames_corrupted);
+ if (summary || progress) {
+ show_progress(frame_in, frame_out, dx_time);
+ fprintf(stderr, "\n");
+ }
+
+ if (frames_corrupted)
+ fprintf(stderr, "WARNING: %d frames corrupted.\n", frames_corrupted);
fail:
- if (vpx_codec_destroy(&decoder))
- {
- fprintf(stderr, "Failed to destroy decoder: %s\n", vpx_codec_error(&decoder));
- return EXIT_FAILURE;
- }
+ if (vpx_codec_destroy(&decoder)) {
+ fprintf(stderr, "Failed to destroy decoder: %s\n", vpx_codec_error(&decoder));
+ return EXIT_FAILURE;
+ }
+
+ if (single_file && !noblit)
+ out_close(out, outfile, do_md5);
- if (single_file && !noblit)
- out_close(out, outfile, do_md5);
+ if (input.nestegg_ctx)
+ nestegg_destroy(input.nestegg_ctx);
+ if (input.kind != WEBM_FILE)
+ free(buf);
+ fclose(infile);
+ free(argv);
- if(input.nestegg_ctx)
- nestegg_destroy(input.nestegg_ctx);
- if(input.kind != WEBM_FILE)
- free(buf);
- fclose(infile);
- free(argv);
+ return frames_corrupted ? EXIT_FAILURE : EXIT_SUCCESS;
+}
+
+int main(int argc, const char **argv_) {
+ unsigned int loops = 1, i;
+ char **argv, **argi, **argj;
+ struct arg arg;
+ int error = 0;
- return frames_corrupted ? EXIT_FAILURE : EXIT_SUCCESS;
+ argv = argv_dup(argc - 1, argv_ + 1);
+ for (argi = argj = argv; (*argj = *argi); argi += arg.argv_step) {
+ memset(&arg, 0, sizeof(arg));
+ arg.argv_step = 1;
+
+ if (arg_match(&arg, &looparg, argi)) {
+ loops = arg_parse_uint(&arg);
+ break;
+ }
+ }
+ free(argv);
+ for (i = 0; !error && i < loops; i++)
+ error = main_loop(argc, argv_);
+ return error;
}
diff --git a/libvpx/vpxenc.c b/libvpx/vpxenc.c
index 7449e6c..a60b84d 100644
--- a/libvpx/vpxenc.c
+++ b/libvpx/vpxenc.c
@@ -8,11 +8,9 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "vpx_config.h"
-/* This is a simple program that encodes YV12 files and generates ivf
- * files using the new interface.
- */
-#if defined(_WIN32) || !CONFIG_OS_SUPPORT
+#if defined(_WIN32) || defined(__OS2__) || !CONFIG_OS_SUPPORT
#define USE_POSIX_MMAP 0
#else
#define USE_POSIX_MMAP 1
@@ -25,6 +23,9 @@
#include <limits.h>
#include <assert.h>
#include "vpx/vpx_encoder.h"
+#if CONFIG_DECODERS
+#include "vpx/vpx_decoder.h"
+#endif
#if USE_POSIX_MMAP
#include <sys/types.h>
#include <sys/stat.h>
@@ -32,13 +33,21 @@
#include <fcntl.h>
#include <unistd.h>
#endif
+
+#if CONFIG_VP8_ENCODER || CONFIG_VP9_ENCODER
#include "vpx/vp8cx.h"
+#endif
+#if CONFIG_VP8_DECODER || CONFIG_VP9_DECODER
+#include "vpx/vp8dx.h"
+#endif
+
#include "vpx_ports/mem_ops.h"
#include "vpx_ports/vpx_timer.h"
#include "tools_common.h"
#include "y4minput.h"
#include "libmkv/EbmlWriter.h"
#include "libmkv/EbmlIDs.h"
+#include "third_party/libyuv/include/libyuv/scale.h"
/* Need special handling of these functions on Windows */
#if defined(_MSC_VER)
@@ -66,643 +75,610 @@ typedef long off_t;
/* Swallow warnings about unused results of fread/fwrite */
static size_t wrap_fread(void *ptr, size_t size, size_t nmemb,
- FILE *stream)
-{
- return fread(ptr, size, nmemb, stream);
+ FILE *stream) {
+ return fread(ptr, size, nmemb, stream);
}
#define fread wrap_fread
static size_t wrap_fwrite(const void *ptr, size_t size, size_t nmemb,
- FILE *stream)
-{
- return fwrite(ptr, size, nmemb, stream);
+ FILE *stream) {
+ return fwrite(ptr, size, nmemb, stream);
}
#define fwrite wrap_fwrite
static const char *exec_name;
-static const struct codec_item
-{
- char const *name;
- vpx_codec_iface_t *iface;
- unsigned int fourcc;
-} codecs[] =
-{
-#if CONFIG_VP8_ENCODER
- {"vp8", &vpx_codec_vp8_cx_algo, 0x30385056},
+#define VP8_FOURCC (0x30385056)
+#define VP9_FOURCC (0x30395056)
+static const struct codec_item {
+ char const *name;
+ const vpx_codec_iface_t *(*iface)(void);
+ const vpx_codec_iface_t *(*dx_iface)(void);
+ unsigned int fourcc;
+} codecs[] = {
+#if CONFIG_VP8_ENCODER && CONFIG_VP8_DECODER
+ {"vp8", &vpx_codec_vp8_cx, &vpx_codec_vp8_dx, VP8_FOURCC},
+#elif CONFIG_VP8_ENCODER && !CONFIG_VP8_DECODER
+ {"vp8", &vpx_codec_vp8_cx, NULL, VP8_FOURCC},
+#endif
+#if CONFIG_VP9_ENCODER && CONFIG_VP9_DECODER
+ {"vp9", &vpx_codec_vp9_cx, &vpx_codec_vp9_dx, VP9_FOURCC},
+#elif CONFIG_VP9_ENCODER && !CONFIG_VP9_DECODER
+ {"vp9", &vpx_codec_vp9_cx, NULL, VP9_FOURCC},
#endif
};
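The codecs[] table now stores interface factory functions rather than interface pointers, so only the codecs that are compiled in get referenced. A minimal sketch of resolving an entry by name, mirroring the --codec handling later in the file; find_codec is a hypothetical helper and assumes <string.h> is available:

/* Hypothetical lookup mirroring the --codec argument handling:
 * returns the matching codecs[] entry, or NULL if none matches. */
static const struct codec_item *find_codec(const char *name) {
  int i;
  for (i = 0; i < (int)(sizeof(codecs) / sizeof(codecs[0])); i++)
    if (!strcmp(codecs[i].name, name))
      return &codecs[i];
  return NULL;
}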
static void usage_exit();
#define LOG_ERROR(label) do \
-{\
+ {\
const char *l=label;\
va_list ap;\
va_start(ap, fmt);\
if(l)\
- fprintf(stderr, "%s: ", l);\
+ fprintf(stderr, "%s: ", l);\
vfprintf(stderr, fmt, ap);\
fprintf(stderr, "\n");\
va_end(ap);\
-} while(0)
+ } while(0)
-void die(const char *fmt, ...)
-{
- LOG_ERROR(NULL);
- usage_exit();
+void die(const char *fmt, ...) {
+ LOG_ERROR(NULL);
+ usage_exit();
}
-void fatal(const char *fmt, ...)
-{
- LOG_ERROR("Fatal");
- exit(EXIT_FAILURE);
+void fatal(const char *fmt, ...) {
+ LOG_ERROR("Fatal");
+ exit(EXIT_FAILURE);
}
-void warn(const char *fmt, ...)
-{
- LOG_ERROR("Warning");
+void warn(const char *fmt, ...) {
+ LOG_ERROR("Warning");
}
-static void ctx_exit_on_error(vpx_codec_ctx_t *ctx, const char *s, ...)
-{
- va_list ap;
+static void warn_or_exit_on_errorv(vpx_codec_ctx_t *ctx, int fatal,
+ const char *s, va_list ap) {
+ if (ctx->err) {
+ const char *detail = vpx_codec_error_detail(ctx);
- va_start(ap, s);
- if (ctx->err)
- {
- const char *detail = vpx_codec_error_detail(ctx);
+ vfprintf(stderr, s, ap);
+ fprintf(stderr, ": %s\n", vpx_codec_error(ctx));
- vfprintf(stderr, s, ap);
- fprintf(stderr, ": %s\n", vpx_codec_error(ctx));
+ if (detail)
+ fprintf(stderr, " %s\n", detail);
- if (detail)
- fprintf(stderr, " %s\n", detail);
+ if (fatal)
+ exit(EXIT_FAILURE);
+ }
+}
- exit(EXIT_FAILURE);
- }
+static void ctx_exit_on_error(vpx_codec_ctx_t *ctx, const char *s, ...) {
+ va_list ap;
+
+ va_start(ap, s);
+ warn_or_exit_on_errorv(ctx, 1, s, ap);
+ va_end(ap);
+}
+
+static void warn_or_exit_on_error(vpx_codec_ctx_t *ctx, int fatal,
+ const char *s, ...) {
+ va_list ap;
+
+ va_start(ap, s);
+ warn_or_exit_on_errorv(ctx, fatal, s, ap);
+ va_end(ap);
}
/* This structure is used to abstract the different ways of handling
* first pass statistics.
*/
-typedef struct
-{
- vpx_fixed_buf_t buf;
- int pass;
- FILE *file;
- char *buf_ptr;
- size_t buf_alloc_sz;
+typedef struct {
+ vpx_fixed_buf_t buf;
+ int pass;
+ FILE *file;
+ char *buf_ptr;
+ size_t buf_alloc_sz;
} stats_io_t;
-int stats_open_file(stats_io_t *stats, const char *fpf, int pass)
-{
- int res;
+int stats_open_file(stats_io_t *stats, const char *fpf, int pass) {
+ int res;
- stats->pass = pass;
+ stats->pass = pass;
- if (pass == 0)
- {
- stats->file = fopen(fpf, "wb");
- stats->buf.sz = 0;
- stats->buf.buf = NULL,
- res = (stats->file != NULL);
- }
- else
- {
+ if (pass == 0) {
+ stats->file = fopen(fpf, "wb");
+ stats->buf.sz = 0;
+    stats->buf.buf = NULL;
+ res = (stats->file != NULL);
+ } else {
#if 0
#elif USE_POSIX_MMAP
- struct stat stat_buf;
- int fd;
-
- fd = open(fpf, O_RDONLY);
- stats->file = fdopen(fd, "rb");
- fstat(fd, &stat_buf);
- stats->buf.sz = stat_buf.st_size;
- stats->buf.buf = mmap(NULL, stats->buf.sz, PROT_READ, MAP_PRIVATE,
- fd, 0);
- res = (stats->buf.buf != NULL);
+ struct stat stat_buf;
+ int fd;
+
+ fd = open(fpf, O_RDONLY);
+ stats->file = fdopen(fd, "rb");
+ fstat(fd, &stat_buf);
+ stats->buf.sz = stat_buf.st_size;
+ stats->buf.buf = mmap(NULL, stats->buf.sz, PROT_READ, MAP_PRIVATE,
+ fd, 0);
+ res = (stats->buf.buf != NULL);
#else
- size_t nbytes;
+ size_t nbytes;
- stats->file = fopen(fpf, "rb");
+ stats->file = fopen(fpf, "rb");
- if (fseek(stats->file, 0, SEEK_END))
- fatal("First-pass stats file must be seekable!");
+ if (fseek(stats->file, 0, SEEK_END))
+ fatal("First-pass stats file must be seekable!");
- stats->buf.sz = stats->buf_alloc_sz = ftell(stats->file);
- rewind(stats->file);
+ stats->buf.sz = stats->buf_alloc_sz = ftell(stats->file);
+ rewind(stats->file);
- stats->buf.buf = malloc(stats->buf_alloc_sz);
+ stats->buf.buf = malloc(stats->buf_alloc_sz);
- if (!stats->buf.buf)
- fatal("Failed to allocate first-pass stats buffer (%lu bytes)",
- (unsigned long)stats->buf_alloc_sz);
+ if (!stats->buf.buf)
+ fatal("Failed to allocate first-pass stats buffer (%lu bytes)",
+ (unsigned long)stats->buf_alloc_sz);
- nbytes = fread(stats->buf.buf, 1, stats->buf.sz, stats->file);
- res = (nbytes == stats->buf.sz);
+ nbytes = fread(stats->buf.buf, 1, stats->buf.sz, stats->file);
+ res = (nbytes == stats->buf.sz);
#endif
- }
+ }
- return res;
+ return res;
}
-int stats_open_mem(stats_io_t *stats, int pass)
-{
- int res;
- stats->pass = pass;
+int stats_open_mem(stats_io_t *stats, int pass) {
+ int res;
+ stats->pass = pass;
- if (!pass)
- {
- stats->buf.sz = 0;
- stats->buf_alloc_sz = 64 * 1024;
- stats->buf.buf = malloc(stats->buf_alloc_sz);
- }
+ if (!pass) {
+ stats->buf.sz = 0;
+ stats->buf_alloc_sz = 64 * 1024;
+ stats->buf.buf = malloc(stats->buf_alloc_sz);
+ }
- stats->buf_ptr = stats->buf.buf;
- res = (stats->buf.buf != NULL);
- return res;
+ stats->buf_ptr = stats->buf.buf;
+ res = (stats->buf.buf != NULL);
+ return res;
}
-void stats_close(stats_io_t *stats, int last_pass)
-{
- if (stats->file)
- {
- if (stats->pass == last_pass)
- {
+void stats_close(stats_io_t *stats, int last_pass) {
+ if (stats->file) {
+ if (stats->pass == last_pass) {
#if 0
#elif USE_POSIX_MMAP
- munmap(stats->buf.buf, stats->buf.sz);
+ munmap(stats->buf.buf, stats->buf.sz);
#else
- free(stats->buf.buf);
+ free(stats->buf.buf);
#endif
- }
-
- fclose(stats->file);
- stats->file = NULL;
- }
- else
- {
- if (stats->pass == last_pass)
- free(stats->buf.buf);
}
+
+ fclose(stats->file);
+ stats->file = NULL;
+ } else {
+ if (stats->pass == last_pass)
+ free(stats->buf.buf);
+ }
}
-void stats_write(stats_io_t *stats, const void *pkt, size_t len)
-{
- if (stats->file)
- {
- (void) fwrite(pkt, 1, len, stats->file);
+void stats_write(stats_io_t *stats, const void *pkt, size_t len) {
+ if (stats->file) {
+ (void) fwrite(pkt, 1, len, stats->file);
+ } else {
+ if (stats->buf.sz + len > stats->buf_alloc_sz) {
+ size_t new_sz = stats->buf_alloc_sz + 64 * 1024;
+ char *new_ptr = realloc(stats->buf.buf, new_sz);
+
+ if (new_ptr) {
+ stats->buf_ptr = new_ptr + (stats->buf_ptr - (char *)stats->buf.buf);
+ stats->buf.buf = new_ptr;
+ stats->buf_alloc_sz = new_sz;
+ } else
+ fatal("Failed to realloc firstpass stats buffer.");
}
- else
- {
- if (stats->buf.sz + len > stats->buf_alloc_sz)
- {
- size_t new_sz = stats->buf_alloc_sz + 64 * 1024;
- char *new_ptr = realloc(stats->buf.buf, new_sz);
-
- if (new_ptr)
- {
- stats->buf_ptr = new_ptr + (stats->buf_ptr - (char *)stats->buf.buf);
- stats->buf.buf = new_ptr;
- stats->buf_alloc_sz = new_sz;
- }
- else
- fatal("Failed to realloc firstpass stats buffer.");
- }
- memcpy(stats->buf_ptr, pkt, len);
- stats->buf.sz += len;
- stats->buf_ptr += len;
- }
+ memcpy(stats->buf_ptr, pkt, len);
+ stats->buf.sz += len;
+ stats->buf_ptr += len;
+ }
}
-vpx_fixed_buf_t stats_get(stats_io_t *stats)
-{
- return stats->buf;
+vpx_fixed_buf_t stats_get(stats_io_t *stats) {
+ return stats->buf;
}
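Taken together, stats_open_file()/stats_write()/stats_get()/stats_close() let pass 0 persist first-pass packets and pass 1 hand them back as a vpx_fixed_buf_t. A compressed sketch of the calling convention, with the encode loop, error handling, and the surrounding pkt/cfg variables omitted (the .fpf filename is arbitrary):

stats_io_t stats;

/* Pass 0: create the store and append each VPX_CODEC_STATS_PKT. */
stats_open_file(&stats, "first_pass.fpf", 0);
stats_write(&stats, pkt->data.twopass_stats.buf, pkt->data.twopass_stats.sz);
stats_close(&stats, 1);  /* pass 0 != last pass: just closes the file */

/* Pass 1: reopen and feed the whole buffer to the encoder config. */
stats_open_file(&stats, "first_pass.fpf", 1);
cfg.rc_twopass_stats_in = stats_get(&stats);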
/* Stereo 3D packed frame format */
-typedef enum stereo_format
-{
- STEREO_FORMAT_MONO = 0,
- STEREO_FORMAT_LEFT_RIGHT = 1,
- STEREO_FORMAT_BOTTOM_TOP = 2,
- STEREO_FORMAT_TOP_BOTTOM = 3,
- STEREO_FORMAT_RIGHT_LEFT = 11
+typedef enum stereo_format {
+ STEREO_FORMAT_MONO = 0,
+ STEREO_FORMAT_LEFT_RIGHT = 1,
+ STEREO_FORMAT_BOTTOM_TOP = 2,
+ STEREO_FORMAT_TOP_BOTTOM = 3,
+ STEREO_FORMAT_RIGHT_LEFT = 11
} stereo_format_t;
-enum video_file_type
-{
- FILE_TYPE_RAW,
- FILE_TYPE_IVF,
- FILE_TYPE_Y4M
+enum video_file_type {
+ FILE_TYPE_RAW,
+ FILE_TYPE_IVF,
+ FILE_TYPE_Y4M
};
struct detect_buffer {
- char buf[4];
- size_t buf_read;
- size_t position;
+ char buf[4];
+ size_t buf_read;
+ size_t position;
};
-struct input_state
-{
- char *fn;
- FILE *file;
- y4m_input y4m;
- struct detect_buffer detect;
- enum video_file_type file_type;
- unsigned int w;
- unsigned int h;
- struct vpx_rational framerate;
- int use_i420;
+struct input_state {
+ char *fn;
+ FILE *file;
+ off_t length;
+ y4m_input y4m;
+ struct detect_buffer detect;
+ enum video_file_type file_type;
+ unsigned int w;
+ unsigned int h;
+ struct vpx_rational framerate;
+ int use_i420;
+ int only_i420;
};
#define IVF_FRAME_HDR_SZ (4+8) /* 4 byte size + 8 byte timestamp */
-static int read_frame(struct input_state *input, vpx_image_t *img)
-{
- FILE *f = input->file;
- enum video_file_type file_type = input->file_type;
- y4m_input *y4m = &input->y4m;
- struct detect_buffer *detect = &input->detect;
- int plane = 0;
- int shortread = 0;
-
- if (file_type == FILE_TYPE_Y4M)
- {
- if (y4m_input_fetch_frame(y4m, f, img) < 1)
- return 0;
+static int read_frame(struct input_state *input, vpx_image_t *img) {
+ FILE *f = input->file;
+ enum video_file_type file_type = input->file_type;
+ y4m_input *y4m = &input->y4m;
+ struct detect_buffer *detect = &input->detect;
+ int plane = 0;
+ int shortread = 0;
+
+ if (file_type == FILE_TYPE_Y4M) {
+ if (y4m_input_fetch_frame(y4m, f, img) < 1)
+ return 0;
+ } else {
+ if (file_type == FILE_TYPE_IVF) {
+ char junk[IVF_FRAME_HDR_SZ];
+
+ /* Skip the frame header. We know how big the frame should be. See
+ * write_ivf_frame_header() for documentation on the frame header
+ * layout.
+ */
+ (void) fread(junk, 1, IVF_FRAME_HDR_SZ, f);
}
- else
- {
- if (file_type == FILE_TYPE_IVF)
- {
- char junk[IVF_FRAME_HDR_SZ];
- /* Skip the frame header. We know how big the frame should be. See
- * write_ivf_frame_header() for documentation on the frame header
- * layout.
- */
- (void) fread(junk, 1, IVF_FRAME_HDR_SZ, f);
+ for (plane = 0; plane < 3; plane++) {
+ unsigned char *ptr;
+ int w = (plane ? (1 + img->d_w) / 2 : img->d_w);
+ int h = (plane ? (1 + img->d_h) / 2 : img->d_h);
+ int r;
+
+ /* Determine the correct plane based on the image format. The for-loop
+ * always counts in Y,U,V order, but this may not match the order of
+ * the data on disk.
+ */
+ switch (plane) {
+ case 1:
+ ptr = img->planes[img->fmt == VPX_IMG_FMT_YV12 ? VPX_PLANE_V : VPX_PLANE_U];
+ break;
+ case 2:
+ ptr = img->planes[img->fmt == VPX_IMG_FMT_YV12 ? VPX_PLANE_U : VPX_PLANE_V];
+ break;
+ default:
+ ptr = img->planes[plane];
+ }
+
+ for (r = 0; r < h; r++) {
+ size_t needed = w;
+ size_t buf_position = 0;
+ const size_t left = detect->buf_read - detect->position;
+ if (left > 0) {
+ const size_t more = (left < needed) ? left : needed;
+ memcpy(ptr, detect->buf + detect->position, more);
+ buf_position = more;
+ needed -= more;
+ detect->position += more;
}
-
- for (plane = 0; plane < 3; plane++)
- {
- unsigned char *ptr;
- int w = (plane ? (1 + img->d_w) / 2 : img->d_w);
- int h = (plane ? (1 + img->d_h) / 2 : img->d_h);
- int r;
-
- /* Determine the correct plane based on the image format. The for-loop
- * always counts in Y,U,V order, but this may not match the order of
- * the data on disk.
- */
- switch (plane)
- {
- case 1:
- ptr = img->planes[img->fmt==VPX_IMG_FMT_YV12? VPX_PLANE_V : VPX_PLANE_U];
- break;
- case 2:
- ptr = img->planes[img->fmt==VPX_IMG_FMT_YV12?VPX_PLANE_U : VPX_PLANE_V];
- break;
- default:
- ptr = img->planes[plane];
- }
-
- for (r = 0; r < h; r++)
- {
- size_t needed = w;
- size_t buf_position = 0;
- const size_t left = detect->buf_read - detect->position;
- if (left > 0)
- {
- const size_t more = (left < needed) ? left : needed;
- memcpy(ptr, detect->buf + detect->position, more);
- buf_position = more;
- needed -= more;
- detect->position += more;
- }
- if (needed > 0)
- {
- shortread |= (fread(ptr + buf_position, 1, needed, f) < needed);
- }
-
- ptr += img->stride[plane];
- }
+ if (needed > 0) {
+ shortread |= (fread(ptr + buf_position, 1, needed, f) < needed);
}
+
+ ptr += img->stride[plane];
+ }
}
+ }
- return !shortread;
+ return !shortread;
}
unsigned int file_is_y4m(FILE *infile,
y4m_input *y4m,
- char detect[4])
-{
- if(memcmp(detect, "YUV4", 4) == 0)
- {
- return 1;
- }
- return 0;
+ char detect[4]) {
+ if (memcmp(detect, "YUV4", 4) == 0) {
+ return 1;
+ }
+ return 0;
}
#define IVF_FILE_HDR_SZ (32)
unsigned int file_is_ivf(struct input_state *input,
- unsigned int *fourcc)
-{
- char raw_hdr[IVF_FILE_HDR_SZ];
- int is_ivf = 0;
- FILE *infile = input->file;
- unsigned int *width = &input->w;
- unsigned int *height = &input->h;
- struct detect_buffer *detect = &input->detect;
-
- if(memcmp(detect->buf, "DKIF", 4) != 0)
- return 0;
-
- /* See write_ivf_file_header() for more documentation on the file header
- * layout.
- */
- if (fread(raw_hdr + 4, 1, IVF_FILE_HDR_SZ - 4, infile)
- == IVF_FILE_HDR_SZ - 4)
+ unsigned int *fourcc) {
+ char raw_hdr[IVF_FILE_HDR_SZ];
+ int is_ivf = 0;
+ FILE *infile = input->file;
+ unsigned int *width = &input->w;
+ unsigned int *height = &input->h;
+ struct detect_buffer *detect = &input->detect;
+
+ if (memcmp(detect->buf, "DKIF", 4) != 0)
+ return 0;
+
+ /* See write_ivf_file_header() for more documentation on the file header
+ * layout.
+ */
+ if (fread(raw_hdr + 4, 1, IVF_FILE_HDR_SZ - 4, infile)
+ == IVF_FILE_HDR_SZ - 4) {
{
- {
- is_ivf = 1;
+ is_ivf = 1;
- if (mem_get_le16(raw_hdr + 4) != 0)
- warn("Unrecognized IVF version! This file may not decode "
- "properly.");
+ if (mem_get_le16(raw_hdr + 4) != 0)
+ warn("Unrecognized IVF version! This file may not decode "
+ "properly.");
- *fourcc = mem_get_le32(raw_hdr + 8);
- }
+ *fourcc = mem_get_le32(raw_hdr + 8);
}
+ }
- if (is_ivf)
- {
- *width = mem_get_le16(raw_hdr + 12);
- *height = mem_get_le16(raw_hdr + 14);
- detect->position = 4;
- }
+ if (is_ivf) {
+ *width = mem_get_le16(raw_hdr + 12);
+ *height = mem_get_le16(raw_hdr + 14);
+ detect->position = 4;
+ }
- return is_ivf;
+ return is_ivf;
}
static void write_ivf_file_header(FILE *outfile,
const vpx_codec_enc_cfg_t *cfg,
unsigned int fourcc,
- int frame_cnt)
-{
- char header[32];
-
- if (cfg->g_pass != VPX_RC_ONE_PASS && cfg->g_pass != VPX_RC_LAST_PASS)
- return;
-
- header[0] = 'D';
- header[1] = 'K';
- header[2] = 'I';
- header[3] = 'F';
- mem_put_le16(header + 4, 0); /* version */
- mem_put_le16(header + 6, 32); /* headersize */
- mem_put_le32(header + 8, fourcc); /* headersize */
- mem_put_le16(header + 12, cfg->g_w); /* width */
- mem_put_le16(header + 14, cfg->g_h); /* height */
- mem_put_le32(header + 16, cfg->g_timebase.den); /* rate */
- mem_put_le32(header + 20, cfg->g_timebase.num); /* scale */
- mem_put_le32(header + 24, frame_cnt); /* length */
- mem_put_le32(header + 28, 0); /* unused */
-
- (void) fwrite(header, 1, 32, outfile);
+ int frame_cnt) {
+ char header[32];
+
+ if (cfg->g_pass != VPX_RC_ONE_PASS && cfg->g_pass != VPX_RC_LAST_PASS)
+ return;
+
+ header[0] = 'D';
+ header[1] = 'K';
+ header[2] = 'I';
+ header[3] = 'F';
+ mem_put_le16(header + 4, 0); /* version */
+ mem_put_le16(header + 6, 32); /* headersize */
+ mem_put_le32(header + 8, fourcc); /* fourcc */
+ mem_put_le16(header + 12, cfg->g_w); /* width */
+ mem_put_le16(header + 14, cfg->g_h); /* height */
+ mem_put_le32(header + 16, cfg->g_timebase.den); /* rate */
+ mem_put_le32(header + 20, cfg->g_timebase.num); /* scale */
+ mem_put_le32(header + 24, frame_cnt); /* length */
+ mem_put_le32(header + 28, 0); /* unused */
+
+ (void) fwrite(header, 1, 32, outfile);
}
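/* Note, not part of this change: the 32-byte IVF file header written above,
 * and parsed by file_is_ivf(), is laid out little-endian as:
 *
 *   bytes  0- 3  signature "DKIF"
 *   bytes  4- 5  version (0)
 *   bytes  6- 7  header size (32)
 *   bytes  8-11  fourcc
 *   bytes 12-13  frame width
 *   bytes 14-15  frame height
 *   bytes 16-19  timebase denominator (rate)
 *   bytes 20-23  timebase numerator (scale)
 *   bytes 24-27  frame count
 *   bytes 28-31  unused
 */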
static void write_ivf_frame_header(FILE *outfile,
- const vpx_codec_cx_pkt_t *pkt)
-{
- char header[12];
- vpx_codec_pts_t pts;
+ const vpx_codec_cx_pkt_t *pkt) {
+ char header[12];
+ vpx_codec_pts_t pts;
- if (pkt->kind != VPX_CODEC_CX_FRAME_PKT)
- return;
+ if (pkt->kind != VPX_CODEC_CX_FRAME_PKT)
+ return;
- pts = pkt->data.frame.pts;
- mem_put_le32(header, (int)pkt->data.frame.sz);
- mem_put_le32(header + 4, pts & 0xFFFFFFFF);
- mem_put_le32(header + 8, pts >> 32);
+ pts = pkt->data.frame.pts;
+ mem_put_le32(header, (int)pkt->data.frame.sz);
+ mem_put_le32(header + 4, pts & 0xFFFFFFFF);
+ mem_put_le32(header + 8, pts >> 32);
- (void) fwrite(header, 1, 12, outfile);
+ (void) fwrite(header, 1, 12, outfile);
}
-static void write_ivf_frame_size(FILE *outfile, size_t size)
-{
- char header[4];
- mem_put_le32(header, (int)size);
- (void) fwrite(header, 1, 4, outfile);
+static void write_ivf_frame_size(FILE *outfile, size_t size) {
+ char header[4];
+ mem_put_le32(header, (int)size);
+ (void) fwrite(header, 1, 4, outfile);
}
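/* Illustrative sketch, not part of this change: reading back the 12-byte
 * IVF frame header written by write_ivf_frame_header().  mem_get_le32() is
 * the counterpart of mem_put_le32() used above; the helper itself is
 * hypothetical. */
static void read_ivf_frame_header(const char header[12],
                                  unsigned int *frame_sz,
                                  vpx_codec_pts_t *pts) {
  *frame_sz = mem_get_le32(header);
  *pts = (vpx_codec_pts_t)mem_get_le32(header + 4)            /* low 32 bits */
         | ((vpx_codec_pts_t)mem_get_le32(header + 8) << 32); /* high 32 bits */
}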
typedef off_t EbmlLoc;
-struct cue_entry
-{
- unsigned int time;
- uint64_t loc;
+struct cue_entry {
+ unsigned int time;
+ uint64_t loc;
};
-struct EbmlGlobal
-{
- int debug;
+struct EbmlGlobal {
+ int debug;
- FILE *stream;
- int64_t last_pts_ms;
- vpx_rational_t framerate;
+ FILE *stream;
+ int64_t last_pts_ms;
+ vpx_rational_t framerate;
- /* These pointers are to the start of an element */
- off_t position_reference;
- off_t seek_info_pos;
- off_t segment_info_pos;
- off_t track_pos;
- off_t cue_pos;
- off_t cluster_pos;
+ /* These pointers are to the start of an element */
+ off_t position_reference;
+ off_t seek_info_pos;
+ off_t segment_info_pos;
+ off_t track_pos;
+ off_t cue_pos;
+ off_t cluster_pos;
- /* This pointer is to a specific element to be serialized */
- off_t track_id_pos;
+ /* This pointer is to a specific element to be serialized */
+ off_t track_id_pos;
- /* These pointers are to the size field of the element */
- EbmlLoc startSegment;
- EbmlLoc startCluster;
+ /* These pointers are to the size field of the element */
+ EbmlLoc startSegment;
+ EbmlLoc startCluster;
- uint32_t cluster_timecode;
- int cluster_open;
+ uint32_t cluster_timecode;
+ int cluster_open;
- struct cue_entry *cue_list;
- unsigned int cues;
+ struct cue_entry *cue_list;
+ unsigned int cues;
};
-void Ebml_Write(EbmlGlobal *glob, const void *buffer_in, unsigned long len)
-{
- (void) fwrite(buffer_in, 1, len, glob->stream);
+void Ebml_Write(EbmlGlobal *glob, const void *buffer_in, unsigned long len) {
+ (void) fwrite(buffer_in, 1, len, glob->stream);
}
#define WRITE_BUFFER(s) \
-for(i = len-1; i>=0; i--)\
-{ \
+ for(i = len-1; i>=0; i--)\
+ { \
x = (char)(*(const s *)buffer_in >> (i * CHAR_BIT)); \
Ebml_Write(glob, &x, 1); \
-}
-void Ebml_Serialize(EbmlGlobal *glob, const void *buffer_in, int buffer_size, unsigned long len)
-{
- char x;
- int i;
-
- /* buffer_size:
- * 1 - int8_t;
- * 2 - int16_t;
- * 3 - int32_t;
- * 4 - int64_t;
- */
- switch (buffer_size)
- {
- case 1:
- WRITE_BUFFER(int8_t)
- break;
- case 2:
- WRITE_BUFFER(int16_t)
- break;
- case 4:
- WRITE_BUFFER(int32_t)
- break;
- case 8:
- WRITE_BUFFER(int64_t)
- break;
- default:
- break;
- }
+ }
+void Ebml_Serialize(EbmlGlobal *glob, const void *buffer_in, int buffer_size, unsigned long len) {
+ char x;
+ int i;
+
+ /* buffer_size:
+ * 1 - int8_t;
+ * 2 - int16_t;
+ * 4 - int32_t;
+ * 8 - int64_t;
+ */
+ switch (buffer_size) {
+ case 1:
+ WRITE_BUFFER(int8_t)
+ break;
+ case 2:
+ WRITE_BUFFER(int16_t)
+ break;
+ case 4:
+ WRITE_BUFFER(int32_t)
+ break;
+ case 8:
+ WRITE_BUFFER(int64_t)
+ break;
+ default:
+ break;
+ }
}
#undef WRITE_BUFFER
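/* Illustration, not part of this change: the WRITE_BUFFER macro above emits
 * the value most-significant byte first, the big-endian order EBML requires.
 * `len` gives the number of bytes written; `buffer_size` only selects how
 * buffer_in is dereferenced.  For example:
 *
 *   const unsigned int v = 0x01020304;
 *   Ebml_Serialize(glob, &v, sizeof(v), 4);  // stream gets 01 02 03 04
 */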
/* Need a fixed size serializer for the track ID. libmkv provides a 64 bit
* one, but not a 32 bit one.
*/
-static void Ebml_SerializeUnsigned32(EbmlGlobal *glob, unsigned long class_id, uint64_t ui)
-{
- unsigned char sizeSerialized = 4 | 0x80;
- Ebml_WriteID(glob, class_id);
- Ebml_Serialize(glob, &sizeSerialized, sizeof(sizeSerialized), 1);
- Ebml_Serialize(glob, &ui, sizeof(ui), 4);
+static void Ebml_SerializeUnsigned32(EbmlGlobal *glob, unsigned long class_id, uint64_t ui) {
+ unsigned char sizeSerialized = 4 | 0x80;
+ Ebml_WriteID(glob, class_id);
+ Ebml_Serialize(glob, &sizeSerialized, sizeof(sizeSerialized), 1);
+ Ebml_Serialize(glob, &ui, sizeof(ui), 4);
}
static void
Ebml_StartSubElement(EbmlGlobal *glob, EbmlLoc *ebmlLoc,
- unsigned long class_id)
-{
- /* todo this is always taking 8 bytes, this may need later optimization */
- /* this is a key that says length unknown */
- uint64_t unknownLen = LITERALU64(0x01FFFFFF, 0xFFFFFFFF);
-
- Ebml_WriteID(glob, class_id);
- *ebmlLoc = ftello(glob->stream);
- Ebml_Serialize(glob, &unknownLen, sizeof(unknownLen), 8);
+ unsigned long class_id) {
+ /* todo this is always taking 8 bytes, this may need later optimization */
+ /* this is a key that says length unknown */
+ uint64_t unknownLen = LITERALU64(0x01FFFFFF, 0xFFFFFFFF);
+
+ Ebml_WriteID(glob, class_id);
+ *ebmlLoc = ftello(glob->stream);
+ Ebml_Serialize(glob, &unknownLen, sizeof(unknownLen), 8);
}
static void
-Ebml_EndSubElement(EbmlGlobal *glob, EbmlLoc *ebmlLoc)
-{
- off_t pos;
- uint64_t size;
+Ebml_EndSubElement(EbmlGlobal *glob, EbmlLoc *ebmlLoc) {
+ off_t pos;
+ uint64_t size;
- /* Save the current stream pointer */
- pos = ftello(glob->stream);
+ /* Save the current stream pointer */
+ pos = ftello(glob->stream);
- /* Calculate the size of this element */
- size = pos - *ebmlLoc - 8;
- size |= LITERALU64(0x01000000,0x00000000);
+ /* Calculate the size of this element */
+ size = pos - *ebmlLoc - 8;
+ size |= LITERALU64(0x01000000, 0x00000000);
- /* Seek back to the beginning of the element and write the new size */
- fseeko(glob->stream, *ebmlLoc, SEEK_SET);
- Ebml_Serialize(glob, &size, sizeof(size), 8);
+ /* Seek back to the beginning of the element and write the new size */
+ fseeko(glob->stream, *ebmlLoc, SEEK_SET);
+ Ebml_Serialize(glob, &size, sizeof(size), 8);
- /* Reset the stream pointer */
- fseeko(glob->stream, pos, SEEK_SET);
+ /* Reset the stream pointer */
+ fseeko(glob->stream, pos, SEEK_SET);
}
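/* Illustrative sketch, not part of this change: how the two helpers above
 * pair up.  Ebml_StartSubElement() reserves an 8-byte "length unknown" size
 * field (0x01FFFFFFFFFFFFFF); Ebml_EndSubElement() seeks back, rewrites it
 * with the real size (keeping the 0x01 length descriptor in the top byte),
 * and restores the stream position.  The function name is hypothetical. */
static void example_cluster(EbmlGlobal *glob) {
  EbmlLoc loc;
  Ebml_StartSubElement(glob, &loc, Cluster);  /* placeholder size written */
  /* ... serialize the element's children here ... */
  Ebml_EndSubElement(glob, &loc);             /* size patched in place */
}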
static void
-write_webm_seek_element(EbmlGlobal *ebml, unsigned long id, off_t pos)
-{
- uint64_t offset = pos - ebml->position_reference;
- EbmlLoc start;
- Ebml_StartSubElement(ebml, &start, Seek);
- Ebml_SerializeBinary(ebml, SeekID, id);
- Ebml_SerializeUnsigned64(ebml, SeekPosition, offset);
- Ebml_EndSubElement(ebml, &start);
+write_webm_seek_element(EbmlGlobal *ebml, unsigned long id, off_t pos) {
+ uint64_t offset = pos - ebml->position_reference;
+ EbmlLoc start;
+ Ebml_StartSubElement(ebml, &start, Seek);
+ Ebml_SerializeBinary(ebml, SeekID, id);
+ Ebml_SerializeUnsigned64(ebml, SeekPosition, offset);
+ Ebml_EndSubElement(ebml, &start);
}
static void
-write_webm_seek_info(EbmlGlobal *ebml)
-{
+write_webm_seek_info(EbmlGlobal *ebml) {
- off_t pos;
+ off_t pos;
- /* Save the current stream pointer */
- pos = ftello(ebml->stream);
+ /* Save the current stream pointer */
+ pos = ftello(ebml->stream);
- if(ebml->seek_info_pos)
- fseeko(ebml->stream, ebml->seek_info_pos, SEEK_SET);
- else
- ebml->seek_info_pos = pos;
+ if (ebml->seek_info_pos)
+ fseeko(ebml->stream, ebml->seek_info_pos, SEEK_SET);
+ else
+ ebml->seek_info_pos = pos;
- {
- EbmlLoc start;
+ {
+ EbmlLoc start;
- Ebml_StartSubElement(ebml, &start, SeekHead);
- write_webm_seek_element(ebml, Tracks, ebml->track_pos);
- write_webm_seek_element(ebml, Cues, ebml->cue_pos);
- write_webm_seek_element(ebml, Info, ebml->segment_info_pos);
- Ebml_EndSubElement(ebml, &start);
+ Ebml_StartSubElement(ebml, &start, SeekHead);
+ write_webm_seek_element(ebml, Tracks, ebml->track_pos);
+ write_webm_seek_element(ebml, Cues, ebml->cue_pos);
+ write_webm_seek_element(ebml, Info, ebml->segment_info_pos);
+ Ebml_EndSubElement(ebml, &start);
+ }
+ {
+ /* segment info */
+ EbmlLoc startInfo;
+ uint64_t frame_time;
+ char version_string[64];
+
+ /* Assemble version string */
+ if (ebml->debug)
+ strcpy(version_string, "vpxenc");
+ else {
+ strcpy(version_string, "vpxenc ");
+ strncat(version_string,
+ vpx_codec_version_str(),
+ sizeof(version_string) - 1 - strlen(version_string));
}
- {
- /* segment info */
- EbmlLoc startInfo;
- uint64_t frame_time;
- char version_string[64];
-
- /* Assemble version string */
- if(ebml->debug)
- strcpy(version_string, "vpxenc");
- else
- {
- strcpy(version_string, "vpxenc ");
- strncat(version_string,
- vpx_codec_version_str(),
- sizeof(version_string) - 1 - strlen(version_string));
- }
- frame_time = (uint64_t)1000 * ebml->framerate.den
- / ebml->framerate.num;
- ebml->segment_info_pos = ftello(ebml->stream);
- Ebml_StartSubElement(ebml, &startInfo, Info);
- Ebml_SerializeUnsigned(ebml, TimecodeScale, 1000000);
- Ebml_SerializeFloat(ebml, Segment_Duration,
- (double)(ebml->last_pts_ms + frame_time));
- Ebml_SerializeString(ebml, 0x4D80, version_string);
- Ebml_SerializeString(ebml, 0x5741, version_string);
- Ebml_EndSubElement(ebml, &startInfo);
- }
+ frame_time = (uint64_t)1000 * ebml->framerate.den
+ / ebml->framerate.num;
+ ebml->segment_info_pos = ftello(ebml->stream);
+ Ebml_StartSubElement(ebml, &startInfo, Info);
+ Ebml_SerializeUnsigned(ebml, TimecodeScale, 1000000);
+ Ebml_SerializeFloat(ebml, Segment_Duration,
+ (double)(ebml->last_pts_ms + frame_time));
+ Ebml_SerializeString(ebml, 0x4D80, version_string);
+ Ebml_SerializeString(ebml, 0x5741, version_string);
+ Ebml_EndSubElement(ebml, &startInfo);
+ }
}
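/* Note, not part of this change: write_webm_seek_info() runs twice.  The
 * first call, from write_webm_file_header(), records seek_info_pos and
 * writes placeholder offsets (track_pos and cue_pos are still zero).  The
 * second call, from write_webm_file_footer(), seeks back to seek_info_pos
 * and rewrites the SeekHead with the final positions, all relative to
 * position_reference. */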
@@ -710,1858 +686,2108 @@ static void
write_webm_file_header(EbmlGlobal *glob,
const vpx_codec_enc_cfg_t *cfg,
const struct vpx_rational *fps,
- stereo_format_t stereo_fmt)
-{
- {
- EbmlLoc start;
- Ebml_StartSubElement(glob, &start, EBML);
- Ebml_SerializeUnsigned(glob, EBMLVersion, 1);
- Ebml_SerializeUnsigned(glob, EBMLReadVersion, 1);
- Ebml_SerializeUnsigned(glob, EBMLMaxIDLength, 4);
- Ebml_SerializeUnsigned(glob, EBMLMaxSizeLength, 8);
- Ebml_SerializeString(glob, DocType, "webm");
- Ebml_SerializeUnsigned(glob, DocTypeVersion, 2);
- Ebml_SerializeUnsigned(glob, DocTypeReadVersion, 2);
- Ebml_EndSubElement(glob, &start);
- }
+ stereo_format_t stereo_fmt,
+ unsigned int fourcc) {
+ {
+ EbmlLoc start;
+ Ebml_StartSubElement(glob, &start, EBML);
+ Ebml_SerializeUnsigned(glob, EBMLVersion, 1);
+ Ebml_SerializeUnsigned(glob, EBMLReadVersion, 1);
+ Ebml_SerializeUnsigned(glob, EBMLMaxIDLength, 4);
+ Ebml_SerializeUnsigned(glob, EBMLMaxSizeLength, 8);
+ Ebml_SerializeString(glob, DocType, "webm");
+ Ebml_SerializeUnsigned(glob, DocTypeVersion, 2);
+ Ebml_SerializeUnsigned(glob, DocTypeReadVersion, 2);
+ Ebml_EndSubElement(glob, &start);
+ }
+ {
+ Ebml_StartSubElement(glob, &glob->startSegment, Segment);
+ glob->position_reference = ftello(glob->stream);
+ glob->framerate = *fps;
+ write_webm_seek_info(glob);
+
{
- Ebml_StartSubElement(glob, &glob->startSegment, Segment);
- glob->position_reference = ftello(glob->stream);
- glob->framerate = *fps;
- write_webm_seek_info(glob);
+ EbmlLoc trackStart;
+ glob->track_pos = ftello(glob->stream);
+ Ebml_StartSubElement(glob, &trackStart, Tracks);
+ {
+ unsigned int trackNumber = 1;
+ uint64_t trackID = 0;
+ EbmlLoc start;
+ Ebml_StartSubElement(glob, &start, TrackEntry);
+ Ebml_SerializeUnsigned(glob, TrackNumber, trackNumber);
+ glob->track_id_pos = ftello(glob->stream);
+ Ebml_SerializeUnsigned32(glob, TrackUID, trackID);
+ Ebml_SerializeUnsigned(glob, TrackType, 1);
+ Ebml_SerializeString(glob, CodecID,
+ fourcc == VP8_FOURCC ? "V_VP8" : "V_VP9");
{
- EbmlLoc trackStart;
- glob->track_pos = ftello(glob->stream);
- Ebml_StartSubElement(glob, &trackStart, Tracks);
- {
- unsigned int trackNumber = 1;
- uint64_t trackID = 0;
-
- EbmlLoc start;
- Ebml_StartSubElement(glob, &start, TrackEntry);
- Ebml_SerializeUnsigned(glob, TrackNumber, trackNumber);
- glob->track_id_pos = ftello(glob->stream);
- Ebml_SerializeUnsigned32(glob, TrackUID, trackID);
- Ebml_SerializeUnsigned(glob, TrackType, 1);
- Ebml_SerializeString(glob, CodecID, "V_VP8");
- {
- unsigned int pixelWidth = cfg->g_w;
- unsigned int pixelHeight = cfg->g_h;
- float frameRate = (float)fps->num/(float)fps->den;
-
- EbmlLoc videoStart;
- Ebml_StartSubElement(glob, &videoStart, Video);
- Ebml_SerializeUnsigned(glob, PixelWidth, pixelWidth);
- Ebml_SerializeUnsigned(glob, PixelHeight, pixelHeight);
- Ebml_SerializeUnsigned(glob, StereoMode, stereo_fmt);
- Ebml_SerializeFloat(glob, FrameRate, frameRate);
- Ebml_EndSubElement(glob, &videoStart);
- }
- Ebml_EndSubElement(glob, &start); /* Track Entry */
- }
- Ebml_EndSubElement(glob, &trackStart);
+ unsigned int pixelWidth = cfg->g_w;
+ unsigned int pixelHeight = cfg->g_h;
+ float frameRate = (float)fps->num / (float)fps->den;
+
+ EbmlLoc videoStart;
+ Ebml_StartSubElement(glob, &videoStart, Video);
+ Ebml_SerializeUnsigned(glob, PixelWidth, pixelWidth);
+ Ebml_SerializeUnsigned(glob, PixelHeight, pixelHeight);
+ Ebml_SerializeUnsigned(glob, StereoMode, stereo_fmt);
+ Ebml_SerializeFloat(glob, FrameRate, frameRate);
+ Ebml_EndSubElement(glob, &videoStart);
}
- /* segment element is open */
+ Ebml_EndSubElement(glob, &start); /* Track Entry */
+ }
+ Ebml_EndSubElement(glob, &trackStart);
}
+ /* segment element is open */
+ }
}
static void
write_webm_block(EbmlGlobal *glob,
const vpx_codec_enc_cfg_t *cfg,
- const vpx_codec_cx_pkt_t *pkt)
-{
- unsigned long block_length;
- unsigned char track_number;
- unsigned short block_timecode = 0;
- unsigned char flags;
- int64_t pts_ms;
- int start_cluster = 0, is_keyframe;
-
- /* Calculate the PTS of this frame in milliseconds */
- pts_ms = pkt->data.frame.pts * 1000
- * (uint64_t)cfg->g_timebase.num / (uint64_t)cfg->g_timebase.den;
- if(pts_ms <= glob->last_pts_ms)
- pts_ms = glob->last_pts_ms + 1;
- glob->last_pts_ms = pts_ms;
-
- /* Calculate the relative time of this block */
- if(pts_ms - glob->cluster_timecode > SHRT_MAX)
- start_cluster = 1;
- else
- block_timecode = (unsigned short)pts_ms - glob->cluster_timecode;
-
- is_keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY);
- if(start_cluster || is_keyframe)
- {
- if(glob->cluster_open)
- Ebml_EndSubElement(glob, &glob->startCluster);
-
- /* Open the new cluster */
- block_timecode = 0;
- glob->cluster_open = 1;
- glob->cluster_timecode = (uint32_t)pts_ms;
- glob->cluster_pos = ftello(glob->stream);
- Ebml_StartSubElement(glob, &glob->startCluster, Cluster); /* cluster */
- Ebml_SerializeUnsigned(glob, Timecode, glob->cluster_timecode);
-
- /* Save a cue point if this is a keyframe. */
- if(is_keyframe)
- {
- struct cue_entry *cue, *new_cue_list;
-
- new_cue_list = realloc(glob->cue_list,
- (glob->cues+1) * sizeof(struct cue_entry));
- if(new_cue_list)
- glob->cue_list = new_cue_list;
- else
- fatal("Failed to realloc cue list.");
-
- cue = &glob->cue_list[glob->cues];
- cue->time = glob->cluster_timecode;
- cue->loc = glob->cluster_pos;
- glob->cues++;
- }
+ const vpx_codec_cx_pkt_t *pkt) {
+ unsigned long block_length;
+ unsigned char track_number;
+ unsigned short block_timecode = 0;
+ unsigned char flags;
+ int64_t pts_ms;
+ int start_cluster = 0, is_keyframe;
+
+ /* Calculate the PTS of this frame in milliseconds */
+ pts_ms = pkt->data.frame.pts * 1000
+ * (uint64_t)cfg->g_timebase.num / (uint64_t)cfg->g_timebase.den;
+ if (pts_ms <= glob->last_pts_ms)
+ pts_ms = glob->last_pts_ms + 1;
+ glob->last_pts_ms = pts_ms;
+
+ /* Calculate the relative time of this block */
+ if (pts_ms - glob->cluster_timecode > SHRT_MAX)
+ start_cluster = 1;
+ else
+ block_timecode = (unsigned short)pts_ms - glob->cluster_timecode;
+
+ is_keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY);
+ if (start_cluster || is_keyframe) {
+ if (glob->cluster_open)
+ Ebml_EndSubElement(glob, &glob->startCluster);
+
+ /* Open the new cluster */
+ block_timecode = 0;
+ glob->cluster_open = 1;
+ glob->cluster_timecode = (uint32_t)pts_ms;
+ glob->cluster_pos = ftello(glob->stream);
+ Ebml_StartSubElement(glob, &glob->startCluster, Cluster); /* cluster */
+ Ebml_SerializeUnsigned(glob, Timecode, glob->cluster_timecode);
+
+ /* Save a cue point if this is a keyframe. */
+ if (is_keyframe) {
+ struct cue_entry *cue, *new_cue_list;
+
+ new_cue_list = realloc(glob->cue_list,
+ (glob->cues + 1) * sizeof(struct cue_entry));
+ if (new_cue_list)
+ glob->cue_list = new_cue_list;
+ else
+ fatal("Failed to realloc cue list.");
+
+ cue = &glob->cue_list[glob->cues];
+ cue->time = glob->cluster_timecode;
+ cue->loc = glob->cluster_pos;
+ glob->cues++;
}
+ }
- /* Write the Simple Block */
- Ebml_WriteID(glob, SimpleBlock);
+ /* Write the Simple Block */
+ Ebml_WriteID(glob, SimpleBlock);
- block_length = (unsigned long)pkt->data.frame.sz + 4;
- block_length |= 0x10000000;
- Ebml_Serialize(glob, &block_length, sizeof(block_length), 4);
+ block_length = (unsigned long)pkt->data.frame.sz + 4;
+ block_length |= 0x10000000;
+ Ebml_Serialize(glob, &block_length, sizeof(block_length), 4);
- track_number = 1;
- track_number |= 0x80;
- Ebml_Write(glob, &track_number, 1);
+ track_number = 1;
+ track_number |= 0x80;
+ Ebml_Write(glob, &track_number, 1);
- Ebml_Serialize(glob, &block_timecode, sizeof(block_timecode), 2);
+ Ebml_Serialize(glob, &block_timecode, sizeof(block_timecode), 2);
- flags = 0;
- if(is_keyframe)
- flags |= 0x80;
- if(pkt->data.frame.flags & VPX_FRAME_IS_INVISIBLE)
- flags |= 0x08;
- Ebml_Write(glob, &flags, 1);
+ flags = 0;
+ if (is_keyframe)
+ flags |= 0x80;
+ if (pkt->data.frame.flags & VPX_FRAME_IS_INVISIBLE)
+ flags |= 0x08;
+ Ebml_Write(glob, &flags, 1);
- Ebml_Write(glob, pkt->data.frame.buf, (unsigned long)pkt->data.frame.sz);
+ Ebml_Write(glob, pkt->data.frame.buf, (unsigned long)pkt->data.frame.sz);
}
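/* Note, not part of this change: the SimpleBlock emitted above is laid out
 * as
 *
 *   4 bytes  EBML size of the block (frame size + 4), OR'd with 0x10000000
 *            so the top byte carries the 4-byte length descriptor (0x10)
 *   1 byte   track number as a vint (1 | 0x80)
 *   2 bytes  timecode relative to the enclosing Cluster, big-endian
 *   1 byte   flags (0x80 = keyframe, 0x08 = invisible)
 *   N bytes  the compressed frame
 *
 * The +4 covers the track number, timecode, and flags.  A new Cluster is
 * opened on every keyframe, or whenever the relative timecode would exceed
 * SHRT_MAX. */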
static void
-write_webm_file_footer(EbmlGlobal *glob, long hash)
-{
+write_webm_file_footer(EbmlGlobal *glob, long hash) {
- if(glob->cluster_open)
- Ebml_EndSubElement(glob, &glob->startCluster);
+ if (glob->cluster_open)
+ Ebml_EndSubElement(glob, &glob->startCluster);
- {
- EbmlLoc start;
- unsigned int i;
+ {
+ EbmlLoc start;
+ unsigned int i;
- glob->cue_pos = ftello(glob->stream);
- Ebml_StartSubElement(glob, &start, Cues);
- for(i=0; i<glob->cues; i++)
- {
- struct cue_entry *cue = &glob->cue_list[i];
- EbmlLoc start;
+ glob->cue_pos = ftello(glob->stream);
+ Ebml_StartSubElement(glob, &start, Cues);
+ for (i = 0; i < glob->cues; i++) {
+ struct cue_entry *cue = &glob->cue_list[i];
+ EbmlLoc start;
- Ebml_StartSubElement(glob, &start, CuePoint);
- {
- EbmlLoc start;
+ Ebml_StartSubElement(glob, &start, CuePoint);
+ {
+ EbmlLoc start;
- Ebml_SerializeUnsigned(glob, CueTime, cue->time);
+ Ebml_SerializeUnsigned(glob, CueTime, cue->time);
- Ebml_StartSubElement(glob, &start, CueTrackPositions);
- Ebml_SerializeUnsigned(glob, CueTrack, 1);
- Ebml_SerializeUnsigned64(glob, CueClusterPosition,
- cue->loc - glob->position_reference);
- Ebml_EndSubElement(glob, &start);
- }
- Ebml_EndSubElement(glob, &start);
- }
+ Ebml_StartSubElement(glob, &start, CueTrackPositions);
+ Ebml_SerializeUnsigned(glob, CueTrack, 1);
+ Ebml_SerializeUnsigned64(glob, CueClusterPosition,
+ cue->loc - glob->position_reference);
Ebml_EndSubElement(glob, &start);
+ }
+ Ebml_EndSubElement(glob, &start);
}
+ Ebml_EndSubElement(glob, &start);
+ }
- Ebml_EndSubElement(glob, &glob->startSegment);
+ Ebml_EndSubElement(glob, &glob->startSegment);
- /* Patch up the seek info block */
- write_webm_seek_info(glob);
+ /* Patch up the seek info block */
+ write_webm_seek_info(glob);
- /* Patch up the track id */
- fseeko(glob->stream, glob->track_id_pos, SEEK_SET);
- Ebml_SerializeUnsigned32(glob, TrackUID, glob->debug ? 0xDEADBEEF : hash);
+ /* Patch up the track id */
+ fseeko(glob->stream, glob->track_id_pos, SEEK_SET);
+ Ebml_SerializeUnsigned32(glob, TrackUID, glob->debug ? 0xDEADBEEF : hash);
- fseeko(glob->stream, 0, SEEK_END);
+ fseeko(glob->stream, 0, SEEK_END);
}
/* Murmur hash derived from public domain reference implementation at
- * http://sites.google.com/site/murmurhash/
+ * http://sites.google.com/site/murmurhash/
*/
-static unsigned int murmur ( const void * key, int len, unsigned int seed )
-{
- const unsigned int m = 0x5bd1e995;
- const int r = 24;
+static unsigned int murmur(const void *key, int len, unsigned int seed) {
+ const unsigned int m = 0x5bd1e995;
+ const int r = 24;
- unsigned int h = seed ^ len;
+ unsigned int h = seed ^ len;
- const unsigned char * data = (const unsigned char *)key;
-
- while(len >= 4)
- {
- unsigned int k;
+ const unsigned char *data = (const unsigned char *)key;
- k = data[0];
- k |= data[1] << 8;
- k |= data[2] << 16;
- k |= data[3] << 24;
+ while (len >= 4) {
+ unsigned int k;
- k *= m;
- k ^= k >> r;
- k *= m;
-
- h *= m;
- h ^= k;
-
- data += 4;
- len -= 4;
- }
+ k = data[0];
+ k |= data[1] << 8;
+ k |= data[2] << 16;
+ k |= data[3] << 24;
- switch(len)
- {
- case 3: h ^= data[2] << 16;
- case 2: h ^= data[1] << 8;
- case 1: h ^= data[0];
- h *= m;
- };
+ k *= m;
+ k ^= k >> r;
+ k *= m;
- h ^= h >> 13;
h *= m;
- h ^= h >> 15;
-
- return h;
+ h ^= k;
+
+ data += 4;
+ len -= 4;
+ }
+
+ switch (len) {
+ case 3:
+ h ^= data[2] << 16;
+ case 2:
+ h ^= data[1] << 8;
+ case 1:
+ h ^= data[0];
+ h *= m;
+ };
+
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+
+ return h;
}
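/* Illustration, not part of this change: elsewhere in vpxenc the hash is
 * folded over each compressed frame, along the lines of
 *
 *   stream->hash = murmur(pkt->data.frame.buf,
 *                         (int)pkt->data.frame.sz, stream->hash);
 *
 * and the result becomes the WebM TrackUID patched in by
 * write_webm_file_footer() (0xDEADBEEF under --debug, keeping the output
 * deterministic).  The exact call site is outside the lines shown here. */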
#include "math.h"
+#define MAX_PSNR 100
+static double vp8_mse2psnr(double Samples, double Peak, double Mse) {
+ double psnr;
-static double vp8_mse2psnr(double Samples, double Peak, double Mse)
-{
- double psnr;
+ if ((double)Mse > 0.0)
+ psnr = 10.0 * log10(Peak * Peak * Samples / Mse);
+ else
+ psnr = MAX_PSNR; /* Limit to prevent / 0 */
- if ((double)Mse > 0.0)
- psnr = 10.0 * log10(Peak * Peak * Samples / Mse);
- else
- psnr = 60; /* Limit to prevent / 0 */
+ if (psnr > MAX_PSNR)
+ psnr = MAX_PSNR;
- if (psnr > 60)
- psnr = 60;
-
- return psnr;
+ return psnr;
}
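/* Note, not part of this change: vp8_mse2psnr() computes
 *
 *   PSNR = 10 * log10(Peak^2 * Samples / Mse)
 *
 * where Mse is the *summed* squared error over Samples values.  For
 * example, a 640x480 8-bit frame (Samples = 307200, Peak = 255) with a
 * total squared error of 1,000,000 gives
 * 10 * log10(255^2 * 307200 / 1e6) ~= 43.0 dB.  The result is clamped to
 * MAX_PSNR (100 dB) for identical frames, where Mse would be zero. */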
#include "args.h"
static const arg_def_t debugmode = ARG_DEF("D", "debug", 0,
- "Debug mode (makes output deterministic)");
+ "Debug mode (makes output deterministic)");
static const arg_def_t outputfile = ARG_DEF("o", "output", 1,
- "Output filename");
+ "Output filename");
static const arg_def_t use_yv12 = ARG_DEF(NULL, "yv12", 0,
- "Input file is YV12 ");
+ "Input file is YV12 ");
static const arg_def_t use_i420 = ARG_DEF(NULL, "i420", 0,
- "Input file is I420 (default)");
+ "Input file is I420 (default)");
static const arg_def_t codecarg = ARG_DEF(NULL, "codec", 1,
- "Codec to use");
+ "Codec to use");
static const arg_def_t passes = ARG_DEF("p", "passes", 1,
- "Number of passes (1/2)");
+ "Number of passes (1/2)");
static const arg_def_t pass_arg = ARG_DEF(NULL, "pass", 1,
- "Pass to execute (1/2)");
+ "Pass to execute (1/2)");
static const arg_def_t fpf_name = ARG_DEF(NULL, "fpf", 1,
- "First pass statistics file name");
+ "First pass statistics file name");
static const arg_def_t limit = ARG_DEF(NULL, "limit", 1,
"Stop encoding after n input frames");
+static const arg_def_t skip = ARG_DEF(NULL, "skip", 1,
+ "Skip the first n input frames");
static const arg_def_t deadline = ARG_DEF("d", "deadline", 1,
- "Deadline per frame (usec)");
+ "Deadline per frame (usec)");
static const arg_def_t best_dl = ARG_DEF(NULL, "best", 0,
- "Use Best Quality Deadline");
+ "Use Best Quality Deadline");
static const arg_def_t good_dl = ARG_DEF(NULL, "good", 0,
- "Use Good Quality Deadline");
+ "Use Good Quality Deadline");
static const arg_def_t rt_dl = ARG_DEF(NULL, "rt", 0,
- "Use Realtime Quality Deadline");
+ "Use Realtime Quality Deadline");
static const arg_def_t quietarg = ARG_DEF("q", "quiet", 0,
- "Do not print encode progress");
+ "Do not print encode progress");
static const arg_def_t verbosearg = ARG_DEF("v", "verbose", 0,
- "Show encoder parameters");
+ "Show encoder parameters");
static const arg_def_t psnrarg = ARG_DEF(NULL, "psnr", 0,
- "Show PSNR in status line");
+ "Show PSNR in status line");
+enum TestDecodeFatality {
+ TEST_DECODE_OFF,
+ TEST_DECODE_FATAL,
+ TEST_DECODE_WARN,
+};
+static const struct arg_enum_list test_decode_enum[] = {
+ {"off", TEST_DECODE_OFF},
+ {"fatal", TEST_DECODE_FATAL},
+ {"warn", TEST_DECODE_WARN},
+ {NULL, 0}
+};
+static const arg_def_t recontest = ARG_DEF_ENUM(NULL, "test-decode", 1,
+ "Test encode/decode mismatch",
+ test_decode_enum);
static const arg_def_t framerate = ARG_DEF(NULL, "fps", 1,
- "Stream frame rate (rate/scale)");
+ "Stream frame rate (rate/scale)");
static const arg_def_t use_ivf = ARG_DEF(NULL, "ivf", 0,
- "Output IVF (default is WebM)");
+ "Output IVF (default is WebM)");
static const arg_def_t out_part = ARG_DEF("P", "output-partitions", 0,
- "Makes encoder output partitions. Requires IVF output!");
+ "Makes encoder output partitions. Requires IVF output!");
static const arg_def_t q_hist_n = ARG_DEF(NULL, "q-hist", 1,
- "Show quantizer histogram (n-buckets)");
+ "Show quantizer histogram (n-buckets)");
static const arg_def_t rate_hist_n = ARG_DEF(NULL, "rate-hist", 1,
- "Show rate histogram (n-buckets)");
-static const arg_def_t *main_args[] =
-{
- &debugmode,
- &outputfile, &codecarg, &passes, &pass_arg, &fpf_name, &limit, &deadline,
- &best_dl, &good_dl, &rt_dl,
- &quietarg, &verbosearg, &psnrarg, &use_ivf, &out_part, &q_hist_n, &rate_hist_n,
- NULL
+ "Show rate histogram (n-buckets)");
+static const arg_def_t *main_args[] = {
+ &debugmode,
+ &outputfile, &codecarg, &passes, &pass_arg, &fpf_name, &limit, &skip,
+ &deadline, &best_dl, &good_dl, &rt_dl,
+ &quietarg, &verbosearg, &psnrarg, &use_ivf, &out_part, &q_hist_n, &rate_hist_n,
+ NULL
};
static const arg_def_t usage = ARG_DEF("u", "usage", 1,
- "Usage profile number to use");
+ "Usage profile number to use");
static const arg_def_t threads = ARG_DEF("t", "threads", 1,
- "Max number of threads to use");
+ "Max number of threads to use");
static const arg_def_t profile = ARG_DEF(NULL, "profile", 1,
- "Bitstream profile number to use");
+ "Bitstream profile number to use");
static const arg_def_t width = ARG_DEF("w", "width", 1,
- "Frame width");
+ "Frame width");
static const arg_def_t height = ARG_DEF("h", "height", 1,
- "Frame height");
+ "Frame height");
static const struct arg_enum_list stereo_mode_enum[] = {
- {"mono" , STEREO_FORMAT_MONO},
- {"left-right", STEREO_FORMAT_LEFT_RIGHT},
- {"bottom-top", STEREO_FORMAT_BOTTOM_TOP},
- {"top-bottom", STEREO_FORMAT_TOP_BOTTOM},
- {"right-left", STEREO_FORMAT_RIGHT_LEFT},
- {NULL, 0}
+ {"mono", STEREO_FORMAT_MONO},
+ {"left-right", STEREO_FORMAT_LEFT_RIGHT},
+ {"bottom-top", STEREO_FORMAT_BOTTOM_TOP},
+ {"top-bottom", STEREO_FORMAT_TOP_BOTTOM},
+ {"right-left", STEREO_FORMAT_RIGHT_LEFT},
+ {NULL, 0}
};
static const arg_def_t stereo_mode = ARG_DEF_ENUM(NULL, "stereo-mode", 1,
- "Stereo 3D video format", stereo_mode_enum);
+ "Stereo 3D video format", stereo_mode_enum);
static const arg_def_t timebase = ARG_DEF(NULL, "timebase", 1,
- "Output timestamp precision (fractional seconds)");
+ "Output timestamp precision (fractional seconds)");
static const arg_def_t error_resilient = ARG_DEF(NULL, "error-resilient", 1,
- "Enable error resiliency features");
+ "Enable error resiliency features");
static const arg_def_t lag_in_frames = ARG_DEF(NULL, "lag-in-frames", 1,
- "Max number of frames to lag");
+ "Max number of frames to lag");
-static const arg_def_t *global_args[] =
-{
- &use_yv12, &use_i420, &usage, &threads, &profile,
- &width, &height, &stereo_mode, &timebase, &framerate, &error_resilient,
- &lag_in_frames, NULL
+static const arg_def_t *global_args[] = {
+ &use_yv12, &use_i420, &usage, &threads, &profile,
+ &width, &height, &stereo_mode, &timebase, &framerate,
+ &error_resilient,
+ &lag_in_frames, NULL
};
static const arg_def_t dropframe_thresh = ARG_DEF(NULL, "drop-frame", 1,
- "Temporal resampling threshold (buf %)");
+ "Temporal resampling threshold (buf %)");
static const arg_def_t resize_allowed = ARG_DEF(NULL, "resize-allowed", 1,
- "Spatial resampling enabled (bool)");
+ "Spatial resampling enabled (bool)");
static const arg_def_t resize_up_thresh = ARG_DEF(NULL, "resize-up", 1,
- "Upscale threshold (buf %)");
+ "Upscale threshold (buf %)");
static const arg_def_t resize_down_thresh = ARG_DEF(NULL, "resize-down", 1,
- "Downscale threshold (buf %)");
+ "Downscale threshold (buf %)");
static const struct arg_enum_list end_usage_enum[] = {
- {"vbr", VPX_VBR},
- {"cbr", VPX_CBR},
- {"cq", VPX_CQ},
- {NULL, 0}
+ {"vbr", VPX_VBR},
+ {"cbr", VPX_CBR},
+ {"cq", VPX_CQ},
+ {NULL, 0}
};
static const arg_def_t end_usage = ARG_DEF_ENUM(NULL, "end-usage", 1,
- "Rate control mode", end_usage_enum);
+ "Rate control mode", end_usage_enum);
static const arg_def_t target_bitrate = ARG_DEF(NULL, "target-bitrate", 1,
- "Bitrate (kbps)");
+ "Bitrate (kbps)");
static const arg_def_t min_quantizer = ARG_DEF(NULL, "min-q", 1,
- "Minimum (best) quantizer");
+ "Minimum (best) quantizer");
static const arg_def_t max_quantizer = ARG_DEF(NULL, "max-q", 1,
- "Maximum (worst) quantizer");
+ "Maximum (worst) quantizer");
static const arg_def_t undershoot_pct = ARG_DEF(NULL, "undershoot-pct", 1,
- "Datarate undershoot (min) target (%)");
+ "Datarate undershoot (min) target (%)");
static const arg_def_t overshoot_pct = ARG_DEF(NULL, "overshoot-pct", 1,
- "Datarate overshoot (max) target (%)");
+ "Datarate overshoot (max) target (%)");
static const arg_def_t buf_sz = ARG_DEF(NULL, "buf-sz", 1,
- "Client buffer size (ms)");
+ "Client buffer size (ms)");
static const arg_def_t buf_initial_sz = ARG_DEF(NULL, "buf-initial-sz", 1,
- "Client initial buffer size (ms)");
+ "Client initial buffer size (ms)");
static const arg_def_t buf_optimal_sz = ARG_DEF(NULL, "buf-optimal-sz", 1,
- "Client optimal buffer size (ms)");
-static const arg_def_t *rc_args[] =
-{
- &dropframe_thresh, &resize_allowed, &resize_up_thresh, &resize_down_thresh,
- &end_usage, &target_bitrate, &min_quantizer, &max_quantizer,
- &undershoot_pct, &overshoot_pct, &buf_sz, &buf_initial_sz, &buf_optimal_sz,
- NULL
+ "Client optimal buffer size (ms)");
+static const arg_def_t *rc_args[] = {
+ &dropframe_thresh, &resize_allowed, &resize_up_thresh, &resize_down_thresh,
+ &end_usage, &target_bitrate, &min_quantizer, &max_quantizer,
+ &undershoot_pct, &overshoot_pct, &buf_sz, &buf_initial_sz, &buf_optimal_sz,
+ NULL
};
static const arg_def_t bias_pct = ARG_DEF(NULL, "bias-pct", 1,
- "CBR/VBR bias (0=CBR, 100=VBR)");
+ "CBR/VBR bias (0=CBR, 100=VBR)");
static const arg_def_t minsection_pct = ARG_DEF(NULL, "minsection-pct", 1,
- "GOP min bitrate (% of target)");
+ "GOP min bitrate (% of target)");
static const arg_def_t maxsection_pct = ARG_DEF(NULL, "maxsection-pct", 1,
- "GOP max bitrate (% of target)");
-static const arg_def_t *rc_twopass_args[] =
-{
- &bias_pct, &minsection_pct, &maxsection_pct, NULL
+ "GOP max bitrate (% of target)");
+static const arg_def_t *rc_twopass_args[] = {
+ &bias_pct, &minsection_pct, &maxsection_pct, NULL
};
static const arg_def_t kf_min_dist = ARG_DEF(NULL, "kf-min-dist", 1,
- "Minimum keyframe interval (frames)");
+ "Minimum keyframe interval (frames)");
static const arg_def_t kf_max_dist = ARG_DEF(NULL, "kf-max-dist", 1,
- "Maximum keyframe interval (frames)");
+ "Maximum keyframe interval (frames)");
static const arg_def_t kf_disabled = ARG_DEF(NULL, "disable-kf", 0,
- "Disable keyframe placement");
-static const arg_def_t *kf_args[] =
-{
- &kf_min_dist, &kf_max_dist, &kf_disabled, NULL
+ "Disable keyframe placement");
+static const arg_def_t *kf_args[] = {
+ &kf_min_dist, &kf_max_dist, &kf_disabled, NULL
};
-#if CONFIG_VP8_ENCODER
static const arg_def_t noise_sens = ARG_DEF(NULL, "noise-sensitivity", 1,
- "Noise sensitivity (frames to blur)");
+ "Noise sensitivity (frames to blur)");
static const arg_def_t sharpness = ARG_DEF(NULL, "sharpness", 1,
- "Filter sharpness (0-7)");
+ "Filter sharpness (0-7)");
static const arg_def_t static_thresh = ARG_DEF(NULL, "static-thresh", 1,
- "Motion detection threshold");
-#endif
-
-#if CONFIG_VP8_ENCODER
+ "Motion detection threshold");
static const arg_def_t cpu_used = ARG_DEF(NULL, "cpu-used", 1,
- "CPU Used (-16..16)");
-#endif
-
-
-#if CONFIG_VP8_ENCODER
+ "CPU Used (-16..16)");
static const arg_def_t token_parts = ARG_DEF(NULL, "token-parts", 1,
"Number of token partitions to use, log2");
+static const arg_def_t tile_cols = ARG_DEF(NULL, "tile-columns", 1,
+ "Number of tile columns to use, log2");
+static const arg_def_t tile_rows = ARG_DEF(NULL, "tile-rows", 1,
+ "Number of tile rows to use, log2");
static const arg_def_t auto_altref = ARG_DEF(NULL, "auto-alt-ref", 1,
- "Enable automatic alt reference frames");
+ "Enable automatic alt reference frames");
static const arg_def_t arnr_maxframes = ARG_DEF(NULL, "arnr-maxframes", 1,
- "AltRef Max Frames");
+ "AltRef Max Frames");
static const arg_def_t arnr_strength = ARG_DEF(NULL, "arnr-strength", 1,
- "AltRef Strength");
+ "AltRef Strength");
static const arg_def_t arnr_type = ARG_DEF(NULL, "arnr-type", 1,
- "AltRef Type");
+ "AltRef Type");
static const struct arg_enum_list tuning_enum[] = {
- {"psnr", VP8_TUNE_PSNR},
- {"ssim", VP8_TUNE_SSIM},
- {NULL, 0}
+ {"psnr", VP8_TUNE_PSNR},
+ {"ssim", VP8_TUNE_SSIM},
+ {NULL, 0}
};
static const arg_def_t tune_ssim = ARG_DEF_ENUM(NULL, "tune", 1,
- "Material to favor", tuning_enum);
+ "Material to favor", tuning_enum);
static const arg_def_t cq_level = ARG_DEF(NULL, "cq-level", 1,
- "Constrained Quality Level");
+ "Constrained Quality Level");
static const arg_def_t max_intra_rate_pct = ARG_DEF(NULL, "max-intra-rate", 1,
- "Max I-frame bitrate (pct)");
+ "Max I-frame bitrate (pct)");
+static const arg_def_t lossless = ARG_DEF(NULL, "lossless", 1, "Lossless mode");
+#if CONFIG_VP9_ENCODER
+static const arg_def_t frame_parallel_decoding = ARG_DEF(
+ NULL, "frame-parallel", 1, "Enable frame parallel decodability features");
+#endif
-static const arg_def_t *vp8_args[] =
-{
- &cpu_used, &auto_altref, &noise_sens, &sharpness, &static_thresh,
- &token_parts, &arnr_maxframes, &arnr_strength, &arnr_type,
- &tune_ssim, &cq_level, &max_intra_rate_pct, NULL
+#if CONFIG_VP8_ENCODER
+static const arg_def_t *vp8_args[] = {
+ &cpu_used, &auto_altref, &noise_sens, &sharpness, &static_thresh,
+ &token_parts, &arnr_maxframes, &arnr_strength, &arnr_type,
+ &tune_ssim, &cq_level, &max_intra_rate_pct,
+ NULL
};
-static const int vp8_arg_ctrl_map[] =
-{
- VP8E_SET_CPUUSED, VP8E_SET_ENABLEAUTOALTREF,
- VP8E_SET_NOISE_SENSITIVITY, VP8E_SET_SHARPNESS, VP8E_SET_STATIC_THRESHOLD,
- VP8E_SET_TOKEN_PARTITIONS,
- VP8E_SET_ARNR_MAXFRAMES, VP8E_SET_ARNR_STRENGTH , VP8E_SET_ARNR_TYPE,
- VP8E_SET_TUNING, VP8E_SET_CQ_LEVEL, VP8E_SET_MAX_INTRA_BITRATE_PCT, 0
+static const int vp8_arg_ctrl_map[] = {
+ VP8E_SET_CPUUSED, VP8E_SET_ENABLEAUTOALTREF,
+ VP8E_SET_NOISE_SENSITIVITY, VP8E_SET_SHARPNESS, VP8E_SET_STATIC_THRESHOLD,
+ VP8E_SET_TOKEN_PARTITIONS,
+ VP8E_SET_ARNR_MAXFRAMES, VP8E_SET_ARNR_STRENGTH, VP8E_SET_ARNR_TYPE,
+ VP8E_SET_TUNING, VP8E_SET_CQ_LEVEL, VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ 0
+};
+#endif
+
+#if CONFIG_VP9_ENCODER
+static const arg_def_t *vp9_args[] = {
+ &cpu_used, &auto_altref, &noise_sens, &sharpness, &static_thresh,
+ &tile_cols, &tile_rows, &arnr_maxframes, &arnr_strength, &arnr_type,
+ &tune_ssim, &cq_level, &max_intra_rate_pct, &lossless,
+ &frame_parallel_decoding,
+ NULL
+};
+static const int vp9_arg_ctrl_map[] = {
+ VP8E_SET_CPUUSED, VP8E_SET_ENABLEAUTOALTREF,
+ VP8E_SET_NOISE_SENSITIVITY, VP8E_SET_SHARPNESS, VP8E_SET_STATIC_THRESHOLD,
+ VP9E_SET_TILE_COLUMNS, VP9E_SET_TILE_ROWS,
+ VP8E_SET_ARNR_MAXFRAMES, VP8E_SET_ARNR_STRENGTH, VP8E_SET_ARNR_TYPE,
+ VP8E_SET_TUNING, VP8E_SET_CQ_LEVEL, VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ VP9E_SET_LOSSLESS, VP9E_SET_FRAME_PARALLEL_DECODING,
+ 0
};
#endif
static const arg_def_t *no_args[] = { NULL };
-static void usage_exit()
-{
- int i;
-
- fprintf(stderr, "Usage: %s <options> -o dst_filename src_filename \n",
- exec_name);
-
- fprintf(stderr, "\nOptions:\n");
- arg_show_usage(stdout, main_args);
- fprintf(stderr, "\nEncoder Global Options:\n");
- arg_show_usage(stdout, global_args);
- fprintf(stderr, "\nRate Control Options:\n");
- arg_show_usage(stdout, rc_args);
- fprintf(stderr, "\nTwopass Rate Control Options:\n");
- arg_show_usage(stdout, rc_twopass_args);
- fprintf(stderr, "\nKeyframe Placement Options:\n");
- arg_show_usage(stdout, kf_args);
+static void usage_exit() {
+ int i;
+
+ fprintf(stderr, "Usage: %s <options> -o dst_filename src_filename \n",
+ exec_name);
+
+ fprintf(stderr, "\nOptions:\n");
+ arg_show_usage(stdout, main_args);
+ fprintf(stderr, "\nEncoder Global Options:\n");
+ arg_show_usage(stdout, global_args);
+ fprintf(stderr, "\nRate Control Options:\n");
+ arg_show_usage(stdout, rc_args);
+ fprintf(stderr, "\nTwopass Rate Control Options:\n");
+ arg_show_usage(stdout, rc_twopass_args);
+ fprintf(stderr, "\nKeyframe Placement Options:\n");
+ arg_show_usage(stdout, kf_args);
#if CONFIG_VP8_ENCODER
- fprintf(stderr, "\nVP8 Specific Options:\n");
- arg_show_usage(stdout, vp8_args);
+ fprintf(stderr, "\nVP8 Specific Options:\n");
+ arg_show_usage(stdout, vp8_args);
#endif
- fprintf(stderr, "\nStream timebase (--timebase):\n"
- " The desired precision of timestamps in the output, expressed\n"
- " in fractional seconds. Default is 1/1000.\n");
- fprintf(stderr, "\n"
- "Included encoders:\n"
- "\n");
-
- for (i = 0; i < sizeof(codecs) / sizeof(codecs[0]); i++)
- fprintf(stderr, " %-6s - %s\n",
- codecs[i].name,
- vpx_codec_iface_name(codecs[i].iface));
-
- exit(EXIT_FAILURE);
+#if CONFIG_VP9_ENCODER
+ fprintf(stderr, "\nVP9 Specific Options:\n");
+ arg_show_usage(stdout, vp9_args);
+#endif
+ fprintf(stderr, "\nStream timebase (--timebase):\n"
+ " The desired precision of timestamps in the output, expressed\n"
+ " in fractional seconds. Default is 1/1000.\n");
+ fprintf(stderr, "\n"
+ "Included encoders:\n"
+ "\n");
+
+ for (i = 0; i < sizeof(codecs) / sizeof(codecs[0]); i++)
+ fprintf(stderr, " %-6s - %s\n",
+ codecs[i].name,
+ vpx_codec_iface_name(codecs[i].iface()));
+
+ exit(EXIT_FAILURE);
}
#define HIST_BAR_MAX 40
-struct hist_bucket
-{
- int low, high, count;
+struct hist_bucket {
+ int low, high, count;
};
static int merge_hist_buckets(struct hist_bucket *bucket,
int *buckets_,
- int max_buckets)
-{
- int small_bucket = 0, merge_bucket = INT_MAX, big_bucket=0;
- int buckets = *buckets_;
- int i;
-
- /* Find the extrema for this list of buckets */
- big_bucket = small_bucket = 0;
- for(i=0; i < buckets; i++)
- {
- if(bucket[i].count < bucket[small_bucket].count)
- small_bucket = i;
- if(bucket[i].count > bucket[big_bucket].count)
- big_bucket = i;
+ int max_buckets) {
+ int small_bucket = 0, merge_bucket = INT_MAX, big_bucket = 0;
+ int buckets = *buckets_;
+ int i;
+
+ /* Find the extrema for this list of buckets */
+ big_bucket = small_bucket = 0;
+ for (i = 0; i < buckets; i++) {
+ if (bucket[i].count < bucket[small_bucket].count)
+ small_bucket = i;
+ if (bucket[i].count > bucket[big_bucket].count)
+ big_bucket = i;
+ }
+
+ /* If we have too many buckets, merge the smallest with an adjacent
+ * bucket.
+ */
+ while (buckets > max_buckets) {
+ int last_bucket = buckets - 1;
+
+ /* merge the small bucket with an adjacent one. */
+ if (small_bucket == 0)
+ merge_bucket = 1;
+ else if (small_bucket == last_bucket)
+ merge_bucket = last_bucket - 1;
+ else if (bucket[small_bucket - 1].count < bucket[small_bucket + 1].count)
+ merge_bucket = small_bucket - 1;
+ else
+ merge_bucket = small_bucket + 1;
+
+ assert(abs(merge_bucket - small_bucket) <= 1);
+ assert(small_bucket < buckets);
+ assert(big_bucket < buckets);
+ assert(merge_bucket < buckets);
+
+ if (merge_bucket < small_bucket) {
+ bucket[merge_bucket].high = bucket[small_bucket].high;
+ bucket[merge_bucket].count += bucket[small_bucket].count;
+ } else {
+ bucket[small_bucket].high = bucket[merge_bucket].high;
+ bucket[small_bucket].count += bucket[merge_bucket].count;
+ merge_bucket = small_bucket;
}
- /* If we have too many buckets, merge the smallest with an adjacent
- * bucket.
- */
- while(buckets > max_buckets)
- {
- int last_bucket = buckets - 1;
-
- /* merge the small bucket with an adjacent one. */
- if(small_bucket == 0)
- merge_bucket = 1;
- else if(small_bucket == last_bucket)
- merge_bucket = last_bucket - 1;
- else if(bucket[small_bucket - 1].count < bucket[small_bucket + 1].count)
- merge_bucket = small_bucket - 1;
- else
- merge_bucket = small_bucket + 1;
-
- assert(abs(merge_bucket - small_bucket) <= 1);
- assert(small_bucket < buckets);
- assert(big_bucket < buckets);
- assert(merge_bucket < buckets);
-
- if(merge_bucket < small_bucket)
- {
- bucket[merge_bucket].high = bucket[small_bucket].high;
- bucket[merge_bucket].count += bucket[small_bucket].count;
- }
- else
- {
- bucket[small_bucket].high = bucket[merge_bucket].high;
- bucket[small_bucket].count += bucket[merge_bucket].count;
- merge_bucket = small_bucket;
- }
-
- assert(bucket[merge_bucket].low != bucket[merge_bucket].high);
+ assert(bucket[merge_bucket].low != bucket[merge_bucket].high);
- buckets--;
-
- /* Remove the merge_bucket from the list, and find the new small
- * and big buckets while we're at it
- */
- big_bucket = small_bucket = 0;
- for(i=0; i < buckets; i++)
- {
- if(i > merge_bucket)
- bucket[i] = bucket[i+1];
-
- if(bucket[i].count < bucket[small_bucket].count)
- small_bucket = i;
- if(bucket[i].count > bucket[big_bucket].count)
- big_bucket = i;
- }
+ buckets--;
+ /* Remove the merge_bucket from the list, and find the new small
+ * and big buckets while we're at it
+ */
+ big_bucket = small_bucket = 0;
+ for (i = 0; i < buckets; i++) {
+ if (i > merge_bucket)
+ bucket[i] = bucket[i + 1];
+
+ if (bucket[i].count < bucket[small_bucket].count)
+ small_bucket = i;
+ if (bucket[i].count > bucket[big_bucket].count)
+ big_bucket = i;
}
- *buckets_ = buckets;
- return bucket[big_bucket].count;
+ }
+
+ *buckets_ = buckets;
+ return bucket[big_bucket].count;
}
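/* Note, not part of this change: a worked example of the merge above.  With
 * bucket counts {5, 1, 9, 4} and max_buckets == 3, the smallest bucket
 * (count 1) is merged into its smaller adjacent neighbor (count 5),
 * leaving {6, 9, 4}; the returned scale is the largest count, 9. */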
static void show_histogram(const struct hist_bucket *bucket,
int buckets,
int total,
- int scale)
-{
- const char *pat1, *pat2;
- int i;
-
- switch((int)(log(bucket[buckets-1].high)/log(10))+1)
- {
- case 1:
- case 2:
- pat1 = "%4d %2s: ";
- pat2 = "%4d-%2d: ";
- break;
- case 3:
- pat1 = "%5d %3s: ";
- pat2 = "%5d-%3d: ";
- break;
- case 4:
- pat1 = "%6d %4s: ";
- pat2 = "%6d-%4d: ";
- break;
- case 5:
- pat1 = "%7d %5s: ";
- pat2 = "%7d-%5d: ";
- break;
- case 6:
- pat1 = "%8d %6s: ";
- pat2 = "%8d-%6d: ";
- break;
- case 7:
- pat1 = "%9d %7s: ";
- pat2 = "%9d-%7d: ";
- break;
- default:
- pat1 = "%12d %10s: ";
- pat2 = "%12d-%10d: ";
- break;
- }
+ int scale) {
+ const char *pat1, *pat2;
+ int i;
+
+ switch ((int)(log(bucket[buckets - 1].high) / log(10)) + 1) {
+ case 1:
+ case 2:
+ pat1 = "%4d %2s: ";
+ pat2 = "%4d-%2d: ";
+ break;
+ case 3:
+ pat1 = "%5d %3s: ";
+ pat2 = "%5d-%3d: ";
+ break;
+ case 4:
+ pat1 = "%6d %4s: ";
+ pat2 = "%6d-%4d: ";
+ break;
+ case 5:
+ pat1 = "%7d %5s: ";
+ pat2 = "%7d-%5d: ";
+ break;
+ case 6:
+ pat1 = "%8d %6s: ";
+ pat2 = "%8d-%6d: ";
+ break;
+ case 7:
+ pat1 = "%9d %7s: ";
+ pat2 = "%9d-%7d: ";
+ break;
+ default:
+ pat1 = "%12d %10s: ";
+ pat2 = "%12d-%10d: ";
+ break;
+ }
+
+ for (i = 0; i < buckets; i++) {
+ int len;
+ int j;
+ float pct;
+
+ pct = (float)(100.0 * bucket[i].count / total);
+ len = HIST_BAR_MAX * bucket[i].count / scale;
+ if (len < 1)
+ len = 1;
+ assert(len <= HIST_BAR_MAX);
+
+ if (bucket[i].low == bucket[i].high)
+ fprintf(stderr, pat1, bucket[i].low, "");
+ else
+ fprintf(stderr, pat2, bucket[i].low, bucket[i].high);
- for(i=0; i<buckets; i++)
- {
- int len;
- int j;
- float pct;
-
- pct = (float)(100.0 * bucket[i].count / total);
- len = HIST_BAR_MAX * bucket[i].count / scale;
- if(len < 1)
- len = 1;
- assert(len <= HIST_BAR_MAX);
-
- if(bucket[i].low == bucket[i].high)
- fprintf(stderr, pat1, bucket[i].low, "");
- else
- fprintf(stderr, pat2, bucket[i].low, bucket[i].high);
-
- for(j=0; j<HIST_BAR_MAX; j++)
- fprintf(stderr, j<len?"=":" ");
- fprintf(stderr, "\t%5d (%6.2f%%)\n",bucket[i].count,pct);
- }
+ for (j = 0; j < HIST_BAR_MAX; j++)
+ fprintf(stderr, j < len ? "=" : " ");
+ fprintf(stderr, "\t%5d (%6.2f%%)\n", bucket[i].count, pct);
+ }
}
-static void show_q_histogram(const int counts[64], int max_buckets)
-{
- struct hist_bucket bucket[64];
- int buckets = 0;
- int total = 0;
- int scale;
- int i;
+static void show_q_histogram(const int counts[64], int max_buckets) {
+ struct hist_bucket bucket[64];
+ int buckets = 0;
+ int total = 0;
+ int scale;
+ int i;
- for(i=0; i<64; i++)
- {
- if(counts[i])
- {
- bucket[buckets].low = bucket[buckets].high = i;
- bucket[buckets].count = counts[i];
- buckets++;
- total += counts[i];
- }
+ for (i = 0; i < 64; i++) {
+ if (counts[i]) {
+ bucket[buckets].low = bucket[buckets].high = i;
+ bucket[buckets].count = counts[i];
+ buckets++;
+ total += counts[i];
}
+ }
- fprintf(stderr, "\nQuantizer Selection:\n");
- scale = merge_hist_buckets(bucket, &buckets, max_buckets);
- show_histogram(bucket, buckets, total, scale);
+ fprintf(stderr, "\nQuantizer Selection:\n");
+ scale = merge_hist_buckets(bucket, &buckets, max_buckets);
+ show_histogram(bucket, buckets, total, scale);
}
#define RATE_BINS (100)
-struct rate_hist
-{
- int64_t *pts;
- int *sz;
- int samples;
- int frames;
- struct hist_bucket bucket[RATE_BINS];
- int total;
+struct rate_hist {
+ int64_t *pts;
+ int *sz;
+ int samples;
+ int frames;
+ struct hist_bucket bucket[RATE_BINS];
+ int total;
};
static void init_rate_histogram(struct rate_hist *hist,
const vpx_codec_enc_cfg_t *cfg,
- const vpx_rational_t *fps)
-{
- int i;
-
- /* Determine the number of samples in the buffer. Use the file's framerate
- * to determine the number of frames in rc_buf_sz milliseconds, with an
- * adjustment (5/4) to account for alt-refs
- */
- hist->samples = cfg->rc_buf_sz * 5 / 4 * fps->num / fps->den / 1000;
-
- /* prevent division by zero */
- if (hist->samples == 0)
- hist->samples=1;
-
- hist->pts = calloc(hist->samples, sizeof(*hist->pts));
- hist->sz = calloc(hist->samples, sizeof(*hist->sz));
- for(i=0; i<RATE_BINS; i++)
- {
- hist->bucket[i].low = INT_MAX;
- hist->bucket[i].high = 0;
- hist->bucket[i].count = 0;
- }
+ const vpx_rational_t *fps) {
+ int i;
+
+ /* Determine the number of samples in the buffer. Use the file's framerate
+ * to determine the number of frames in rc_buf_sz milliseconds, with an
+ * adjustment (5/4) to account for alt-refs
+ */
+ hist->samples = cfg->rc_buf_sz * 5 / 4 * fps->num / fps->den / 1000;
+
+ /* prevent division by zero */
+ if (hist->samples == 0)
+ hist->samples = 1;
+
+ hist->pts = calloc(hist->samples, sizeof(*hist->pts));
+ hist->sz = calloc(hist->samples, sizeof(*hist->sz));
+ for (i = 0; i < RATE_BINS; i++) {
+ hist->bucket[i].low = INT_MAX;
+ hist->bucket[i].high = 0;
+ hist->bucket[i].count = 0;
+ }
}
-static void destroy_rate_histogram(struct rate_hist *hist)
-{
- free(hist->pts);
- free(hist->sz);
+static void destroy_rate_histogram(struct rate_hist *hist) {
+ free(hist->pts);
+ free(hist->sz);
}
static void update_rate_histogram(struct rate_hist *hist,
const vpx_codec_enc_cfg_t *cfg,
- const vpx_codec_cx_pkt_t *pkt)
-{
- int i, idx;
- int64_t now, then, sum_sz = 0, avg_bitrate;
-
- now = pkt->data.frame.pts * 1000
- * (uint64_t)cfg->g_timebase.num / (uint64_t)cfg->g_timebase.den;
-
- idx = hist->frames++ % hist->samples;
- hist->pts[idx] = now;
- hist->sz[idx] = (int)pkt->data.frame.sz;
-
- if(now < cfg->rc_buf_initial_sz)
- return;
+ const vpx_codec_cx_pkt_t *pkt) {
+ int i, idx;
+ int64_t now, then, sum_sz = 0, avg_bitrate;
+
+ now = pkt->data.frame.pts * 1000
+ * (uint64_t)cfg->g_timebase.num / (uint64_t)cfg->g_timebase.den;
+
+ idx = hist->frames++ % hist->samples;
+ hist->pts[idx] = now;
+ hist->sz[idx] = (int)pkt->data.frame.sz;
+
+ if (now < cfg->rc_buf_initial_sz)
+ return;
+
+ then = now;
+
+ /* Sum the size over the past rc_buf_sz ms */
+ for (i = hist->frames; i > 0 && hist->frames - i < hist->samples; i--) {
+ int i_idx = (i - 1) % hist->samples;
+
+ then = hist->pts[i_idx];
+ if (now - then > cfg->rc_buf_sz)
+ break;
+ sum_sz += hist->sz[i_idx];
+ }
+
+ if (now == then)
+ return;
+
+ avg_bitrate = sum_sz * 8 * 1000 / (now - then);
+ idx = (int)(avg_bitrate * (RATE_BINS / 2) / (cfg->rc_target_bitrate * 1000));
+ if (idx < 0)
+ idx = 0;
+ if (idx > RATE_BINS - 1)
+ idx = RATE_BINS - 1;
+ if (hist->bucket[idx].low > avg_bitrate)
+ hist->bucket[idx].low = (int)avg_bitrate;
+ if (hist->bucket[idx].high < avg_bitrate)
+ hist->bucket[idx].high = (int)avg_bitrate;
+ hist->bucket[idx].count++;
+ hist->total++;
+}
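/* Note, not part of this change: a worked example of the window math above.
 * If the last 500 ms of frames sum to 30000 bytes, then
 * avg_bitrate = 30000 * 8 * 1000 / 500 = 480000 bps.  With a 300 kbps
 * target, idx = 480000 * (RATE_BINS / 2) / (300 * 1000) = 80; that is,
 * 100% of target maps to bin 50 of 100, so 160% lands in bin 80. */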
- then = now;
- /* Sum the size over the past rc_buf_sz ms */
- for(i = hist->frames; i > 0 && hist->frames - i < hist->samples; i--)
- {
- int i_idx = (i-1) % hist->samples;
+static void show_rate_histogram(struct rate_hist *hist,
+ const vpx_codec_enc_cfg_t *cfg,
+ int max_buckets) {
+ int i, scale;
+ int buckets = 0;
+
+ for (i = 0; i < RATE_BINS; i++) {
+ if (hist->bucket[i].low == INT_MAX)
+ continue;
+ hist->bucket[buckets++] = hist->bucket[i];
+ }
+
+ fprintf(stderr, "\nRate (over %dms window):\n", cfg->rc_buf_sz);
+ scale = merge_hist_buckets(hist->bucket, &buckets, max_buckets);
+ show_histogram(hist->bucket, buckets, hist->total, scale);
+}
- then = hist->pts[i_idx];
- if(now - then > cfg->rc_buf_sz)
+#define mmin(a, b) ((a) < (b) ? (a) : (b))
+static void find_mismatch(vpx_image_t *img1, vpx_image_t *img2,
+ int yloc[4], int uloc[4], int vloc[4]) {
+ const unsigned int bsize = 64;
+ const unsigned int bsizey = bsize >> img1->y_chroma_shift;
+ const unsigned int bsizex = bsize >> img1->x_chroma_shift;
+ const int c_w = (img1->d_w + img1->x_chroma_shift) >> img1->x_chroma_shift;
+ const int c_h = (img1->d_h + img1->y_chroma_shift) >> img1->y_chroma_shift;
+ unsigned int match = 1;
+ unsigned int i, j;
+ yloc[0] = yloc[1] = yloc[2] = yloc[3] = -1;
+ for (i = 0, match = 1; match && i < img1->d_h; i += bsize) {
+ for (j = 0; match && j < img1->d_w; j += bsize) {
+ int k, l;
+ int si = mmin(i + bsize, img1->d_h) - i;
+ int sj = mmin(j + bsize, img1->d_w) - j;
+ for (k = 0; match && k < si; k++)
+ for (l = 0; match && l < sj; l++) {
+ if (*(img1->planes[VPX_PLANE_Y] +
+ (i + k) * img1->stride[VPX_PLANE_Y] + j + l) !=
+ *(img2->planes[VPX_PLANE_Y] +
+ (i + k) * img2->stride[VPX_PLANE_Y] + j + l)) {
+ yloc[0] = i + k;
+ yloc[1] = j + l;
+ yloc[2] = *(img1->planes[VPX_PLANE_Y] +
+ (i + k) * img1->stride[VPX_PLANE_Y] + j + l);
+ yloc[3] = *(img2->planes[VPX_PLANE_Y] +
+ (i + k) * img2->stride[VPX_PLANE_Y] + j + l);
+ match = 0;
break;
- sum_sz += hist->sz[i_idx];
+ }
+ }
}
-
- if (now == then)
- return;
-
- avg_bitrate = sum_sz * 8 * 1000 / (now - then);
- idx = (int)(avg_bitrate * (RATE_BINS/2) / (cfg->rc_target_bitrate * 1000));
- if(idx < 0)
- idx = 0;
- if(idx > RATE_BINS-1)
- idx = RATE_BINS-1;
- if(hist->bucket[idx].low > avg_bitrate)
- hist->bucket[idx].low = (int)avg_bitrate;
- if(hist->bucket[idx].high < avg_bitrate)
- hist->bucket[idx].high = (int)avg_bitrate;
- hist->bucket[idx].count++;
- hist->total++;
+ }
+
+ uloc[0] = uloc[1] = uloc[2] = uloc[3] = -1;
+ for (i = 0, match = 1; match && i < c_h; i += bsizey) {
+ for (j = 0; match && j < c_w; j += bsizex) {
+ int k, l;
+ int si = mmin(i + bsizey, c_h) - i;
+ int sj = mmin(j + bsizex, c_w) - j;
+ for (k = 0; match && k < si; k++)
+ for (l = 0; match && l < sj; l++) {
+ if (*(img1->planes[VPX_PLANE_U] +
+ (i + k) * img1->stride[VPX_PLANE_U] + j + l) !=
+ *(img2->planes[VPX_PLANE_U] +
+ (i + k) * img2->stride[VPX_PLANE_U] + j + l)) {
+ uloc[0] = i + k;
+ uloc[1] = j + l;
+ uloc[2] = *(img1->planes[VPX_PLANE_U] +
+ (i + k) * img1->stride[VPX_PLANE_U] + j + l);
+ uloc[3] = *(img2->planes[VPX_PLANE_U] +
+ (i + k) * img2->stride[VPX_PLANE_U] + j + l);
+ match = 0;
+ break;
+ }
+ }
+ }
+ }
+ vloc[0] = vloc[1] = vloc[2] = vloc[3] = -1;
+ for (i = 0, match = 1; match && i < c_h; i += bsizey) {
+ for (j = 0; match && j < c_w; j += bsizex) {
+ int k, l;
+ int si = mmin(i + bsizey, c_h) - i;
+ int sj = mmin(j + bsizex, c_w) - j;
+ for (k = 0; match && k < si; k++)
+ for (l = 0; match && l < sj; l++) {
+ if (*(img1->planes[VPX_PLANE_V] +
+ (i + k) * img1->stride[VPX_PLANE_V] + j + l) !=
+ *(img2->planes[VPX_PLANE_V] +
+ (i + k) * img2->stride[VPX_PLANE_V] + j + l)) {
+ vloc[0] = i + k;
+ vloc[1] = j + l;
+ vloc[2] = *(img1->planes[VPX_PLANE_V] +
+ (i + k) * img1->stride[VPX_PLANE_V] + j + l);
+ vloc[3] = *(img2->planes[VPX_PLANE_V] +
+ (i + k) * img2->stride[VPX_PLANE_V] + j + l);
+ match = 0;
+ break;
+ }
+ }
+ }
+ }
}
-
-static void show_rate_histogram(struct rate_hist *hist,
- const vpx_codec_enc_cfg_t *cfg,
- int max_buckets)
+static int compare_img(vpx_image_t *img1, vpx_image_t *img2)
{
- int i, scale;
- int buckets = 0;
+ const int c_w = (img1->d_w + img1->x_chroma_shift) >> img1->x_chroma_shift;
+ const int c_h = (img1->d_h + img1->y_chroma_shift) >> img1->y_chroma_shift;
+ int match = 1;
+ unsigned int i;
- for(i = 0; i < RATE_BINS; i++)
- {
- if(hist->bucket[i].low == INT_MAX)
- continue;
- hist->bucket[buckets++] = hist->bucket[i];
- }
+ match &= (img1->fmt == img2->fmt);
+ match &= (img1->w == img2->w);
+ match &= (img1->h == img2->h);
+
+ for (i = 0; i < img1->d_h; i++)
+ match &= (memcmp(img1->planes[VPX_PLANE_Y]+i*img1->stride[VPX_PLANE_Y],
+ img2->planes[VPX_PLANE_Y]+i*img2->stride[VPX_PLANE_Y],
+ img1->d_w) == 0);
+
+ for (i = 0; i < c_h; i++)
+ match &= (memcmp(img1->planes[VPX_PLANE_U]+i*img1->stride[VPX_PLANE_U],
+ img2->planes[VPX_PLANE_U]+i*img2->stride[VPX_PLANE_U],
+ c_w) == 0);
- fprintf(stderr, "\nRate (over %dms window):\n", cfg->rc_buf_sz);
- scale = merge_hist_buckets(hist->bucket, &buckets, max_buckets);
- show_histogram(hist->bucket, buckets, hist->total, scale);
+ for (i = 0; i < c_h; i++)
+ match &= (memcmp(img1->planes[VPX_PLANE_V]+i*img1->stride[VPX_PLANE_V],
+ img2->planes[VPX_PLANE_V]+i*img2->stride[VPX_PLANE_V],
+ c_w) == 0);
+
+ return match;
}
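/* Note, not part of this change: the chroma dimensions above round up, e.g.
 * with d_w == 7 and x_chroma_shift == 1, c_w = (7 + 1) >> 1 = 4, so odd
 * luma sizes still compare every chroma sample. */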
+
#define NELEMENTS(x) (sizeof(x)/sizeof(x[0]))
+#define MAX(x,y) ((x)>(y)?(x):(y))
+#if CONFIG_VP8_ENCODER && !CONFIG_VP9_ENCODER
#define ARG_CTRL_CNT_MAX NELEMENTS(vp8_arg_ctrl_map)
-
+#elif !CONFIG_VP8_ENCODER && CONFIG_VP9_ENCODER
+#define ARG_CTRL_CNT_MAX NELEMENTS(vp9_arg_ctrl_map)
+#else
+#define ARG_CTRL_CNT_MAX MAX(NELEMENTS(vp8_arg_ctrl_map), \
+ NELEMENTS(vp9_arg_ctrl_map))
+#endif
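
The ARG_CTRL_CNT_MAX selection above sizes the per-stream control table for whichever encoders are compiled in. A self-contained illustration of the NELEMENTS/MAX arithmetic, with hypothetical maps standing in for vp8_arg_ctrl_map and vp9_arg_ctrl_map:

    #include <stdio.h>

    #define NELEMENTS(x) (sizeof(x)/sizeof(x[0]))
    #define MAX(x,y) ((x)>(y)?(x):(y))

    int main(void) {
      /* Only the lengths of the stand-in maps matter here. */
      static const int vp8_map[] = { 1, 2, 3 };
      static const int vp9_map[] = { 1, 2, 3, 4, 5 };
      /* With both encoders built, arg_ctrls is sized for the larger map. */
      printf("ARG_CTRL_CNT_MAX would be %zu\n",
             MAX(NELEMENTS(vp8_map), NELEMENTS(vp9_map)));
      return 0;
    }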
/* Configuration elements common to all streams */
-struct global_config
-{
- const struct codec_item *codec;
- int passes;
- int pass;
- int usage;
- int deadline;
- int use_i420;
- int quiet;
- int verbose;
- int limit;
- int show_psnr;
- int have_framerate;
- struct vpx_rational framerate;
- int out_part;
- int debug;
- int show_q_hist_buckets;
- int show_rate_hist_buckets;
+struct global_config {
+ const struct codec_item *codec;
+ int passes;
+ int pass;
+ int usage;
+ int deadline;
+ int use_i420;
+ int quiet;
+ int verbose;
+ int limit;
+ int skip_frames;
+ int show_psnr;
+ enum TestDecodeFatality test_decode;
+ int have_framerate;
+ struct vpx_rational framerate;
+ int out_part;
+ int debug;
+ int show_q_hist_buckets;
+ int show_rate_hist_buckets;
};
/* Per-stream configuration */
-struct stream_config
-{
- struct vpx_codec_enc_cfg cfg;
- const char *out_fn;
- const char *stats_fn;
- stereo_format_t stereo_fmt;
- int arg_ctrls[ARG_CTRL_CNT_MAX][2];
- int arg_ctrl_cnt;
- int write_webm;
- int have_kf_max_dist;
+struct stream_config {
+ struct vpx_codec_enc_cfg cfg;
+ const char *out_fn;
+ const char *stats_fn;
+ stereo_format_t stereo_fmt;
+ int arg_ctrls[ARG_CTRL_CNT_MAX][2];
+ int arg_ctrl_cnt;
+ int write_webm;
+ int have_kf_max_dist;
};
-struct stream_state
-{
- int index;
- struct stream_state *next;
- struct stream_config config;
- FILE *file;
- struct rate_hist rate_hist;
- EbmlGlobal ebml;
- uint32_t hash;
- uint64_t psnr_sse_total;
- uint64_t psnr_samples_total;
- double psnr_totals[4];
- int psnr_count;
- int counts[64];
- vpx_codec_ctx_t encoder;
- unsigned int frames_out;
- uint64_t cx_time;
- size_t nbytes;
- stats_io_t stats;
+struct stream_state {
+ int index;
+ struct stream_state *next;
+ struct stream_config config;
+ FILE *file;
+ struct rate_hist rate_hist;
+ EbmlGlobal ebml;
+ uint32_t hash;
+ uint64_t psnr_sse_total;
+ uint64_t psnr_samples_total;
+ double psnr_totals[4];
+ int psnr_count;
+ int counts[64];
+ vpx_codec_ctx_t encoder;
+ unsigned int frames_out;
+ uint64_t cx_time;
+ size_t nbytes;
+ stats_io_t stats;
+ struct vpx_image *img;
+ vpx_codec_ctx_t decoder;
+ int mismatch_seen;
};
void validate_positive_rational(const char *msg,
- struct vpx_rational *rat)
-{
- if (rat->den < 0)
- {
- rat->num *= -1;
- rat->den *= -1;
- }
+ struct vpx_rational *rat) {
+ if (rat->den < 0) {
+ rat->num *= -1;
+ rat->den *= -1;
+ }
- if (rat->num < 0)
- die("Error: %s must be positive\n", msg);
+ if (rat->num < 0)
+ die("Error: %s must be positive\n", msg);
- if (!rat->den)
- die("Error: %s has zero denominator\n", msg);
+ if (!rat->den)
+ die("Error: %s has zero denominator\n", msg);
}
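
A sketch of the normalization above, with a stand-in for struct vpx_rational: a negative denominator is folded into the numerator before the sign checks, so a value like -30/-1 validates as 30/1.

    #include <stdio.h>

    struct rational { int num, den; };

    static void normalize(struct rational *r) {
      if (r->den < 0) {  /* fold the denominator's sign into the numerator */
        r->num *= -1;
        r->den *= -1;
      }
    }

    int main(void) {
      struct rational fps = { -30, -1 };
      normalize(&fps);
      printf("%d/%d\n", fps.num, fps.den);  /* prints 30/1 */
      return 0;
    }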
-static void parse_global_config(struct global_config *global, char **argv)
-{
- char **argi, **argj;
- struct arg arg;
-
- /* Initialize default parameters */
- memset(global, 0, sizeof(*global));
- global->codec = codecs;
- global->passes = 1;
- global->use_i420 = 1;
-
- for (argi = argj = argv; (*argj = *argi); argi += arg.argv_step)
- {
- arg.argv_step = 1;
-
- if (arg_match(&arg, &codecarg, argi))
- {
- int j, k = -1;
-
- for (j = 0; j < sizeof(codecs) / sizeof(codecs[0]); j++)
- if (!strcmp(codecs[j].name, arg.val))
- k = j;
-
- if (k >= 0)
- global->codec = codecs + k;
- else
- die("Error: Unrecognized argument (%s) to --codec\n",
- arg.val);
-
- }
- else if (arg_match(&arg, &passes, argi))
- {
- global->passes = arg_parse_uint(&arg);
-
- if (global->passes < 1 || global->passes > 2)
- die("Error: Invalid number of passes (%d)\n", global->passes);
- }
- else if (arg_match(&arg, &pass_arg, argi))
- {
- global->pass = arg_parse_uint(&arg);
-
- if (global->pass < 1 || global->pass > 2)
- die("Error: Invalid pass selected (%d)\n",
- global->pass);
- }
- else if (arg_match(&arg, &usage, argi))
- global->usage = arg_parse_uint(&arg);
- else if (arg_match(&arg, &deadline, argi))
- global->deadline = arg_parse_uint(&arg);
- else if (arg_match(&arg, &best_dl, argi))
- global->deadline = VPX_DL_BEST_QUALITY;
- else if (arg_match(&arg, &good_dl, argi))
- global->deadline = VPX_DL_GOOD_QUALITY;
- else if (arg_match(&arg, &rt_dl, argi))
- global->deadline = VPX_DL_REALTIME;
- else if (arg_match(&arg, &use_yv12, argi))
- global->use_i420 = 0;
- else if (arg_match(&arg, &use_i420, argi))
- global->use_i420 = 1;
- else if (arg_match(&arg, &quietarg, argi))
- global->quiet = 1;
- else if (arg_match(&arg, &verbosearg, argi))
- global->verbose = 1;
- else if (arg_match(&arg, &limit, argi))
- global->limit = arg_parse_uint(&arg);
- else if (arg_match(&arg, &psnrarg, argi))
- global->show_psnr = 1;
- else if (arg_match(&arg, &framerate, argi))
- {
- global->framerate = arg_parse_rational(&arg);
- validate_positive_rational(arg.name, &global->framerate);
- global->have_framerate = 1;
- }
- else if (arg_match(&arg,&out_part, argi))
- global->out_part = 1;
- else if (arg_match(&arg, &debugmode, argi))
- global->debug = 1;
- else if (arg_match(&arg, &q_hist_n, argi))
- global->show_q_hist_buckets = arg_parse_uint(&arg);
- else if (arg_match(&arg, &rate_hist_n, argi))
- global->show_rate_hist_buckets = arg_parse_uint(&arg);
- else
- argj++;
- }
+static void parse_global_config(struct global_config *global, char **argv) {
+ char **argi, **argj;
+ struct arg arg;
+
+ /* Initialize default parameters */
+ memset(global, 0, sizeof(*global));
+ global->codec = codecs;
+ global->passes = 1;
+ global->use_i420 = 1;
+
+ for (argi = argj = argv; (*argj = *argi); argi += arg.argv_step) {
+ arg.argv_step = 1;
+
+ if (arg_match(&arg, &codecarg, argi)) {
+ int j, k = -1;
+
+ for (j = 0; j < sizeof(codecs) / sizeof(codecs[0]); j++)
+ if (!strcmp(codecs[j].name, arg.val))
+ k = j;
+
+ if (k >= 0)
+ global->codec = codecs + k;
+ else
+ die("Error: Unrecognized argument (%s) to --codec\n",
+ arg.val);
+
+ } else if (arg_match(&arg, &passes, argi)) {
+ global->passes = arg_parse_uint(&arg);
+
+ if (global->passes < 1 || global->passes > 2)
+ die("Error: Invalid number of passes (%d)\n", global->passes);
+ } else if (arg_match(&arg, &pass_arg, argi)) {
+ global->pass = arg_parse_uint(&arg);
+
+ if (global->pass < 1 || global->pass > 2)
+ die("Error: Invalid pass selected (%d)\n",
+ global->pass);
+ } else if (arg_match(&arg, &usage, argi))
+ global->usage = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &deadline, argi))
+ global->deadline = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &best_dl, argi))
+ global->deadline = VPX_DL_BEST_QUALITY;
+ else if (arg_match(&arg, &good_dl, argi))
+ global->deadline = VPX_DL_GOOD_QUALITY;
+ else if (arg_match(&arg, &rt_dl, argi))
+ global->deadline = VPX_DL_REALTIME;
+ else if (arg_match(&arg, &use_yv12, argi))
+ global->use_i420 = 0;
+ else if (arg_match(&arg, &use_i420, argi))
+ global->use_i420 = 1;
+ else if (arg_match(&arg, &quietarg, argi))
+ global->quiet = 1;
+ else if (arg_match(&arg, &verbosearg, argi))
+ global->verbose = 1;
+ else if (arg_match(&arg, &limit, argi))
+ global->limit = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &skip, argi))
+ global->skip_frames = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &psnrarg, argi))
+ global->show_psnr = 1;
+ else if (arg_match(&arg, &recontest, argi))
+ global->test_decode = arg_parse_enum_or_int(&arg);
+ else if (arg_match(&arg, &framerate, argi)) {
+ global->framerate = arg_parse_rational(&arg);
+ validate_positive_rational(arg.name, &global->framerate);
+ global->have_framerate = 1;
+ } else if (arg_match(&arg, &out_part, argi))
+ global->out_part = 1;
+ else if (arg_match(&arg, &debugmode, argi))
+ global->debug = 1;
+ else if (arg_match(&arg, &q_hist_n, argi))
+ global->show_q_hist_buckets = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &rate_hist_n, argi))
+ global->show_rate_hist_buckets = arg_parse_uint(&arg);
+ else
+ argj++;
+ }
- /* Validate global config */
+ /* Validate global config */
- if (global->pass)
- {
- /* DWIM: Assume the user meant passes=2 if pass=2 is specified */
- if (global->pass > global->passes)
- {
- warn("Assuming --pass=%d implies --passes=%d\n",
- global->pass, global->pass);
- global->passes = global->pass;
- }
+ if (global->pass) {
+ /* DWIM: Assume the user meant passes=2 if pass=2 is specified */
+ if (global->pass > global->passes) {
+ warn("Assuming --pass=%d implies --passes=%d\n",
+ global->pass, global->pass);
+ global->passes = global->pass;
}
+ }
}
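
The option loop above relies on an in-place compaction idiom: every argument is copied from argi to argj, but argj only advances for arguments the parser did not consume, so whatever remains in argv afterwards is left for later parsing stages (stream parameters, then the input filename). A reduced sketch with a fake matcher in place of arg_match():

    #include <stdio.h>
    #include <string.h>

    int main(void) {
      char *args[] = { "--quiet", "in.y4m", "--passes=2", "out.webm", NULL };
      char **argv = args, **argi, **argj;

      /* Copy each argument down, keeping (advancing argj past) only the
       * arguments the "parser" does not recognize. */
      for (argi = argj = argv; (*argj = *argi); argi++)
        if (strncmp(*argi, "--", 2))  /* pretend options are consumed */
          argj++;

      for (argi = argv; *argi; argi++)
        printf("left over: %s\n", *argi);  /* in.y4m, out.webm */
      return 0;
    }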
-void open_input_file(struct input_state *input)
-{
- unsigned int fourcc;
+void open_input_file(struct input_state *input) {
+ unsigned int fourcc;
- /* Parse certain options from the input file, if possible */
- input->file = strcmp(input->fn, "-") ? fopen(input->fn, "rb")
- : set_binary_mode(stdin);
+ /* Parse certain options from the input file, if possible */
+ input->file = strcmp(input->fn, "-") ? fopen(input->fn, "rb")
+ : set_binary_mode(stdin);
- if (!input->file)
- fatal("Failed to open input file");
+ if (!input->file)
+ fatal("Failed to open input file");
- /* For RAW input sources, these bytes will applied on the first frame
- * in read_frame().
+ if (!fseeko(input->file, 0, SEEK_END)) {
+ /* Input file is seekable. Figure out how long it is, so we can get
+ * progress info.
*/
- input->detect.buf_read = fread(input->detect.buf, 1, 4, input->file);
- input->detect.position = 0;
-
- if (input->detect.buf_read == 4
- && file_is_y4m(input->file, &input->y4m, input->detect.buf))
- {
- if (y4m_input_open(&input->y4m, input->file, input->detect.buf, 4) >= 0)
- {
- input->file_type = FILE_TYPE_Y4M;
- input->w = input->y4m.pic_w;
- input->h = input->y4m.pic_h;
- input->framerate.num = input->y4m.fps_n;
- input->framerate.den = input->y4m.fps_d;
- input->use_i420 = 0;
- }
- else
- fatal("Unsupported Y4M stream.");
- }
- else if (input->detect.buf_read == 4 && file_is_ivf(input, &fourcc))
- {
- input->file_type = FILE_TYPE_IVF;
- switch (fourcc)
- {
- case 0x32315659:
- input->use_i420 = 0;
- break;
- case 0x30323449:
- input->use_i420 = 1;
- break;
- default:
- fatal("Unsupported fourcc (%08x) in IVF", fourcc);
- }
- }
- else
- {
- input->file_type = FILE_TYPE_RAW;
+ input->length = ftello(input->file);
+ rewind(input->file);
+ }
+
+ /* For RAW input sources, these bytes will be applied on the first frame
+ * in read_frame().
+ */
+ input->detect.buf_read = fread(input->detect.buf, 1, 4, input->file);
+ input->detect.position = 0;
+
+ if (input->detect.buf_read == 4
+ && file_is_y4m(input->file, &input->y4m, input->detect.buf)) {
+ if (y4m_input_open(&input->y4m, input->file, input->detect.buf, 4,
+ input->only_i420) >= 0) {
+ input->file_type = FILE_TYPE_Y4M;
+ input->w = input->y4m.pic_w;
+ input->h = input->y4m.pic_h;
+ input->framerate.num = input->y4m.fps_n;
+ input->framerate.den = input->y4m.fps_d;
+ input->use_i420 = 0;
+ } else
+ fatal("Unsupported Y4M stream.");
+ } else if (input->detect.buf_read == 4 && file_is_ivf(input, &fourcc)) {
+ input->file_type = FILE_TYPE_IVF;
+ switch (fourcc) {
+ case 0x32315659:
+ input->use_i420 = 0;
+ break;
+ case 0x30323449:
+ input->use_i420 = 1;
+ break;
+ default:
+ fatal("Unsupported fourcc (%08x) in IVF", fourcc);
}
+ } else {
+ input->file_type = FILE_TYPE_RAW;
+ }
}
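
The magic numbers in the fourcc switch above are four ASCII bytes packed little-endian; a quick sketch that decodes them:

    #include <stdio.h>

    static void print_fourcc(unsigned int fourcc) {
      printf("%c%c%c%c\n", fourcc & 0xff, (fourcc >> 8) & 0xff,
             (fourcc >> 16) & 0xff, (fourcc >> 24) & 0xff);
    }

    int main(void) {
      print_fourcc(0x30323449);  /* prints I420 */
      print_fourcc(0x32315659);  /* prints YV12 */
      return 0;
    }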
-static void close_input_file(struct input_state *input)
-{
- fclose(input->file);
- if (input->file_type == FILE_TYPE_Y4M)
- y4m_input_close(&input->y4m);
+static void close_input_file(struct input_state *input) {
+ fclose(input->file);
+ if (input->file_type == FILE_TYPE_Y4M)
+ y4m_input_close(&input->y4m);
}
static struct stream_state *new_stream(struct global_config *global,
- struct stream_state *prev)
-{
- struct stream_state *stream;
+ struct stream_state *prev) {
+ struct stream_state *stream;
+
+ stream = calloc(1, sizeof(*stream));
+ if (!stream)
+ fatal("Failed to allocate new stream.");
+ if (prev) {
+ memcpy(stream, prev, sizeof(*stream));
+ stream->index++;
+ prev->next = stream;
+ } else {
+ vpx_codec_err_t res;
+
+ /* Populate encoder configuration */
+ res = vpx_codec_enc_config_default(global->codec->iface(),
+ &stream->config.cfg,
+ global->usage);
+ if (res)
+ fatal("Failed to get config: %s\n", vpx_codec_err_to_string(res));
+
+ /* Change the default timebase to a high enough value so that the
+ * encoder will always create strictly increasing timestamps.
+ */
+ stream->config.cfg.g_timebase.den = 1000;
- stream = calloc(1, sizeof(*stream));
- if(!stream)
- fatal("Failed to allocate new stream.");
- if(prev)
- {
- memcpy(stream, prev, sizeof(*stream));
- stream->index++;
- prev->next = stream;
- }
- else
- {
- vpx_codec_err_t res;
-
- /* Populate encoder configuration */
- res = vpx_codec_enc_config_default(global->codec->iface,
- &stream->config.cfg,
- global->usage);
- if (res)
- fatal("Failed to get config: %s\n", vpx_codec_err_to_string(res));
-
- /* Change the default timebase to a high enough value so that the
- * encoder will always create strictly increasing timestamps.
- */
- stream->config.cfg.g_timebase.den = 1000;
-
- /* Never use the library's default resolution, require it be parsed
- * from the file or set on the command line.
- */
- stream->config.cfg.g_w = 0;
- stream->config.cfg.g_h = 0;
-
- /* Initialize remaining stream parameters */
- stream->config.stereo_fmt = STEREO_FORMAT_MONO;
- stream->config.write_webm = 1;
- stream->ebml.last_pts_ms = -1;
-
- /* Allows removal of the application version from the EBML tags */
- stream->ebml.debug = global->debug;
- }
+ /* Never use the library's default resolution, require it be parsed
+ * from the file or set on the command line.
+ */
+ stream->config.cfg.g_w = 0;
+ stream->config.cfg.g_h = 0;
+
+ /* Initialize remaining stream parameters */
+ stream->config.stereo_fmt = STEREO_FORMAT_MONO;
+ stream->config.write_webm = 1;
+ stream->ebml.last_pts_ms = -1;
+
+ /* Allows removal of the application version from the EBML tags */
+ stream->ebml.debug = global->debug;
+ }
- /* Output files must be specified for each stream */
- stream->config.out_fn = NULL;
+ /* Output files must be specified for each stream */
+ stream->config.out_fn = NULL;
- stream->next = NULL;
- return stream;
+ stream->next = NULL;
+ return stream;
}
static int parse_stream_params(struct global_config *global,
struct stream_state *stream,
- char **argv)
-{
- char **argi, **argj;
- struct arg arg;
- static const arg_def_t **ctrl_args = no_args;
- static const int *ctrl_args_map = NULL;
- struct stream_config *config = &stream->config;
- int eos_mark_found = 0;
-
- /* Handle codec specific options */
- if (global->codec->iface == &vpx_codec_vp8_cx_algo)
- {
- ctrl_args = vp8_args;
- ctrl_args_map = vp8_arg_ctrl_map;
- }
-
- for (argi = argj = argv; (*argj = *argi); argi += arg.argv_step)
- {
- arg.argv_step = 1;
-
- /* Once we've found an end-of-stream marker (--) we want to continue
- * shifting arguments but not consuming them.
- */
- if (eos_mark_found)
- {
- argj++;
- continue;
- }
- else if (!strcmp(*argj, "--"))
- {
- eos_mark_found = 1;
- continue;
- }
+ char **argv) {
+ char **argi, **argj;
+ struct arg arg;
+ static const arg_def_t **ctrl_args = no_args;
+ static const int *ctrl_args_map = NULL;
+ struct stream_config *config = &stream->config;
+ int eos_mark_found = 0;
+
+ /* Handle codec specific options */
+ if (0) {
+#if CONFIG_VP8_ENCODER
+ } else if (global->codec->iface == vpx_codec_vp8_cx) {
+ ctrl_args = vp8_args;
+ ctrl_args_map = vp8_arg_ctrl_map;
+#endif
+#if CONFIG_VP9_ENCODER
+ } else if (global->codec->iface == vpx_codec_vp9_cx) {
+ ctrl_args = vp9_args;
+ ctrl_args_map = vp9_arg_ctrl_map;
+#endif
+ }
- if (0);
- else if (arg_match(&arg, &outputfile, argi))
- config->out_fn = arg.val;
- else if (arg_match(&arg, &fpf_name, argi))
- config->stats_fn = arg.val;
- else if (arg_match(&arg, &use_ivf, argi))
- config->write_webm = 0;
- else if (arg_match(&arg, &threads, argi))
- config->cfg.g_threads = arg_parse_uint(&arg);
- else if (arg_match(&arg, &profile, argi))
- config->cfg.g_profile = arg_parse_uint(&arg);
- else if (arg_match(&arg, &width, argi))
- config->cfg.g_w = arg_parse_uint(&arg);
- else if (arg_match(&arg, &height, argi))
- config->cfg.g_h = arg_parse_uint(&arg);
- else if (arg_match(&arg, &stereo_mode, argi))
- config->stereo_fmt = arg_parse_enum_or_int(&arg);
- else if (arg_match(&arg, &timebase, argi))
- {
- config->cfg.g_timebase = arg_parse_rational(&arg);
- validate_positive_rational(arg.name, &config->cfg.g_timebase);
- }
- else if (arg_match(&arg, &error_resilient, argi))
- config->cfg.g_error_resilient = arg_parse_uint(&arg);
- else if (arg_match(&arg, &lag_in_frames, argi))
- config->cfg.g_lag_in_frames = arg_parse_uint(&arg);
- else if (arg_match(&arg, &dropframe_thresh, argi))
- config->cfg.rc_dropframe_thresh = arg_parse_uint(&arg);
- else if (arg_match(&arg, &resize_allowed, argi))
- config->cfg.rc_resize_allowed = arg_parse_uint(&arg);
- else if (arg_match(&arg, &resize_up_thresh, argi))
- config->cfg.rc_resize_up_thresh = arg_parse_uint(&arg);
- else if (arg_match(&arg, &resize_down_thresh, argi))
- config->cfg.rc_resize_down_thresh = arg_parse_uint(&arg);
- else if (arg_match(&arg, &end_usage, argi))
- config->cfg.rc_end_usage = arg_parse_enum_or_int(&arg);
- else if (arg_match(&arg, &target_bitrate, argi))
- config->cfg.rc_target_bitrate = arg_parse_uint(&arg);
- else if (arg_match(&arg, &min_quantizer, argi))
- config->cfg.rc_min_quantizer = arg_parse_uint(&arg);
- else if (arg_match(&arg, &max_quantizer, argi))
- config->cfg.rc_max_quantizer = arg_parse_uint(&arg);
- else if (arg_match(&arg, &undershoot_pct, argi))
- config->cfg.rc_undershoot_pct = arg_parse_uint(&arg);
- else if (arg_match(&arg, &overshoot_pct, argi))
- config->cfg.rc_overshoot_pct = arg_parse_uint(&arg);
- else if (arg_match(&arg, &buf_sz, argi))
- config->cfg.rc_buf_sz = arg_parse_uint(&arg);
- else if (arg_match(&arg, &buf_initial_sz, argi))
- config->cfg.rc_buf_initial_sz = arg_parse_uint(&arg);
- else if (arg_match(&arg, &buf_optimal_sz, argi))
- config->cfg.rc_buf_optimal_sz = arg_parse_uint(&arg);
- else if (arg_match(&arg, &bias_pct, argi))
- {
- config->cfg.rc_2pass_vbr_bias_pct = arg_parse_uint(&arg);
+ for (argi = argj = argv; (*argj = *argi); argi += arg.argv_step) {
+ arg.argv_step = 1;
- if (global->passes < 2)
- warn("option %s ignored in one-pass mode.\n", arg.name);
- }
- else if (arg_match(&arg, &minsection_pct, argi))
- {
- config->cfg.rc_2pass_vbr_minsection_pct = arg_parse_uint(&arg);
+ /* Once we've found an end-of-stream marker (--) we want to continue
+ * shifting arguments but not consuming them.
+ */
+ if (eos_mark_found) {
+ argj++;
+ continue;
+ } else if (!strcmp(*argj, "--")) {
+ eos_mark_found = 1;
+ continue;
+ }
- if (global->passes < 2)
- warn("option %s ignored in one-pass mode.\n", arg.name);
- }
- else if (arg_match(&arg, &maxsection_pct, argi))
- {
- config->cfg.rc_2pass_vbr_maxsection_pct = arg_parse_uint(&arg);
+ if (0);
+ else if (arg_match(&arg, &outputfile, argi))
+ config->out_fn = arg.val;
+ else if (arg_match(&arg, &fpf_name, argi))
+ config->stats_fn = arg.val;
+ else if (arg_match(&arg, &use_ivf, argi))
+ config->write_webm = 0;
+ else if (arg_match(&arg, &threads, argi))
+ config->cfg.g_threads = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &profile, argi))
+ config->cfg.g_profile = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &width, argi))
+ config->cfg.g_w = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &height, argi))
+ config->cfg.g_h = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &stereo_mode, argi))
+ config->stereo_fmt = arg_parse_enum_or_int(&arg);
+ else if (arg_match(&arg, &timebase, argi)) {
+ config->cfg.g_timebase = arg_parse_rational(&arg);
+ validate_positive_rational(arg.name, &config->cfg.g_timebase);
+ } else if (arg_match(&arg, &error_resilient, argi))
+ config->cfg.g_error_resilient = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &lag_in_frames, argi))
+ config->cfg.g_lag_in_frames = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &dropframe_thresh, argi))
+ config->cfg.rc_dropframe_thresh = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &resize_allowed, argi))
+ config->cfg.rc_resize_allowed = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &resize_up_thresh, argi))
+ config->cfg.rc_resize_up_thresh = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &resize_down_thresh, argi))
+ config->cfg.rc_resize_down_thresh = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &end_usage, argi))
+ config->cfg.rc_end_usage = arg_parse_enum_or_int(&arg);
+ else if (arg_match(&arg, &target_bitrate, argi))
+ config->cfg.rc_target_bitrate = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &min_quantizer, argi))
+ config->cfg.rc_min_quantizer = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &max_quantizer, argi))
+ config->cfg.rc_max_quantizer = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &undershoot_pct, argi))
+ config->cfg.rc_undershoot_pct = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &overshoot_pct, argi))
+ config->cfg.rc_overshoot_pct = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &buf_sz, argi))
+ config->cfg.rc_buf_sz = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &buf_initial_sz, argi))
+ config->cfg.rc_buf_initial_sz = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &buf_optimal_sz, argi))
+ config->cfg.rc_buf_optimal_sz = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &bias_pct, argi)) {
+ config->cfg.rc_2pass_vbr_bias_pct = arg_parse_uint(&arg);
+
+ if (global->passes < 2)
+ warn("option %s ignored in one-pass mode.\n", arg.name);
+ } else if (arg_match(&arg, &minsection_pct, argi)) {
+ config->cfg.rc_2pass_vbr_minsection_pct = arg_parse_uint(&arg);
+
+ if (global->passes < 2)
+ warn("option %s ignored in one-pass mode.\n", arg.name);
+ } else if (arg_match(&arg, &maxsection_pct, argi)) {
+ config->cfg.rc_2pass_vbr_maxsection_pct = arg_parse_uint(&arg);
+
+ if (global->passes < 2)
+ warn("option %s ignored in one-pass mode.\n", arg.name);
+ } else if (arg_match(&arg, &kf_min_dist, argi))
+ config->cfg.kf_min_dist = arg_parse_uint(&arg);
+ else if (arg_match(&arg, &kf_max_dist, argi)) {
+ config->cfg.kf_max_dist = arg_parse_uint(&arg);
+ config->have_kf_max_dist = 1;
+ } else if (arg_match(&arg, &kf_disabled, argi))
+ config->cfg.kf_mode = VPX_KF_DISABLED;
+ else {
+ int i, match = 0;
+
+ for (i = 0; ctrl_args[i]; i++) {
+ if (arg_match(&arg, ctrl_args[i], argi)) {
+ int j;
+ match = 1;
+
+ /* Point either to the next free element or the first
+ * instance of this control.
+ */
+ for (j = 0; j < config->arg_ctrl_cnt; j++)
+ if (config->arg_ctrls[j][0] == ctrl_args_map[i])
+ break;
+
+ /* Update/insert */
+ assert(j < ARG_CTRL_CNT_MAX);
+ if (j < ARG_CTRL_CNT_MAX) {
+ config->arg_ctrls[j][0] = ctrl_args_map[i];
+ config->arg_ctrls[j][1] = arg_parse_enum_or_int(&arg);
+ if (j == config->arg_ctrl_cnt)
+ config->arg_ctrl_cnt++;
+ }
- if (global->passes < 2)
- warn("option %s ignored in one-pass mode.\n", arg.name);
- }
- else if (arg_match(&arg, &kf_min_dist, argi))
- config->cfg.kf_min_dist = arg_parse_uint(&arg);
- else if (arg_match(&arg, &kf_max_dist, argi))
- {
- config->cfg.kf_max_dist = arg_parse_uint(&arg);
- config->have_kf_max_dist = 1;
}
- else if (arg_match(&arg, &kf_disabled, argi))
- config->cfg.kf_mode = VPX_KF_DISABLED;
- else
- {
- int i, match = 0;
-
- for (i = 0; ctrl_args[i]; i++)
- {
- if (arg_match(&arg, ctrl_args[i], argi))
- {
- int j;
- match = 1;
-
- /* Point either to the next free element or the first
- * instance of this control.
- */
- for(j=0; j<config->arg_ctrl_cnt; j++)
- if(config->arg_ctrls[j][0] == ctrl_args_map[i])
- break;
-
- /* Update/insert */
- assert(j < ARG_CTRL_CNT_MAX);
- if (j < ARG_CTRL_CNT_MAX)
- {
- config->arg_ctrls[j][0] = ctrl_args_map[i];
- config->arg_ctrls[j][1] = arg_parse_enum_or_int(&arg);
- if(j == config->arg_ctrl_cnt)
- config->arg_ctrl_cnt++;
- }
-
- }
- }
+ }
- if (!match)
- argj++;
- }
+ if (!match)
+ argj++;
}
+ }
- return eos_mark_found;
+ return eos_mark_found;
}
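
The update/insert logic above deserves a closer look: a repeated control argument overwrites the earlier occurrence rather than appending a duplicate, so the last value on the command line wins. In sketch form (hypothetical set_ctrl() helper, not the libvpx API):

    #include <stdio.h>

    #define CNT_MAX 8

    static int ctrls[CNT_MAX][2];
    static int ctrl_cnt;

    static void set_ctrl(int id, int value) {
      int j;
      /* Point at the next free element or the first instance of this id. */
      for (j = 0; j < ctrl_cnt; j++)
        if (ctrls[j][0] == id)
          break;
      if (j < CNT_MAX) {
        ctrls[j][0] = id;
        ctrls[j][1] = value;
        if (j == ctrl_cnt)
          ctrl_cnt++;  /* appended rather than updated */
      }
    }

    int main(void) {
      set_ctrl(12, 1);
      set_ctrl(13, 4);
      set_ctrl(12, 9);  /* overwrites the first entry */
      printf("count=%d first={%d,%d}\n", ctrl_cnt, ctrls[0][0], ctrls[0][1]);
      return 0;  /* count=2 first={12,9} */
    }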
#define FOREACH_STREAM(func)\
-do\
-{\
+ do\
+ {\
struct stream_state *stream;\
-\
+ \
for(stream = streams; stream; stream = stream->next)\
- func;\
-}while(0)
-
-
-static void validate_stream_config(struct stream_state *stream)
-{
- struct stream_state *streami;
-
- if(!stream->config.cfg.g_w || !stream->config.cfg.g_h)
- fatal("Stream %d: Specify stream dimensions with --width (-w) "
- " and --height (-h)", stream->index);
-
- for(streami = stream; streami; streami = streami->next)
- {
- /* All streams require output files */
- if(!streami->config.out_fn)
- fatal("Stream %d: Output file is required (specify with -o)",
- streami->index);
-
- /* Check for two streams outputting to the same file */
- if(streami != stream)
- {
- const char *a = stream->config.out_fn;
- const char *b = streami->config.out_fn;
- if(!strcmp(a,b) && strcmp(a, "/dev/null") && strcmp(a, ":nul"))
- fatal("Stream %d: duplicate output file (from stream %d)",
- streami->index, stream->index);
- }
+ func;\
+ }while(0)
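
FOREACH_STREAM is wrapped in do { } while(0) so that the expansion behaves as a single statement; without the wrapper, using it as the body of an unbraced if/else would splice incorrectly. A compilable sketch of the same shape:

    #include <stdio.h>

    struct stream_state { int index; struct stream_state *next; };

    #define FOREACH_STREAM(func)                              \
      do {                                                    \
        struct stream_state *stream;                          \
        for (stream = streams; stream; stream = stream->next) \
          func;                                               \
      } while (0)

    int main(void) {
      struct stream_state s1 = { 1, NULL }, s0 = { 0, &s1 };
      struct stream_state *streams = &s0;
      int verbose = 1;

      if (verbose)
        FOREACH_STREAM(printf("stream %d\n", stream->index));
      else
        printf("quiet\n");
      return 0;
    }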
+
+
+static void validate_stream_config(struct stream_state *stream) {
+ struct stream_state *streami;
+
+ if (!stream->config.cfg.g_w || !stream->config.cfg.g_h)
+ fatal("Stream %d: Specify stream dimensions with --width (-w) "
+ " and --height (-h)", stream->index);
+
+ for (streami = stream; streami; streami = streami->next) {
+ /* All streams require output files */
+ if (!streami->config.out_fn)
+ fatal("Stream %d: Output file is required (specify with -o)",
+ streami->index);
+
+ /* Check for two streams outputting to the same file */
+ if (streami != stream) {
+ const char *a = stream->config.out_fn;
+ const char *b = streami->config.out_fn;
+ if (!strcmp(a, b) && strcmp(a, "/dev/null") && strcmp(a, ":nul"))
+ fatal("Stream %d: duplicate output file (from stream %d)",
+ streami->index, stream->index);
+ }
- /* Check for two streams sharing a stats file. */
- if(streami != stream)
- {
- const char *a = stream->config.stats_fn;
- const char *b = streami->config.stats_fn;
- if(a && b && !strcmp(a,b))
- fatal("Stream %d: duplicate stats file (from stream %d)",
- streami->index, stream->index);
- }
+ /* Check for two streams sharing a stats file. */
+ if (streami != stream) {
+ const char *a = stream->config.stats_fn;
+ const char *b = streami->config.stats_fn;
+ if (a && b && !strcmp(a, b))
+ fatal("Stream %d: duplicate stats file (from stream %d)",
+ streami->index, stream->index);
}
+ }
}
static void set_stream_dimensions(struct stream_state *stream,
unsigned int w,
- unsigned int h)
-{
- if ((stream->config.cfg.g_w && stream->config.cfg.g_w != w)
- ||(stream->config.cfg.g_h && stream->config.cfg.g_h != h))
- fatal("Stream %d: Resizing not yet supported", stream->index);
- stream->config.cfg.g_w = w;
- stream->config.cfg.g_h = h;
+ unsigned int h) {
+ if (!stream->config.cfg.g_w) {
+ if (!stream->config.cfg.g_h)
+ stream->config.cfg.g_w = w;
+ else
+ stream->config.cfg.g_w = w * stream->config.cfg.g_h / h;
+ }
+ if (!stream->config.cfg.g_h) {
+ stream->config.cfg.g_h = h * stream->config.cfg.g_w / w;
+ }
}
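
Note the behavior change above: instead of rejecting mid-stream resizes, the missing dimension is now derived from the input while preserving its aspect ratio. Worked through with assumed numbers (a 1920x1080 source and only --height=720 given):

    #include <stdio.h>

    int main(void) {
      const unsigned int in_w = 1920, in_h = 1080;
      unsigned int g_w = 0, g_h = 720;

      /* Mirror of the fallback logic above. */
      if (!g_w)
        g_w = g_h ? in_w * g_h / in_h : in_w;
      if (!g_h)
        g_h = in_h * g_w / in_w;

      printf("%ux%u\n", g_w, g_h);  /* prints 1280x720 */
      return 0;
    }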
static void set_default_kf_interval(struct stream_state *stream,
- struct global_config *global)
-{
- /* Use a max keyframe interval of 5 seconds, if none was
- * specified on the command line.
- */
- if (!stream->config.have_kf_max_dist)
- {
- double framerate = (double)global->framerate.num/global->framerate.den;
- if (framerate > 0.0)
- stream->config.cfg.kf_max_dist = (unsigned int)(5.0*framerate);
- }
+ struct global_config *global) {
+ /* Use a max keyframe interval of 5 seconds, if none was
+ * specified on the command line.
+ */
+ if (!stream->config.have_kf_max_dist) {
+ double framerate = (double)global->framerate.num / global->framerate.den;
+ if (framerate > 0.0)
+ stream->config.cfg.kf_max_dist = (unsigned int)(5.0 * framerate);
+ }
}
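
For example, with the tool's default 30/1 input frame rate and no --kf-max-dist on the command line, this sets kf_max_dist to (unsigned int)(5.0 * 30.0) = 150, i.e. a keyframe at least every 150 frames.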
static void show_stream_config(struct stream_state *stream,
struct global_config *global,
- struct input_state *input)
-{
+ struct input_state *input) {
#define SHOW(field) \
- fprintf(stderr, " %-28s = %d\n", #field, stream->config.cfg.field)
-
- if(stream->index == 0)
- {
- fprintf(stderr, "Codec: %s\n",
- vpx_codec_iface_name(global->codec->iface));
- fprintf(stderr, "Source file: %s Format: %s\n", input->fn,
- input->use_i420 ? "I420" : "YV12");
- }
- if(stream->next || stream->index)
- fprintf(stderr, "\nStream Index: %d\n", stream->index);
- fprintf(stderr, "Destination file: %s\n", stream->config.out_fn);
- fprintf(stderr, "Encoder parameters:\n");
-
- SHOW(g_usage);
- SHOW(g_threads);
- SHOW(g_profile);
- SHOW(g_w);
- SHOW(g_h);
- SHOW(g_timebase.num);
- SHOW(g_timebase.den);
- SHOW(g_error_resilient);
- SHOW(g_pass);
- SHOW(g_lag_in_frames);
- SHOW(rc_dropframe_thresh);
- SHOW(rc_resize_allowed);
- SHOW(rc_resize_up_thresh);
- SHOW(rc_resize_down_thresh);
- SHOW(rc_end_usage);
- SHOW(rc_target_bitrate);
- SHOW(rc_min_quantizer);
- SHOW(rc_max_quantizer);
- SHOW(rc_undershoot_pct);
- SHOW(rc_overshoot_pct);
- SHOW(rc_buf_sz);
- SHOW(rc_buf_initial_sz);
- SHOW(rc_buf_optimal_sz);
- SHOW(rc_2pass_vbr_bias_pct);
- SHOW(rc_2pass_vbr_minsection_pct);
- SHOW(rc_2pass_vbr_maxsection_pct);
- SHOW(kf_mode);
- SHOW(kf_min_dist);
- SHOW(kf_max_dist);
+ fprintf(stderr, " %-28s = %d\n", #field, stream->config.cfg.field)
+
+ if (stream->index == 0) {
+ fprintf(stderr, "Codec: %s\n",
+ vpx_codec_iface_name(global->codec->iface()));
+ fprintf(stderr, "Source file: %s Format: %s\n", input->fn,
+ input->use_i420 ? "I420" : "YV12");
+ }
+ if (stream->next || stream->index)
+ fprintf(stderr, "\nStream Index: %d\n", stream->index);
+ fprintf(stderr, "Destination file: %s\n", stream->config.out_fn);
+ fprintf(stderr, "Encoder parameters:\n");
+
+ SHOW(g_usage);
+ SHOW(g_threads);
+ SHOW(g_profile);
+ SHOW(g_w);
+ SHOW(g_h);
+ SHOW(g_timebase.num);
+ SHOW(g_timebase.den);
+ SHOW(g_error_resilient);
+ SHOW(g_pass);
+ SHOW(g_lag_in_frames);
+ SHOW(rc_dropframe_thresh);
+ SHOW(rc_resize_allowed);
+ SHOW(rc_resize_up_thresh);
+ SHOW(rc_resize_down_thresh);
+ SHOW(rc_end_usage);
+ SHOW(rc_target_bitrate);
+ SHOW(rc_min_quantizer);
+ SHOW(rc_max_quantizer);
+ SHOW(rc_undershoot_pct);
+ SHOW(rc_overshoot_pct);
+ SHOW(rc_buf_sz);
+ SHOW(rc_buf_initial_sz);
+ SHOW(rc_buf_optimal_sz);
+ SHOW(rc_2pass_vbr_bias_pct);
+ SHOW(rc_2pass_vbr_minsection_pct);
+ SHOW(rc_2pass_vbr_maxsection_pct);
+ SHOW(kf_mode);
+ SHOW(kf_min_dist);
+ SHOW(kf_max_dist);
}
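
The SHOW macro leans on the preprocessor's stringizing operator: #field turns the member name into its own printed label. A reduced, compilable version:

    #include <stdio.h>

    struct cfg { int g_w, g_h; };

    #define SHOW(field) \
      fprintf(stderr, "    %-28s = %d\n", #field, cfg.field)

    int main(void) {
      struct cfg cfg = { 640, 480 };
      SHOW(g_w);  /* prints "    g_w                          = 640" */
      SHOW(g_h);
      return 0;
    }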
static void open_output_file(struct stream_state *stream,
- struct global_config *global)
-{
- const char *fn = stream->config.out_fn;
-
- stream->file = strcmp(fn, "-") ? fopen(fn, "wb") : set_binary_mode(stdout);
-
- if (!stream->file)
- fatal("Failed to open output file");
-
- if(stream->config.write_webm && fseek(stream->file, 0, SEEK_CUR))
- fatal("WebM output to pipes not supported.");
-
- if(stream->config.write_webm)
- {
- stream->ebml.stream = stream->file;
- write_webm_file_header(&stream->ebml, &stream->config.cfg,
- &global->framerate,
- stream->config.stereo_fmt);
- }
- else
- write_ivf_file_header(stream->file, &stream->config.cfg,
- global->codec->fourcc, 0);
+ struct global_config *global) {
+ const char *fn = stream->config.out_fn;
+
+ stream->file = strcmp(fn, "-") ? fopen(fn, "wb") : set_binary_mode(stdout);
+
+ if (!stream->file)
+ fatal("Failed to open output file");
+
+ if (stream->config.write_webm && fseek(stream->file, 0, SEEK_CUR))
+ fatal("WebM output to pipes not supported.");
+
+ if (stream->config.write_webm) {
+ stream->ebml.stream = stream->file;
+ write_webm_file_header(&stream->ebml, &stream->config.cfg,
+ &global->framerate,
+ stream->config.stereo_fmt,
+ global->codec->fourcc);
+ } else
+ write_ivf_file_header(stream->file, &stream->config.cfg,
+ global->codec->fourcc, 0);
}
static void close_output_file(struct stream_state *stream,
- unsigned int fourcc)
-{
- if(stream->config.write_webm)
- {
- write_webm_file_footer(&stream->ebml, stream->hash);
- free(stream->ebml.cue_list);
- stream->ebml.cue_list = NULL;
- }
- else
- {
- if (!fseek(stream->file, 0, SEEK_SET))
- write_ivf_file_header(stream->file, &stream->config.cfg,
- fourcc,
- stream->frames_out);
- }
-
- fclose(stream->file);
+ unsigned int fourcc) {
+ if (stream->config.write_webm) {
+ write_webm_file_footer(&stream->ebml, stream->hash);
+ free(stream->ebml.cue_list);
+ stream->ebml.cue_list = NULL;
+ } else {
+ if (!fseek(stream->file, 0, SEEK_SET))
+ write_ivf_file_header(stream->file, &stream->config.cfg,
+ fourcc,
+ stream->frames_out);
+ }
+
+ fclose(stream->file);
}
static void setup_pass(struct stream_state *stream,
struct global_config *global,
- int pass)
-{
- if (stream->config.stats_fn)
- {
- if (!stats_open_file(&stream->stats, stream->config.stats_fn,
- pass))
- fatal("Failed to open statistics store");
- }
- else
- {
- if (!stats_open_mem(&stream->stats, pass))
- fatal("Failed to open statistics store");
- }
-
- stream->config.cfg.g_pass = global->passes == 2
- ? pass ? VPX_RC_LAST_PASS : VPX_RC_FIRST_PASS
- : VPX_RC_ONE_PASS;
- if (pass)
- stream->config.cfg.rc_twopass_stats_in = stats_get(&stream->stats);
-
- stream->cx_time = 0;
- stream->nbytes = 0;
- stream->frames_out = 0;
+ int pass) {
+ if (stream->config.stats_fn) {
+ if (!stats_open_file(&stream->stats, stream->config.stats_fn,
+ pass))
+ fatal("Failed to open statistics store");
+ } else {
+ if (!stats_open_mem(&stream->stats, pass))
+ fatal("Failed to open statistics store");
+ }
+
+ stream->config.cfg.g_pass = global->passes == 2
+ ? pass ? VPX_RC_LAST_PASS : VPX_RC_FIRST_PASS
+ : VPX_RC_ONE_PASS;
+ if (pass)
+ stream->config.cfg.rc_twopass_stats_in = stats_get(&stream->stats);
+
+ stream->cx_time = 0;
+ stream->nbytes = 0;
+ stream->frames_out = 0;
}
static void initialize_encoder(struct stream_state *stream,
- struct global_config *global)
-{
- int i;
- int flags = 0;
-
- flags |= global->show_psnr ? VPX_CODEC_USE_PSNR : 0;
- flags |= global->out_part ? VPX_CODEC_USE_OUTPUT_PARTITION : 0;
-
- /* Construct Encoder Context */
- vpx_codec_enc_init(&stream->encoder, global->codec->iface,
- &stream->config.cfg, flags);
- ctx_exit_on_error(&stream->encoder, "Failed to initialize encoder");
-
- /* Note that we bypass the vpx_codec_control wrapper macro because
- * we're being clever to store the control IDs in an array. Real
- * applications will want to make use of the enumerations directly
- */
- for (i = 0; i < stream->config.arg_ctrl_cnt; i++)
- {
- int ctrl = stream->config.arg_ctrls[i][0];
- int value = stream->config.arg_ctrls[i][1];
- if (vpx_codec_control_(&stream->encoder, ctrl, value))
- fprintf(stderr, "Error: Tried to set control %d = %d\n",
- ctrl, value);
-
- ctx_exit_on_error(&stream->encoder, "Failed to control codec");
- }
+ struct global_config *global) {
+ int i;
+ int flags = 0;
+
+ flags |= global->show_psnr ? VPX_CODEC_USE_PSNR : 0;
+ flags |= global->out_part ? VPX_CODEC_USE_OUTPUT_PARTITION : 0;
+
+ /* Construct Encoder Context */
+ vpx_codec_enc_init(&stream->encoder, global->codec->iface(),
+ &stream->config.cfg, flags);
+ ctx_exit_on_error(&stream->encoder, "Failed to initialize encoder");
+
+ /* Note that we bypass the vpx_codec_control wrapper macro because
+ * we're being clever to store the control IDs in an array. Real
+ * applications will want to make use of the enumerations directly.
+ */
+ for (i = 0; i < stream->config.arg_ctrl_cnt; i++) {
+ int ctrl = stream->config.arg_ctrls[i][0];
+ int value = stream->config.arg_ctrls[i][1];
+ if (vpx_codec_control_(&stream->encoder, ctrl, value))
+ fprintf(stderr, "Error: Tried to set control %d = %d\n",
+ ctrl, value);
+
+ ctx_exit_on_error(&stream->encoder, "Failed to control codec");
+ }
+
+#if CONFIG_DECODERS
+ if (global->test_decode != TEST_DECODE_OFF) {
+ vpx_codec_dec_init(&stream->decoder, global->codec->dx_iface(), NULL, 0);
+ }
+#endif
}
static void encode_frame(struct stream_state *stream,
struct global_config *global,
struct vpx_image *img,
- unsigned int frames_in)
-{
- vpx_codec_pts_t frame_start, next_frame_start;
- struct vpx_codec_enc_cfg *cfg = &stream->config.cfg;
- struct vpx_usec_timer timer;
-
- frame_start = (cfg->g_timebase.den * (int64_t)(frames_in - 1)
- * global->framerate.den)
- / cfg->g_timebase.num / global->framerate.num;
- next_frame_start = (cfg->g_timebase.den * (int64_t)(frames_in)
- * global->framerate.den)
- / cfg->g_timebase.num / global->framerate.num;
- vpx_usec_timer_start(&timer);
- vpx_codec_encode(&stream->encoder, img, frame_start,
- (unsigned long)(next_frame_start - frame_start),
- 0, global->deadline);
- vpx_usec_timer_mark(&timer);
- stream->cx_time += vpx_usec_timer_elapsed(&timer);
- ctx_exit_on_error(&stream->encoder, "Stream %d: Failed to encode frame",
- stream->index);
+ unsigned int frames_in) {
+ vpx_codec_pts_t frame_start, next_frame_start;
+ struct vpx_codec_enc_cfg *cfg = &stream->config.cfg;
+ struct vpx_usec_timer timer;
+
+ frame_start = (cfg->g_timebase.den * (int64_t)(frames_in - 1)
+ * global->framerate.den)
+ / cfg->g_timebase.num / global->framerate.num;
+ next_frame_start = (cfg->g_timebase.den * (int64_t)(frames_in)
+ * global->framerate.den)
+ / cfg->g_timebase.num / global->framerate.num;
+
+ /* Scale if necessary */
+ if (img && (img->d_w != cfg->g_w || img->d_h != cfg->g_h)) {
+ if (!stream->img)
+ stream->img = vpx_img_alloc(NULL, VPX_IMG_FMT_I420,
+ cfg->g_w, cfg->g_h, 16);
+ I420Scale(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
+ img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
+ img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
+ img->d_w, img->d_h,
+ stream->img->planes[VPX_PLANE_Y],
+ stream->img->stride[VPX_PLANE_Y],
+ stream->img->planes[VPX_PLANE_U],
+ stream->img->stride[VPX_PLANE_U],
+ stream->img->planes[VPX_PLANE_V],
+ stream->img->stride[VPX_PLANE_V],
+ stream->img->d_w, stream->img->d_h,
+ kFilterBox);
+
+ img = stream->img;
+ }
+
+ vpx_usec_timer_start(&timer);
+ vpx_codec_encode(&stream->encoder, img, frame_start,
+ (unsigned long)(next_frame_start - frame_start),
+ 0, global->deadline);
+ vpx_usec_timer_mark(&timer);
+ stream->cx_time += vpx_usec_timer_elapsed(&timer);
+ ctx_exit_on_error(&stream->encoder, "Stream %d: Failed to encode frame",
+ stream->index);
}
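
The timestamp arithmetic above maps frame counts through the frame rate into the stream's timebase. Worked through with assumed defaults (timebase 1/1000, framerate 30/1), which also shows why the g_timebase.den = 1000 default keeps timestamps strictly increasing even though 1000/30 does not divide evenly:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void) {
      const int64_t tb_num = 1, tb_den = 1000;  /* assumed timebase */
      const int64_t fr_num = 30, fr_den = 1;    /* assumed frame rate */
      unsigned int frames_in;

      for (frames_in = 1; frames_in <= 3; frames_in++) {
        const int64_t start =
            tb_den * (int64_t)(frames_in - 1) * fr_den / tb_num / fr_num;
        const int64_t next =
            tb_den * (int64_t)frames_in * fr_den / tb_num / fr_num;
        /* pts: 0, 33, 66; durations: 33, 33, 34 */
        printf("frame %u: pts=%" PRId64 " duration=%" PRId64 "\n",
               frames_in, start, next - start);
      }
      return 0;
    }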
-static void update_quantizer_histogram(struct stream_state *stream)
-{
- if(stream->config.cfg.g_pass != VPX_RC_FIRST_PASS)
- {
- int q;
+static void update_quantizer_histogram(struct stream_state *stream) {
+ if (stream->config.cfg.g_pass != VPX_RC_FIRST_PASS) {
+ int q;
- vpx_codec_control(&stream->encoder, VP8E_GET_LAST_QUANTIZER_64, &q);
- ctx_exit_on_error(&stream->encoder, "Failed to read quantizer");
- stream->counts[q]++;
- }
+ vpx_codec_control(&stream->encoder, VP8E_GET_LAST_QUANTIZER_64, &q);
+ ctx_exit_on_error(&stream->encoder, "Failed to read quantizer");
+ stream->counts[q]++;
+ }
}
static void get_cx_data(struct stream_state *stream,
struct global_config *global,
- int *got_data)
-{
- const vpx_codec_cx_pkt_t *pkt;
- const struct vpx_codec_enc_cfg *cfg = &stream->config.cfg;
- vpx_codec_iter_t iter = NULL;
+ int *got_data) {
+ const vpx_codec_cx_pkt_t *pkt;
+ const struct vpx_codec_enc_cfg *cfg = &stream->config.cfg;
+ vpx_codec_iter_t iter = NULL;
+
+ *got_data = 0;
+ while ((pkt = vpx_codec_get_cx_data(&stream->encoder, &iter))) {
+ static size_t fsize = 0;
+ static off_t ivf_header_pos = 0;
+
+ switch (pkt->kind) {
+ case VPX_CODEC_CX_FRAME_PKT:
+ if (!(pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT)) {
+ stream->frames_out++;
+ }
+ if (!global->quiet)
+ fprintf(stderr, " %6luF", (unsigned long)pkt->data.frame.sz);
+
+ update_rate_histogram(&stream->rate_hist, cfg, pkt);
+ if (stream->config.write_webm) {
+ /* Update the hash */
+ if (!stream->ebml.debug)
+ stream->hash = murmur(pkt->data.frame.buf,
+ (int)pkt->data.frame.sz,
+ stream->hash);
+
+ write_webm_block(&stream->ebml, cfg, pkt);
+ } else {
+ if (pkt->data.frame.partition_id <= 0) {
+ ivf_header_pos = ftello(stream->file);
+ fsize = pkt->data.frame.sz;
+
+ write_ivf_frame_header(stream->file, pkt);
+ } else {
+ fsize += pkt->data.frame.sz;
+
+ if (!(pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT)) {
+ off_t currpos = ftello(stream->file);
+ fseeko(stream->file, ivf_header_pos, SEEK_SET);
+ write_ivf_frame_size(stream->file, fsize);
+ fseeko(stream->file, currpos, SEEK_SET);
+ }
+ }
- while ((pkt = vpx_codec_get_cx_data(&stream->encoder, &iter)))
- {
- static size_t fsize = 0;
- static off_t ivf_header_pos = 0;
+ (void) fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz,
+ stream->file);
+ }
+ stream->nbytes += pkt->data.raw.sz;
*got_data = 1;
-
- switch (pkt->kind)
- {
- case VPX_CODEC_CX_FRAME_PKT:
- if (!(pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT))
- {
- stream->frames_out++;
- }
+#if CONFIG_DECODERS
+ if (global->test_decode != TEST_DECODE_OFF && !stream->mismatch_seen) {
+ vpx_codec_decode(&stream->decoder, pkt->data.frame.buf,
+ pkt->data.frame.sz, NULL, 0);
+ if (stream->decoder.err) {
+ warn_or_exit_on_error(&stream->decoder,
+ global->test_decode == TEST_DECODE_FATAL,
+ "Failed to decode frame %d in stream %d",
+ stream->frames_out + 1, stream->index);
+ stream->mismatch_seen = stream->frames_out + 1;
+ }
+ }
+#endif
+ break;
+ case VPX_CODEC_STATS_PKT:
+ stream->frames_out++;
+ stats_write(&stream->stats,
+ pkt->data.twopass_stats.buf,
+ pkt->data.twopass_stats.sz);
+ stream->nbytes += pkt->data.raw.sz;
+ break;
+ case VPX_CODEC_PSNR_PKT:
+
+ if (global->show_psnr) {
+ int i;
+
+ stream->psnr_sse_total += pkt->data.psnr.sse[0];
+ stream->psnr_samples_total += pkt->data.psnr.samples[0];
+ for (i = 0; i < 4; i++) {
if (!global->quiet)
- fprintf(stderr, " %6luF",
- (unsigned long)pkt->data.frame.sz);
-
- update_rate_histogram(&stream->rate_hist, cfg, pkt);
- if(stream->config.write_webm)
- {
- /* Update the hash */
- if(!stream->ebml.debug)
- stream->hash = murmur(pkt->data.frame.buf,
- (int)pkt->data.frame.sz,
- stream->hash);
-
- write_webm_block(&stream->ebml, cfg, pkt);
- }
- else
- {
- if (pkt->data.frame.partition_id <= 0)
- {
- ivf_header_pos = ftello(stream->file);
- fsize = pkt->data.frame.sz;
-
- write_ivf_frame_header(stream->file, pkt);
- }
- else
- {
- fsize += pkt->data.frame.sz;
-
- if (!(pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT))
- {
- off_t currpos = ftello(stream->file);
- fseeko(stream->file, ivf_header_pos, SEEK_SET);
- write_ivf_frame_size(stream->file, fsize);
- fseeko(stream->file, currpos, SEEK_SET);
- }
- }
-
- (void) fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz,
- stream->file);
- }
- stream->nbytes += pkt->data.raw.sz;
- break;
- case VPX_CODEC_STATS_PKT:
- stream->frames_out++;
- fprintf(stderr, " %6luS",
- (unsigned long)pkt->data.twopass_stats.sz);
- stats_write(&stream->stats,
- pkt->data.twopass_stats.buf,
- pkt->data.twopass_stats.sz);
- stream->nbytes += pkt->data.raw.sz;
- break;
- case VPX_CODEC_PSNR_PKT:
-
- if (global->show_psnr)
- {
- int i;
-
- stream->psnr_sse_total += pkt->data.psnr.sse[0];
- stream->psnr_samples_total += pkt->data.psnr.samples[0];
- for (i = 0; i < 4; i++)
- {
- if (!global->quiet)
- fprintf(stderr, "%.3f ", pkt->data.psnr.psnr[i]);
- stream->psnr_totals[i] += pkt->data.psnr.psnr[i];
- }
- stream->psnr_count++;
- }
-
- break;
- default:
- break;
+ fprintf(stderr, "%.3f ", pkt->data.psnr.psnr[i]);
+ stream->psnr_totals[i] += pkt->data.psnr.psnr[i];
+ }
+ stream->psnr_count++;
}
+
+ break;
+ default:
+ break;
}
+ }
}
-static void show_psnr(struct stream_state *stream)
-{
- int i;
- double ovpsnr;
+static void show_psnr(struct stream_state *stream) {
+ int i;
+ double ovpsnr;
- if (!stream->psnr_count)
- return;
+ if (!stream->psnr_count)
+ return;
- fprintf(stderr, "Stream %d PSNR (Overall/Avg/Y/U/V)", stream->index);
- ovpsnr = vp8_mse2psnr((double)stream->psnr_samples_total, 255.0,
- (double)stream->psnr_sse_total);
- fprintf(stderr, " %.3f", ovpsnr);
+ fprintf(stderr, "Stream %d PSNR (Overall/Avg/Y/U/V)", stream->index);
+ ovpsnr = vp8_mse2psnr((double)stream->psnr_samples_total, 255.0,
+ (double)stream->psnr_sse_total);
+ fprintf(stderr, " %.3f", ovpsnr);
- for (i = 0; i < 4; i++)
- {
- fprintf(stderr, " %.3f", stream->psnr_totals[i]/stream->psnr_count);
- }
- fprintf(stderr, "\n");
+ for (i = 0; i < 4; i++) {
+ fprintf(stderr, " %.3f", stream->psnr_totals[i] / stream->psnr_count);
+ }
+ fprintf(stderr, "\n");
}
-float usec_to_fps(uint64_t usec, unsigned int frames)
-{
- return (float)(usec > 0 ? frames * 1000000.0 / (float)usec : 0);
+static float usec_to_fps(uint64_t usec, unsigned int frames) {
+ return (float)(usec > 0 ? frames * 1000000.0 / (float)usec : 0);
}
-int main(int argc, const char **argv_)
-{
- int pass;
- vpx_image_t raw;
- int frame_avail, got_data;
-
- struct input_state input = {0};
- struct global_config global;
- struct stream_state *streams = NULL;
- char **argv, **argi;
- unsigned long cx_time = 0;
- int stream_cnt = 0;
-
- exec_name = argv_[0];
-
- if (argc < 3)
- usage_exit();
-
- /* Setup default input stream settings */
- input.framerate.num = 30;
- input.framerate.den = 1;
- input.use_i420 = 1;
-
- /* First parse the global configuration values, because we want to apply
- * other parameters on top of the default configuration provided by the
- * codec.
- */
- argv = argv_dup(argc - 1, argv_ + 1);
- parse_global_config(&global, argv);
+static void test_decode(struct stream_state *stream,
+ enum TestDecodeFatality fatal,
+ const struct codec_item *codec) {
+ vpx_image_t enc_img, dec_img;
+
+ if (stream->mismatch_seen)
+ return;
+
+ /* Get the internal reference frame */
+ if (codec->fourcc == VP8_FOURCC) {
+ struct vpx_ref_frame ref_enc, ref_dec;
+ int width, height;
+
+ width = (stream->config.cfg.g_w + 15) & ~15;
+ height = (stream->config.cfg.g_h + 15) & ~15;
+ vpx_img_alloc(&ref_enc.img, VPX_IMG_FMT_I420, width, height, 1);
+ enc_img = ref_enc.img;
+ vpx_img_alloc(&ref_dec.img, VPX_IMG_FMT_I420, width, height, 1);
+ dec_img = ref_dec.img;
+
+ ref_enc.frame_type = VP8_LAST_FRAME;
+ ref_dec.frame_type = VP8_LAST_FRAME;
+ vpx_codec_control(&stream->encoder, VP8_COPY_REFERENCE, &ref_enc);
+ vpx_codec_control(&stream->decoder, VP8_COPY_REFERENCE, &ref_dec);
+ } else {
+ struct vp9_ref_frame ref;
+
+ ref.idx = 0;
+ vpx_codec_control(&stream->encoder, VP9_GET_REFERENCE, &ref);
+ enc_img = ref.img;
+ vpx_codec_control(&stream->decoder, VP9_GET_REFERENCE, &ref);
+ dec_img = ref.img;
+ }
+ ctx_exit_on_error(&stream->encoder, "Failed to get encoder reference frame");
+ ctx_exit_on_error(&stream->decoder, "Failed to get decoder reference frame");
+
+ if (!compare_img(&enc_img, &dec_img)) {
+ int y[4], u[4], v[4];
+ find_mismatch(&enc_img, &dec_img, y, u, v);
+ stream->decoder.err = 1;
+ warn_or_exit_on_error(&stream->decoder, fatal == TEST_DECODE_FATAL,
+ "Stream %d: Encode/decode mismatch on frame %d at"
+ " Y[%d, %d] {%d/%d},"
+ " U[%d, %d] {%d/%d},"
+ " V[%d, %d] {%d/%d}",
+ stream->index, stream->frames_out,
+ y[0], y[1], y[2], y[3],
+ u[0], u[1], u[2], u[3],
+ v[0], v[1], v[2], v[3]);
+ stream->mismatch_seen = stream->frames_out;
+ }
+
+ vpx_img_free(&enc_img);
+ vpx_img_free(&dec_img);
+}
- {
- /* Now parse each stream's parameters. Using a local scope here
- * due to the use of 'stream' as loop variable in FOREACH_STREAM
- * loops
- */
- struct stream_state *stream = NULL;
- do
- {
- stream = new_stream(&global, stream);
- stream_cnt++;
- if(!streams)
- streams = stream;
- } while(parse_stream_params(&global, stream, argv));
- }
+static void print_time(const char *label, int64_t etl) {
+ int hours, mins, secs;
- /* Check for unrecognized options */
- for (argi = argv; *argi; argi++)
- if (argi[0][0] == '-' && argi[0][1])
- die("Error: Unrecognized option %s\n", *argi);
+ if (etl >= 0) {
+ hours = etl / 3600;
+ etl -= hours * 3600;
+ mins = etl / 60;
+ etl -= mins * 60;
+ secs = etl;
- /* Handle non-option arguments */
- input.fn = argv[0];
+ fprintf(stderr, "[%3s %2d:%02d:%02d] ",
+ label, hours, mins, secs);
+ } else {
+ fprintf(stderr, "[%3s unknown] ", label);
+ }
+}
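
print_time() formats the ETA that main() below derives from a smoothed throughput estimate: the average_rate = (average_rate * 7 + rate) / 8 step is an exponential moving average that weights the newest sample by 1/8, damping jitter in the per-iteration rate. A sketch of just the smoothing (sample values are made up):

    #include <stdio.h>

    int main(void) {
      long long average_rate = -1;  /* negative means "no estimate yet" */
      const long long samples[] = { 800, 1200, 1000, 950 };
      int i;

      for (i = 0; i < 4; i++) {
        const long long rate = samples[i];
        average_rate = (average_rate <= 0) ? rate
                                           : (average_rate * 7 + rate) / 8;
        printf("sample %lld -> average %lld\n", rate, average_rate);
        /* 800 -> 800, 1200 -> 850, 1000 -> 868, 950 -> 878 */
      }
      return 0;
    }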
- if (!input.fn)
- usage_exit();
+int main(int argc, const char **argv_) {
+ int pass;
+ vpx_image_t raw;
+ int frame_avail, got_data;
- for (pass = global.pass ? global.pass - 1 : 0; pass < global.passes; pass++)
- {
- int frames_in = 0;
-
- open_input_file(&input);
-
- /* If the input file doesn't specify its w/h (raw files), try to get
- * the data from the first stream's configuration.
- */
- if(!input.w || !input.h)
- FOREACH_STREAM({
- if(stream->config.cfg.g_w && stream->config.cfg.g_h)
- {
- input.w = stream->config.cfg.g_w;
- input.h = stream->config.cfg.g_h;
- break;
- }
- });
-
- /* Update stream configurations from the input file's parameters */
- FOREACH_STREAM(set_stream_dimensions(stream, input.w, input.h));
- FOREACH_STREAM(validate_stream_config(stream));
-
- /* Ensure that --passes and --pass are consistent. If --pass is set and
- * --passes=2, ensure --fpf was set.
- */
- if (global.pass && global.passes == 2)
- FOREACH_STREAM({
- if(!stream->config.stats_fn)
- die("Stream %d: Must specify --fpf when --pass=%d"
- " and --passes=2\n", stream->index, global.pass);
- });
-
-
- /* Use the frame rate from the file only if none was specified
- * on the command-line.
- */
- if (!global.have_framerate)
- global.framerate = input.framerate;
-
- FOREACH_STREAM(set_default_kf_interval(stream, &global));
-
- /* Show configuration */
- if (global.verbose && pass == 0)
- FOREACH_STREAM(show_stream_config(stream, &global, &input));
-
- if(pass == (global.pass ? global.pass - 1 : 0)) {
- if (input.file_type == FILE_TYPE_Y4M)
- /*The Y4M reader does its own allocation.
- Just initialize this here to avoid problems if we never read any
- frames.*/
- memset(&raw, 0, sizeof(raw));
- else
- vpx_img_alloc(&raw,
- input.use_i420 ? VPX_IMG_FMT_I420
- : VPX_IMG_FMT_YV12,
- input.w, input.h, 32);
-
- FOREACH_STREAM(init_rate_histogram(&stream->rate_hist,
- &stream->config.cfg,
- &global.framerate));
- }
+ struct input_state input = {0};
+ struct global_config global;
+ struct stream_state *streams = NULL;
+ char **argv, **argi;
+ uint64_t cx_time = 0;
+ int stream_cnt = 0;
+ int res = 0;
- FOREACH_STREAM(open_output_file(stream, &global));
- FOREACH_STREAM(setup_pass(stream, &global, pass));
- FOREACH_STREAM(initialize_encoder(stream, &global));
+ exec_name = argv_[0];
- frame_avail = 1;
- got_data = 0;
+ if (argc < 3)
+ usage_exit();
- while (frame_avail || got_data)
- {
- struct vpx_usec_timer timer;
-
- if (!global.limit || frames_in < global.limit)
- {
- frame_avail = read_frame(&input, &raw);
-
- if (frame_avail)
- frames_in++;
-
- if (!global.quiet)
- {
- if(stream_cnt == 1)
- fprintf(stderr,
- "\rPass %d/%d frame %4d/%-4d %7"PRId64"B \033[K",
- pass + 1, global.passes, frames_in,
- streams->frames_out, (int64_t)streams->nbytes);
- else
- fprintf(stderr,
- "\rPass %d/%d frame %4d %7lu %s (%.2f fps)\033[K",
- pass + 1, global.passes, frames_in,
- cx_time > 9999999 ? cx_time / 1000 : cx_time,
- cx_time > 9999999 ? "ms" : "us",
- usec_to_fps(cx_time, frames_in));
- }
+ /* Setup default input stream settings */
+ input.framerate.num = 30;
+ input.framerate.den = 1;
+ input.use_i420 = 1;
+ input.only_i420 = 1;
+
+ /* First parse the global configuration values, because we want to apply
+ * other parameters on top of the default configuration provided by the
+ * codec.
+ */
+ argv = argv_dup(argc - 1, argv_ + 1);
+ parse_global_config(&global, argv);
+
+ {
+ /* Now parse each stream's parameters. Using a local scope here
+ * due to the use of 'stream' as loop variable in FOREACH_STREAM
+ * loops
+ */
+ struct stream_state *stream = NULL;
- }
- else
- frame_avail = 0;
+ do {
+ stream = new_stream(&global, stream);
+ stream_cnt++;
+ if (!streams)
+ streams = stream;
+ } while (parse_stream_params(&global, stream, argv));
+ }
- vpx_usec_timer_start(&timer);
- FOREACH_STREAM(encode_frame(stream, &global,
- frame_avail ? &raw : NULL,
- frames_in));
- vpx_usec_timer_mark(&timer);
- cx_time += (unsigned long)vpx_usec_timer_elapsed(&timer);
+ /* Check for unrecognized options */
+ for (argi = argv; *argi; argi++)
+ if (argi[0][0] == '-' && argi[0][1])
+ die("Error: Unrecognized option %s\n", *argi);
- FOREACH_STREAM(update_quantizer_histogram(stream));
+ /* Handle non-option arguments */
+ input.fn = argv[0];
- got_data = 0;
- FOREACH_STREAM(get_cx_data(stream, &global, &got_data));
+ if (!input.fn)
+ usage_exit();
- fflush(stdout);
- }
+#if CONFIG_NON420
+ /* Decide if other chroma subsamplings than 4:2:0 are supported */
+ if (global.codec->fourcc == VP9_FOURCC)
+ input.only_i420 = 0;
+#endif
+
+ for (pass = global.pass ? global.pass - 1 : 0; pass < global.passes; pass++) {
+ int frames_in = 0, seen_frames = 0;
+ int64_t estimated_time_left = -1;
+ int64_t average_rate = -1;
+ off_t lagged_count = 0;
- if(stream_cnt > 1)
- fprintf(stderr, "\n");
+ open_input_file(&input);
- if (!global.quiet)
- FOREACH_STREAM(fprintf(
- stderr,
- "\rPass %d/%d frame %4d/%-4d %7"PRId64"B %7lub/f %7"PRId64"b/s"
- " %7"PRId64" %s (%.2f fps)\033[K\n", pass + 1,
- global.passes, frames_in, stream->frames_out, (int64_t)stream->nbytes,
- frames_in ? (unsigned long)(stream->nbytes * 8 / frames_in) : 0,
- frames_in ? (int64_t)stream->nbytes * 8
- * (int64_t)global.framerate.num / global.framerate.den
- / frames_in
- : 0,
- stream->cx_time > 9999999 ? stream->cx_time / 1000 : stream->cx_time,
- stream->cx_time > 9999999 ? "ms" : "us",
- usec_to_fps(stream->cx_time, frames_in));
- );
+ /* If the input file doesn't specify its w/h (raw files), try to get
+ * the data from the first stream's configuration.
+ */
+ if (!input.w || !input.h)
+ FOREACH_STREAM( {
+ if (stream->config.cfg.g_w && stream->config.cfg.g_h) {
+ input.w = stream->config.cfg.g_w;
+ input.h = stream->config.cfg.g_h;
+ break;
+ }
+ });
+
+ /* Update stream configurations from the input file's parameters */
+ if (!input.w || !input.h)
+ fatal("Specify stream dimensions with --width (-w) "
+ " and --height (-h)");
+ FOREACH_STREAM(set_stream_dimensions(stream, input.w, input.h));
+ FOREACH_STREAM(validate_stream_config(stream));
+
+ /* Ensure that --passes and --pass are consistent. If --pass is set and
+ * --passes=2, ensure --fpf was set.
+ */
+ if (global.pass && global.passes == 2)
+ FOREACH_STREAM( {
+ if (!stream->config.stats_fn)
+ die("Stream %d: Must specify --fpf when --pass=%d"
+ " and --passes=2\n", stream->index, global.pass);
+ });
+
+ /* Use the frame rate from the file only if none was specified
+ * on the command-line.
+ */
+ if (!global.have_framerate)
+ global.framerate = input.framerate;
+
+ FOREACH_STREAM(set_default_kf_interval(stream, &global));
+
+ /* Show configuration */
+ if (global.verbose && pass == 0)
+ FOREACH_STREAM(show_stream_config(stream, &global, &input));
+
+ if (pass == (global.pass ? global.pass - 1 : 0)) {
+ if (input.file_type == FILE_TYPE_Y4M)
+ /*The Y4M reader does its own allocation.
+ Just initialize this here to avoid problems if we never read any
+ frames.*/
+ memset(&raw, 0, sizeof(raw));
+ else
+ vpx_img_alloc(&raw,
+ input.use_i420 ? VPX_IMG_FMT_I420
+ : VPX_IMG_FMT_YV12,
+ input.w, input.h, 32);
+
+ FOREACH_STREAM(init_rate_histogram(&stream->rate_hist,
+ &stream->config.cfg,
+ &global.framerate));
+ }
+
+ FOREACH_STREAM(open_output_file(stream, &global));
+ FOREACH_STREAM(setup_pass(stream, &global, pass));
+ FOREACH_STREAM(initialize_encoder(stream, &global));
+
+ frame_avail = 1;
+ got_data = 0;
+
+ while (frame_avail || got_data) {
+ struct vpx_usec_timer timer;
+
+ if (!global.limit || frames_in < global.limit) {
+ frame_avail = read_frame(&input, &raw);
+
+ if (frame_avail)
+ frames_in++;
+ seen_frames = frames_in > global.skip_frames ?
+ frames_in - global.skip_frames : 0;
+
+ if (!global.quiet) {
+ float fps = usec_to_fps(cx_time, seen_frames);
+ fprintf(stderr, "\rPass %d/%d ", pass + 1, global.passes);
+
+ if (stream_cnt == 1)
+ fprintf(stderr,
+ "frame %4d/%-4d %7"PRId64"B ",
+ frames_in, streams->frames_out, (int64_t)streams->nbytes);
+ else
+ fprintf(stderr, "frame %4d ", frames_in);
+
+ fprintf(stderr, "%7"PRId64" %s %.2f %s ",
+ cx_time > 9999999 ? cx_time / 1000 : cx_time,
+ cx_time > 9999999 ? "ms" : "us",
+ fps >= 1.0 ? fps : 1000.0 / fps,
+ fps >= 1.0 ? "fps" : "ms/f");
+ print_time("ETA", estimated_time_left);
+ fprintf(stderr, "\033[K");
+ }
+
+ } else
+ frame_avail = 0;
- if (global.show_psnr)
- FOREACH_STREAM(show_psnr(stream));
+ if (frames_in > global.skip_frames) {
+ vpx_usec_timer_start(&timer);
+ FOREACH_STREAM(encode_frame(stream, &global,
+ frame_avail ? &raw : NULL,
+ frames_in));
+ vpx_usec_timer_mark(&timer);
+ cx_time += vpx_usec_timer_elapsed(&timer);
- FOREACH_STREAM(vpx_codec_destroy(&stream->encoder));
+ FOREACH_STREAM(update_quantizer_histogram(stream));
- close_input_file(&input);
+ got_data = 0;
+ FOREACH_STREAM(get_cx_data(stream, &global, &got_data));
+
+ if (!got_data && input.length && !streams->frames_out) {
+ lagged_count = global.limit ? seen_frames : ftello(input.file);
+ } else if (input.length) {
+ int64_t remaining;
+ int64_t rate;
+
+ if (global.limit) {
+ int frame_in_lagged = (seen_frames - lagged_count) * 1000;
+
+ rate = cx_time ? frame_in_lagged * (int64_t)1000000 / cx_time : 0;
+ remaining = 1000 * (global.limit - global.skip_frames
+ - seen_frames + lagged_count);
+ } else {
+ off_t input_pos = ftello(input.file);
+ off_t input_pos_lagged = input_pos - lagged_count;
+ int64_t limit = input.length;
+
+ rate = cx_time ? input_pos_lagged * (int64_t)1000000 / cx_time : 0;
+ remaining = limit - input_pos + lagged_count;
+ }
+
+ average_rate = (average_rate <= 0)
+ ? rate
+ : (average_rate * 7 + rate) / 8;
+ estimated_time_left = average_rate ? remaining / average_rate : -1;
+ }
- FOREACH_STREAM(close_output_file(stream, global.codec->fourcc));
+ if (got_data && global.test_decode != TEST_DECODE_OFF)
+ FOREACH_STREAM(test_decode(stream, global.test_decode, global.codec));
+ }
- FOREACH_STREAM(stats_close(&stream->stats, global.passes-1));
+ fflush(stdout);
+ }
- if (global.pass)
- break;
+ if (stream_cnt > 1)
+ fprintf(stderr, "\n");
+
+ if (!global.quiet)
+ FOREACH_STREAM(fprintf(
+ stderr,
+ "\rPass %d/%d frame %4d/%-4d %7"PRId64"B %7lub/f %7"PRId64"b/s"
+ " %7"PRId64" %s (%.2f fps)\033[K\n", pass + 1,
+ global.passes, frames_in, stream->frames_out, (int64_t)stream->nbytes,
+ seen_frames ? (unsigned long)(stream->nbytes * 8 / seen_frames) : 0,
+ seen_frames ? (int64_t)stream->nbytes * 8
+ * (int64_t)global.framerate.num / global.framerate.den
+ / seen_frames
+ : 0,
+ stream->cx_time > 9999999 ? stream->cx_time / 1000 : stream->cx_time,
+ stream->cx_time > 9999999 ? "ms" : "us",
+ usec_to_fps(stream->cx_time, seen_frames));
+ );
+
+ if (global.show_psnr)
+ FOREACH_STREAM(show_psnr(stream));
+
+ FOREACH_STREAM(vpx_codec_destroy(&stream->encoder));
+
+ if (global.test_decode != TEST_DECODE_OFF) {
+ FOREACH_STREAM(vpx_codec_destroy(&stream->decoder));
}
- if (global.show_q_hist_buckets)
- FOREACH_STREAM(show_q_histogram(stream->counts,
- global.show_q_hist_buckets));
+ close_input_file(&input);
- if (global.show_rate_hist_buckets)
- FOREACH_STREAM(show_rate_histogram(&stream->rate_hist,
- &stream->config.cfg,
- global.show_rate_hist_buckets));
- FOREACH_STREAM(destroy_rate_histogram(&stream->rate_hist));
+ if (global.test_decode == TEST_DECODE_FATAL) {
+ FOREACH_STREAM(res |= stream->mismatch_seen);
+ }
+ FOREACH_STREAM(close_output_file(stream, global.codec->fourcc));
+
+ FOREACH_STREAM(stats_close(&stream->stats, global.passes - 1));
+
+ if (global.pass)
+ break;
+ }
+
+ if (global.show_q_hist_buckets)
+ FOREACH_STREAM(show_q_histogram(stream->counts,
+ global.show_q_hist_buckets));
+
+ if (global.show_rate_hist_buckets)
+ FOREACH_STREAM(show_rate_histogram(&stream->rate_hist,
+ &stream->config.cfg,
+ global.show_rate_hist_buckets));
+ FOREACH_STREAM(destroy_rate_histogram(&stream->rate_hist));
+
+#if CONFIG_INTERNAL_STATS
+ /* TODO(jkoleszar): This doesn't belong in this executable. Do it for now,
+ * to match some existing utilities.
+ */
+ FOREACH_STREAM({
+ FILE *f = fopen("opsnr.stt", "a");
+ if (stream->mismatch_seen) {
+ fprintf(f, "First mismatch occurred in frame %d\n",
+ stream->mismatch_seen);
+ } else {
+ fprintf(f, "No mismatch detected in recon buffers\n");
+ }
+ fclose(f);
+ });
+#endif
- vpx_img_free(&raw);
- free(argv);
- free(streams);
- return EXIT_SUCCESS;
+ vpx_img_free(&raw);
+ free(argv);
+ free(streams);
+ return res ? EXIT_FAILURE : EXIT_SUCCESS;
}
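
The ETA printed in the progress line above comes from smoothing the measured throughput with a 7/8-weight exponential moving average before dividing the remaining work by it. A minimal standalone sketch of that estimator (function and parameter names here are illustrative, not part of the patch):

    #include <stdint.h>

    /* Smooth the instantaneous rate with weight 7/8 on the history and
     * 1/8 on the new sample, then convert the remaining work into time.
     * Returns -1 when no rate estimate is available yet. */
    static int64_t update_eta(int64_t *average_rate, int64_t rate,
                              int64_t remaining) {
      *average_rate = (*average_rate <= 0)
                          ? rate
                          : (*average_rate * 7 + rate) / 8;
      return *average_rate ? remaining / *average_rate : -1;
    }

The heavy weight on the history keeps the displayed ETA from jumping around on per-frame encode-time noise while still converging within a few frames.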
diff --git a/libvpx/y4minput.c b/libvpx/y4minput.c
index ff9ffbc..47f005a 100644
--- a/libvpx/y4minput.c
+++ b/libvpx/y4minput.c
@@ -14,7 +14,7 @@
#include <string.h>
#include "y4minput.h"
-static int y4m_parse_tags(y4m_input *_y4m,char *_tags){
+static int y4m_parse_tags(y4m_input *_y4m, char *_tags) {
int got_w;
int got_h;
int got_fps;
@@ -23,55 +23,61 @@ static int y4m_parse_tags(y4m_input *_y4m,char *_tags){
int got_chroma;
char *p;
char *q;
- got_w=got_h=got_fps=got_interlace=got_par=got_chroma=0;
- for(p=_tags;;p=q){
+ got_w = got_h = got_fps = got_interlace = got_par = got_chroma = 0;
+ for (p = _tags;; p = q) {
/*Skip any leading spaces.*/
- while(*p==' ')p++;
+ while (*p == ' ')p++;
/*If that's all we have, stop.*/
- if(p[0]=='\0')break;
+ if (p[0] == '\0')break;
/*Find the end of this tag.*/
- for(q=p+1;*q!='\0'&&*q!=' ';q++);
+ for (q = p + 1; *q != '\0' && *q != ' '; q++);
/*Process the tag.*/
- switch(p[0]){
- case 'W':{
- if(sscanf(p+1,"%d",&_y4m->pic_w)!=1)return -1;
- got_w=1;
- }break;
- case 'H':{
- if(sscanf(p+1,"%d",&_y4m->pic_h)!=1)return -1;
- got_h=1;
- }break;
- case 'F':{
- if(sscanf(p+1,"%d:%d",&_y4m->fps_n,&_y4m->fps_d)!=2){
+ switch (p[0]) {
+ case 'W': {
+ if (sscanf(p + 1, "%d", &_y4m->pic_w) != 1)return -1;
+ got_w = 1;
+ }
+ break;
+ case 'H': {
+ if (sscanf(p + 1, "%d", &_y4m->pic_h) != 1)return -1;
+ got_h = 1;
+ }
+ break;
+ case 'F': {
+ if (sscanf(p + 1, "%d:%d", &_y4m->fps_n, &_y4m->fps_d) != 2) {
return -1;
}
- got_fps=1;
- }break;
- case 'I':{
- _y4m->interlace=p[1];
- got_interlace=1;
- }break;
- case 'A':{
- if(sscanf(p+1,"%d:%d",&_y4m->par_n,&_y4m->par_d)!=2){
+ got_fps = 1;
+ }
+ break;
+ case 'I': {
+ _y4m->interlace = p[1];
+ got_interlace = 1;
+ }
+ break;
+ case 'A': {
+ if (sscanf(p + 1, "%d:%d", &_y4m->par_n, &_y4m->par_d) != 2) {
return -1;
}
- got_par=1;
- }break;
- case 'C':{
- if(q-p>16)return -1;
- memcpy(_y4m->chroma_type,p+1,q-p-1);
- _y4m->chroma_type[q-p-1]='\0';
- got_chroma=1;
- }break;
+ got_par = 1;
+ }
+ break;
+ case 'C': {
+ if (q - p > 16)return -1;
+ memcpy(_y4m->chroma_type, p + 1, q - p - 1);
+ _y4m->chroma_type[q - p - 1] = '\0';
+ got_chroma = 1;
+ }
+ break;
/*Ignore unknown tags.*/
}
}
- if(!got_w||!got_h||!got_fps)return -1;
- if(!got_interlace)_y4m->interlace='?';
- if(!got_par)_y4m->par_n=_y4m->par_d=0;
+ if (!got_w || !got_h || !got_fps)return -1;
+ if (!got_interlace)_y4m->interlace = '?';
+ if (!got_par)_y4m->par_n = _y4m->par_d = 0;
/*Chroma-type is not specified in older files, e.g., those generated by
mplayer.*/
- if(!got_chroma)strcpy(_y4m->chroma_type,"420");
+ if (!got_chroma)strcpy(_y4m->chroma_type, "420");
return 0;
}
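
For reference, the tag scanner above walks space-separated fields after the stream magic. Given the illustrative header line

    YUV4MPEG2 W640 H480 F30000:1001 Ip A1:1 C420jpeg

it records a 640x480 picture at 30000/1001 fps, progressive scan ('p'), a 1:1 pixel aspect ratio, and 420jpeg chroma siting; W, H, and F are the only mandatory tags, and unrecognized tags are skipped.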
@@ -145,48 +151,48 @@ static int y4m_parse_tags(y4m_input *_y4m,char *_tags){
lines, and they are vertically co-sited with the luma samples in both the
mpeg2 and jpeg cases (thus requiring no vertical resampling).*/
static void y4m_42xmpeg2_42xjpeg_helper(unsigned char *_dst,
- const unsigned char *_src,int _c_w,int _c_h){
+ const unsigned char *_src, int _c_w, int _c_h) {
int y;
int x;
- for(y=0;y<_c_h;y++){
+ for (y = 0; y < _c_h; y++) {
/*Filter: [4 -17 114 35 -9 1]/128, derived from a 6-tap Lanczos
window.*/
- for(x=0;x<OC_MINI(_c_w,2);x++){
- _dst[x]=(unsigned char)OC_CLAMPI(0,(4*_src[0]-17*_src[OC_MAXI(x-1,0)]+
- 114*_src[x]+35*_src[OC_MINI(x+1,_c_w-1)]-9*_src[OC_MINI(x+2,_c_w-1)]+
- _src[OC_MINI(x+3,_c_w-1)]+64)>>7,255);
+ for (x = 0; x < OC_MINI(_c_w, 2); x++) {
+ _dst[x] = (unsigned char)OC_CLAMPI(0, (4 * _src[0] - 17 * _src[OC_MAXI(x - 1, 0)] +
+ 114 * _src[x] + 35 * _src[OC_MINI(x + 1, _c_w - 1)] - 9 * _src[OC_MINI(x + 2, _c_w - 1)] +
+ _src[OC_MINI(x + 3, _c_w - 1)] + 64) >> 7, 255);
}
- for(;x<_c_w-3;x++){
- _dst[x]=(unsigned char)OC_CLAMPI(0,(4*_src[x-2]-17*_src[x-1]+
- 114*_src[x]+35*_src[x+1]-9*_src[x+2]+_src[x+3]+64)>>7,255);
+ for (; x < _c_w - 3; x++) {
+ _dst[x] = (unsigned char)OC_CLAMPI(0, (4 * _src[x - 2] - 17 * _src[x - 1] +
+ 114 * _src[x] + 35 * _src[x + 1] - 9 * _src[x + 2] + _src[x + 3] + 64) >> 7, 255);
}
- for(;x<_c_w;x++){
- _dst[x]=(unsigned char)OC_CLAMPI(0,(4*_src[x-2]-17*_src[x-1]+
- 114*_src[x]+35*_src[OC_MINI(x+1,_c_w-1)]-9*_src[OC_MINI(x+2,_c_w-1)]+
- _src[_c_w-1]+64)>>7,255);
+ for (; x < _c_w; x++) {
+ _dst[x] = (unsigned char)OC_CLAMPI(0, (4 * _src[x - 2] - 17 * _src[x - 1] +
+ 114 * _src[x] + 35 * _src[OC_MINI(x + 1, _c_w - 1)] - 9 * _src[OC_MINI(x + 2, _c_w - 1)] +
+ _src[_c_w - 1] + 64) >> 7, 255);
}
- _dst+=_c_w;
- _src+=_c_w;
+ _dst += _c_w;
+ _src += _c_w;
}
}
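
The [4 -17 114 35 -9 1]/128 kernel above implements the quarter-pel chroma re-siting; its taps sum to 128, so the >>7 together with the +64 rounding term leaves DC flat. A self-contained sketch of one interior output sample, assuming x is far enough from both edges that no clamping is needed (illustrative, not part of the patch):

    /* One interior tap of the quarter-pel resampler: 6-tap Lanczos
     * coefficients, +64 for round-to-nearest, >>7 to divide by 128,
     * then a clamp to the 8-bit range. Valid for 2 <= x < width - 3. */
    static unsigned char quarter_pel_tap(const unsigned char *s, int x) {
      int v = (4 * s[x - 2] - 17 * s[x - 1] + 114 * s[x] +
               35 * s[x + 1] - 9 * s[x + 2] + s[x + 3] + 64) >> 7;
      return (unsigned char)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }

The three loops in the helper above are exactly this expression with the x-2..x+3 indices clamped at the left edge, in the interior, and at the right edge respectively.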
/*Handles both 422 and 420mpeg2 to 422jpeg and 420jpeg, respectively.*/
-static void y4m_convert_42xmpeg2_42xjpeg(y4m_input *_y4m,unsigned char *_dst,
- unsigned char *_aux){
+static void y4m_convert_42xmpeg2_42xjpeg(y4m_input *_y4m, unsigned char *_dst,
+ unsigned char *_aux) {
int c_w;
int c_h;
int c_sz;
int pli;
/*Skip past the luma data.*/
- _dst+=_y4m->pic_w*_y4m->pic_h;
+ _dst += _y4m->pic_w * _y4m->pic_h;
/*Compute the size of each chroma plane.*/
- c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
- c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
- c_sz=c_w*c_h;
- for(pli=1;pli<3;pli++){
- y4m_42xmpeg2_42xjpeg_helper(_dst,_aux,c_w,c_h);
- _dst+=c_sz;
- _aux+=c_sz;
+ c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
+ c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
+ c_sz = c_w * c_h;
+ for (pli = 1; pli < 3; pli++) {
+ y4m_42xmpeg2_42xjpeg_helper(_dst, _aux, c_w, c_h);
+ _dst += c_sz;
+ _aux += c_sz;
}
}
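
With both destination decimation factors equal to 2, the (size + dec - 1) / dec expressions above are integer ceiling divisions, so odd dimensions round up: a 101x75 picture, for example, yields 51x38 chroma planes of c_sz = 1938 bytes each.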
@@ -233,8 +239,8 @@ static void y4m_convert_42xmpeg2_42xjpeg(y4m_input *_y4m,unsigned char *_dst,
the chroma plane's resolution) to the right.
Then we use another filter to move the C_r location down one quarter pixel,
and the C_b location up one quarter pixel.*/
-static void y4m_convert_42xpaldv_42xjpeg(y4m_input *_y4m,unsigned char *_dst,
- unsigned char *_aux){
+static void y4m_convert_42xpaldv_42xjpeg(y4m_input *_y4m, unsigned char *_dst,
+ unsigned char *_aux) {
unsigned char *tmp;
int c_w;
int c_h;
@@ -243,69 +249,71 @@ static void y4m_convert_42xpaldv_42xjpeg(y4m_input *_y4m,unsigned char *_dst,
int y;
int x;
/*Skip past the luma data.*/
- _dst+=_y4m->pic_w*_y4m->pic_h;
+ _dst += _y4m->pic_w * _y4m->pic_h;
/*Compute the size of each chroma plane.*/
- c_w=(_y4m->pic_w+1)/2;
- c_h=(_y4m->pic_h+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
- c_sz=c_w*c_h;
- tmp=_aux+2*c_sz;
- for(pli=1;pli<3;pli++){
+ c_w = (_y4m->pic_w + 1) / 2;
+ c_h = (_y4m->pic_h + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
+ c_sz = c_w * c_h;
+ tmp = _aux + 2 * c_sz;
+ for (pli = 1; pli < 3; pli++) {
/*First do the horizontal re-sampling.
This is the same as the mpeg2 case, except that after the horizontal
case, we need to apply a second vertical filter.*/
- y4m_42xmpeg2_42xjpeg_helper(tmp,_aux,c_w,c_h);
- _aux+=c_sz;
- switch(pli){
- case 1:{
+ y4m_42xmpeg2_42xjpeg_helper(tmp, _aux, c_w, c_h);
+ _aux += c_sz;
+ switch (pli) {
+ case 1: {
/*Slide C_b up a quarter-pel.
This is the same filter used above, but in the other order.*/
- for(x=0;x<c_w;x++){
- for(y=0;y<OC_MINI(c_h,3);y++){
- _dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(tmp[0]
- -9*tmp[OC_MAXI(y-2,0)*c_w]+35*tmp[OC_MAXI(y-1,0)*c_w]
- +114*tmp[y*c_w]-17*tmp[OC_MINI(y+1,c_h-1)*c_w]
- +4*tmp[OC_MINI(y+2,c_h-1)*c_w]+64)>>7,255);
+ for (x = 0; x < c_w; x++) {
+ for (y = 0; y < OC_MINI(c_h, 3); y++) {
+ _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (tmp[0]
+ - 9 * tmp[OC_MAXI(y - 2, 0) * c_w] + 35 * tmp[OC_MAXI(y - 1, 0) * c_w]
+ + 114 * tmp[y * c_w] - 17 * tmp[OC_MINI(y + 1, c_h - 1) * c_w]
+ + 4 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] + 64) >> 7, 255);
}
- for(;y<c_h-2;y++){
- _dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(tmp[(y-3)*c_w]
- -9*tmp[(y-2)*c_w]+35*tmp[(y-1)*c_w]+114*tmp[y*c_w]
- -17*tmp[(y+1)*c_w]+4*tmp[(y+2)*c_w]+64)>>7,255);
+ for (; y < c_h - 2; y++) {
+ _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (tmp[(y - 3) * c_w]
+ - 9 * tmp[(y - 2) * c_w] + 35 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w]
+ - 17 * tmp[(y + 1) * c_w] + 4 * tmp[(y + 2) * c_w] + 64) >> 7, 255);
}
- for(;y<c_h;y++){
- _dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(tmp[(y-3)*c_w]
- -9*tmp[(y-2)*c_w]+35*tmp[(y-1)*c_w]+114*tmp[y*c_w]
- -17*tmp[OC_MINI(y+1,c_h-1)*c_w]+4*tmp[(c_h-1)*c_w]+64)>>7,255);
+ for (; y < c_h; y++) {
+ _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (tmp[(y - 3) * c_w]
+ - 9 * tmp[(y - 2) * c_w] + 35 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w]
+ - 17 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] + 4 * tmp[(c_h - 1) * c_w] + 64) >> 7, 255);
}
_dst++;
tmp++;
}
- _dst+=c_sz-c_w;
- tmp-=c_w;
- }break;
- case 2:{
+ _dst += c_sz - c_w;
+ tmp -= c_w;
+ }
+ break;
+ case 2: {
/*Slide C_r down a quarter-pel.
This is the same as the horizontal filter.*/
- for(x=0;x<c_w;x++){
- for(y=0;y<OC_MINI(c_h,2);y++){
- _dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(4*tmp[0]
- -17*tmp[OC_MAXI(y-1,0)*c_w]+114*tmp[y*c_w]
- +35*tmp[OC_MINI(y+1,c_h-1)*c_w]-9*tmp[OC_MINI(y+2,c_h-1)*c_w]
- +tmp[OC_MINI(y+3,c_h-1)*c_w]+64)>>7,255);
+ for (x = 0; x < c_w; x++) {
+ for (y = 0; y < OC_MINI(c_h, 2); y++) {
+ _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (4 * tmp[0]
+ - 17 * tmp[OC_MAXI(y - 1, 0) * c_w] + 114 * tmp[y * c_w]
+ + 35 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] - 9 * tmp[OC_MINI(y + 2, c_h - 1) * c_w]
+ + tmp[OC_MINI(y + 3, c_h - 1) * c_w] + 64) >> 7, 255);
}
- for(;y<c_h-3;y++){
- _dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(4*tmp[(y-2)*c_w]
- -17*tmp[(y-1)*c_w]+114*tmp[y*c_w]+35*tmp[(y+1)*c_w]
- -9*tmp[(y+2)*c_w]+tmp[(y+3)*c_w]+64)>>7,255);
+ for (; y < c_h - 3; y++) {
+ _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (4 * tmp[(y - 2) * c_w]
+ - 17 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w] + 35 * tmp[(y + 1) * c_w]
+ - 9 * tmp[(y + 2) * c_w] + tmp[(y + 3) * c_w] + 64) >> 7, 255);
}
- for(;y<c_h;y++){
- _dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(4*tmp[(y-2)*c_w]
- -17*tmp[(y-1)*c_w]+114*tmp[y*c_w]+35*tmp[OC_MINI(y+1,c_h-1)*c_w]
- -9*tmp[OC_MINI(y+2,c_h-1)*c_w]+tmp[(c_h-1)*c_w]+64)>>7,255);
+ for (; y < c_h; y++) {
+ _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (4 * tmp[(y - 2) * c_w]
+ - 17 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w] + 35 * tmp[OC_MINI(y + 1, c_h - 1) * c_w]
+ - 9 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] + tmp[(c_h - 1) * c_w] + 64) >> 7, 255);
}
_dst++;
tmp++;
}
- }break;
+ }
+ break;
}
/*For actual interlaced material, this would have to be done separately on
each field, and the shift amounts would be different.
@@ -320,27 +328,27 @@ static void y4m_convert_42xpaldv_42xjpeg(y4m_input *_y4m,unsigned char *_dst,
/*Perform vertical filtering to reduce a single plane from 4:2:2 to 4:2:0.
   This is used as a helper by several conversion routines.*/
static void y4m_422jpeg_420jpeg_helper(unsigned char *_dst,
- const unsigned char *_src,int _c_w,int _c_h){
+ const unsigned char *_src, int _c_w, int _c_h) {
int y;
int x;
/*Filter: [3 -17 78 78 -17 3]/128, derived from a 6-tap Lanczos window.*/
- for(x=0;x<_c_w;x++){
- for(y=0;y<OC_MINI(_c_h,2);y+=2){
- _dst[(y>>1)*_c_w]=OC_CLAMPI(0,(64*_src[0]
- +78*_src[OC_MINI(1,_c_h-1)*_c_w]
- -17*_src[OC_MINI(2,_c_h-1)*_c_w]
- +3*_src[OC_MINI(3,_c_h-1)*_c_w]+64)>>7,255);
+ for (x = 0; x < _c_w; x++) {
+ for (y = 0; y < OC_MINI(_c_h, 2); y += 2) {
+ _dst[(y >> 1)*_c_w] = OC_CLAMPI(0, (64 * _src[0]
+ + 78 * _src[OC_MINI(1, _c_h - 1) * _c_w]
+ - 17 * _src[OC_MINI(2, _c_h - 1) * _c_w]
+ + 3 * _src[OC_MINI(3, _c_h - 1) * _c_w] + 64) >> 7, 255);
}
- for(;y<_c_h-3;y+=2){
- _dst[(y>>1)*_c_w]=OC_CLAMPI(0,(3*(_src[(y-2)*_c_w]+_src[(y+3)*_c_w])
- -17*(_src[(y-1)*_c_w]+_src[(y+2)*_c_w])
- +78*(_src[y*_c_w]+_src[(y+1)*_c_w])+64)>>7,255);
+ for (; y < _c_h - 3; y += 2) {
+ _dst[(y >> 1)*_c_w] = OC_CLAMPI(0, (3 * (_src[(y - 2) * _c_w] + _src[(y + 3) * _c_w])
+ - 17 * (_src[(y - 1) * _c_w] + _src[(y + 2) * _c_w])
+ + 78 * (_src[y * _c_w] + _src[(y + 1) * _c_w]) + 64) >> 7, 255);
}
- for(;y<_c_h;y+=2){
- _dst[(y>>1)*_c_w]=OC_CLAMPI(0,(3*(_src[(y-2)*_c_w]
- +_src[(_c_h-1)*_c_w])-17*(_src[(y-1)*_c_w]
- +_src[OC_MINI(y+2,_c_h-1)*_c_w])
- +78*(_src[y*_c_w]+_src[OC_MINI(y+1,_c_h-1)*_c_w])+64)>>7,255);
+ for (; y < _c_h; y += 2) {
+ _dst[(y >> 1)*_c_w] = OC_CLAMPI(0, (3 * (_src[(y - 2) * _c_w]
+ + _src[(_c_h - 1) * _c_w]) - 17 * (_src[(y - 1) * _c_w]
+ + _src[OC_MINI(y + 2, _c_h - 1) * _c_w])
+ + 78 * (_src[y * _c_w] + _src[OC_MINI(y + 1, _c_h - 1) * _c_w]) + 64) >> 7, 255);
}
_src++;
_dst++;
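
The [3 -17 78 78 -17 3]/128 kernel used here is symmetric about the midpoint of rows y and y+1, so each output row of the 2:1 vertical decimation lands halfway between two input rows, which is the centered siting 420jpeg expects; as with the other filters, the taps sum to 128 for unity gain and the +64 term rounds before the >>7.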
@@ -385,8 +393,8 @@ static void y4m_422jpeg_420jpeg_helper(unsigned char *_dst,
We use a resampling filter to decimate the chroma planes by two in the
vertical direction.*/
-static void y4m_convert_422jpeg_420jpeg(y4m_input *_y4m,unsigned char *_dst,
- unsigned char *_aux){
+static void y4m_convert_422jpeg_420jpeg(y4m_input *_y4m, unsigned char *_dst,
+ unsigned char *_aux) {
int c_w;
int c_h;
int c_sz;
@@ -395,18 +403,18 @@ static void y4m_convert_422jpeg_420jpeg(y4m_input *_y4m,unsigned char *_dst,
int dst_c_sz;
int pli;
/*Skip past the luma data.*/
- _dst+=_y4m->pic_w*_y4m->pic_h;
+ _dst += _y4m->pic_w * _y4m->pic_h;
/*Compute the size of each chroma plane.*/
- c_w=(_y4m->pic_w+_y4m->src_c_dec_h-1)/_y4m->src_c_dec_h;
- c_h=_y4m->pic_h;
- dst_c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
- dst_c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
- c_sz=c_w*c_h;
- dst_c_sz=dst_c_w*dst_c_h;
- for(pli=1;pli<3;pli++){
- y4m_422jpeg_420jpeg_helper(_dst,_aux,c_w,c_h);
- _aux+=c_sz;
- _dst+=dst_c_sz;
+ c_w = (_y4m->pic_w + _y4m->src_c_dec_h - 1) / _y4m->src_c_dec_h;
+ c_h = _y4m->pic_h;
+ dst_c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
+ dst_c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
+ c_sz = c_w * c_h;
+ dst_c_sz = dst_c_w * dst_c_h;
+ for (pli = 1; pli < 3; pli++) {
+ y4m_422jpeg_420jpeg_helper(_dst, _aux, c_w, c_h);
+ _aux += c_sz;
+ _dst += dst_c_sz;
}
}
@@ -450,8 +458,8 @@ static void y4m_convert_422jpeg_420jpeg(y4m_input *_y4m,unsigned char *_dst,
pixel (at the original chroma resolution) to the right.
Then we use a second resampling filter to decimate the chroma planes by two
in the vertical direction.*/
-static void y4m_convert_422_420jpeg(y4m_input *_y4m,unsigned char *_dst,
- unsigned char *_aux){
+static void y4m_convert_422_420jpeg(y4m_input *_y4m, unsigned char *_dst,
+ unsigned char *_aux) {
unsigned char *tmp;
int c_w;
int c_h;
@@ -460,24 +468,24 @@ static void y4m_convert_422_420jpeg(y4m_input *_y4m,unsigned char *_dst,
int dst_c_sz;
int pli;
/*Skip past the luma data.*/
- _dst+=_y4m->pic_w*_y4m->pic_h;
+ _dst += _y4m->pic_w * _y4m->pic_h;
/*Compute the size of each chroma plane.*/
- c_w=(_y4m->pic_w+_y4m->src_c_dec_h-1)/_y4m->src_c_dec_h;
- c_h=_y4m->pic_h;
- dst_c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
- c_sz=c_w*c_h;
- dst_c_sz=c_w*dst_c_h;
- tmp=_aux+2*c_sz;
- for(pli=1;pli<3;pli++){
+ c_w = (_y4m->pic_w + _y4m->src_c_dec_h - 1) / _y4m->src_c_dec_h;
+ c_h = _y4m->pic_h;
+ dst_c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
+ c_sz = c_w * c_h;
+ dst_c_sz = c_w * dst_c_h;
+ tmp = _aux + 2 * c_sz;
+ for (pli = 1; pli < 3; pli++) {
/*In reality, the horizontal and vertical steps could be pipelined, for
less memory consumption and better cache performance, but we do them
separately for simplicity.*/
/*First do horizontal filtering (convert to 422jpeg)*/
- y4m_42xmpeg2_42xjpeg_helper(tmp,_aux,c_w,c_h);
+ y4m_42xmpeg2_42xjpeg_helper(tmp, _aux, c_w, c_h);
/*Now do the vertical filtering.*/
- y4m_422jpeg_420jpeg_helper(_dst,tmp,c_w,c_h);
- _aux+=c_sz;
- _dst+=dst_c_sz;
+ y4m_422jpeg_420jpeg_helper(_dst, tmp, c_w, c_h);
+ _aux += c_sz;
+ _dst += dst_c_sz;
}
}
@@ -522,8 +530,8 @@ static void y4m_convert_422_420jpeg(y4m_input *_y4m,unsigned char *_dst,
right.
Then we use another filter to decimate the planes by 2 in the vertical
direction.*/
-static void y4m_convert_411_420jpeg(y4m_input *_y4m,unsigned char *_dst,
- unsigned char *_aux){
+static void y4m_convert_411_420jpeg(y4m_input *_y4m, unsigned char *_dst,
+ unsigned char *_aux) {
unsigned char *tmp;
int c_w;
int c_h;
@@ -536,57 +544,57 @@ static void y4m_convert_411_420jpeg(y4m_input *_y4m,unsigned char *_dst,
int y;
int x;
/*Skip past the luma data.*/
- _dst+=_y4m->pic_w*_y4m->pic_h;
+ _dst += _y4m->pic_w * _y4m->pic_h;
/*Compute the size of each chroma plane.*/
- c_w=(_y4m->pic_w+_y4m->src_c_dec_h-1)/_y4m->src_c_dec_h;
- c_h=_y4m->pic_h;
- dst_c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
- dst_c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
- c_sz=c_w*c_h;
- dst_c_sz=dst_c_w*dst_c_h;
- tmp_sz=dst_c_w*c_h;
- tmp=_aux+2*c_sz;
- for(pli=1;pli<3;pli++){
+ c_w = (_y4m->pic_w + _y4m->src_c_dec_h - 1) / _y4m->src_c_dec_h;
+ c_h = _y4m->pic_h;
+ dst_c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
+ dst_c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
+ c_sz = c_w * c_h;
+ dst_c_sz = dst_c_w * dst_c_h;
+ tmp_sz = dst_c_w * c_h;
+ tmp = _aux + 2 * c_sz;
+ for (pli = 1; pli < 3; pli++) {
/*In reality, the horizontal and vertical steps could be pipelined, for
less memory consumption and better cache performance, but we do them
separately for simplicity.*/
/*First do horizontal filtering (convert to 422jpeg)*/
- for(y=0;y<c_h;y++){
+ for (y = 0; y < c_h; y++) {
/*Filters: [1 110 18 -1]/128 and [-3 50 86 -5]/128, both derived from a
4-tap Mitchell window.*/
- for(x=0;x<OC_MINI(c_w,1);x++){
- tmp[x<<1]=(unsigned char)OC_CLAMPI(0,(111*_aux[0]
- +18*_aux[OC_MINI(1,c_w-1)]-_aux[OC_MINI(2,c_w-1)]+64)>>7,255);
- tmp[x<<1|1]=(unsigned char)OC_CLAMPI(0,(47*_aux[0]
- +86*_aux[OC_MINI(1,c_w-1)]-5*_aux[OC_MINI(2,c_w-1)]+64)>>7,255);
+ for (x = 0; x < OC_MINI(c_w, 1); x++) {
+ tmp[x << 1] = (unsigned char)OC_CLAMPI(0, (111 * _aux[0]
+ + 18 * _aux[OC_MINI(1, c_w - 1)] - _aux[OC_MINI(2, c_w - 1)] + 64) >> 7, 255);
+ tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(0, (47 * _aux[0]
+ + 86 * _aux[OC_MINI(1, c_w - 1)] - 5 * _aux[OC_MINI(2, c_w - 1)] + 64) >> 7, 255);
}
- for(;x<c_w-2;x++){
- tmp[x<<1]=(unsigned char)OC_CLAMPI(0,(_aux[x-1]+110*_aux[x]
- +18*_aux[x+1]-_aux[x+2]+64)>>7,255);
- tmp[x<<1|1]=(unsigned char)OC_CLAMPI(0,(-3*_aux[x-1]+50*_aux[x]
- +86*_aux[x+1]-5*_aux[x+2]+64)>>7,255);
+ for (; x < c_w - 2; x++) {
+ tmp[x << 1] = (unsigned char)OC_CLAMPI(0, (_aux[x - 1] + 110 * _aux[x]
+ + 18 * _aux[x + 1] - _aux[x + 2] + 64) >> 7, 255);
+ tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(0, (-3 * _aux[x - 1] + 50 * _aux[x]
+ + 86 * _aux[x + 1] - 5 * _aux[x + 2] + 64) >> 7, 255);
}
- for(;x<c_w;x++){
- tmp[x<<1]=(unsigned char)OC_CLAMPI(0,(_aux[x-1]+110*_aux[x]
- +18*_aux[OC_MINI(x+1,c_w-1)]-_aux[c_w-1]+64)>>7,255);
- if((x<<1|1)<dst_c_w){
- tmp[x<<1|1]=(unsigned char)OC_CLAMPI(0,(-3*_aux[x-1]+50*_aux[x]
- +86*_aux[OC_MINI(x+1,c_w-1)]-5*_aux[c_w-1]+64)>>7,255);
+ for (; x < c_w; x++) {
+ tmp[x << 1] = (unsigned char)OC_CLAMPI(0, (_aux[x - 1] + 110 * _aux[x]
+ + 18 * _aux[OC_MINI(x + 1, c_w - 1)] - _aux[c_w - 1] + 64) >> 7, 255);
+ if ((x << 1 | 1) < dst_c_w) {
+ tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(0, (-3 * _aux[x - 1] + 50 * _aux[x]
+ + 86 * _aux[OC_MINI(x + 1, c_w - 1)] - 5 * _aux[c_w - 1] + 64) >> 7, 255);
}
}
- tmp+=dst_c_w;
- _aux+=c_w;
+ tmp += dst_c_w;
+ _aux += c_w;
}
- tmp-=tmp_sz;
+ tmp -= tmp_sz;
/*Now do the vertical filtering.*/
- y4m_422jpeg_420jpeg_helper(_dst,tmp,dst_c_w,c_h);
- _dst+=dst_c_sz;
+ y4m_422jpeg_420jpeg_helper(_dst, tmp, dst_c_w, c_h);
+ _dst += dst_c_sz;
}
}
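
The 4:1:1 path first doubles the horizontal chroma resolution with a pair of 4-tap Mitchell-derived kernels, one per output phase, before the shared vertical decimator runs. A sketch of the two interior phases (names are illustrative; the edge clamping done in the loop above is omitted):

    /* Emit the two horizontal output phases for interior sample x:
     * [1 110 18 -1]/128 for the even phase and [-3 50 86 -5]/128 for
     * the odd phase, each rounding with +64 before >>7. */
    static void upsample_2x_taps(const unsigned char *a, int x,
                                 unsigned char *even, unsigned char *odd) {
      int e = (a[x - 1] + 110 * a[x] + 18 * a[x + 1] - a[x + 2] + 64) >> 7;
      int o = (-3 * a[x - 1] + 50 * a[x] + 86 * a[x + 1]
               - 5 * a[x + 2] + 64) >> 7;
      *even = (unsigned char)(e < 0 ? 0 : (e > 255 ? 255 : e));
      *odd  = (unsigned char)(o < 0 ? 0 : (o > 255 ? 255 : o));
    }

Both kernels sum to 128, so the upsampled plane keeps the source's DC level.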
/*Convert 444 to 420jpeg.*/
-static void y4m_convert_444_420jpeg(y4m_input *_y4m,unsigned char *_dst,
- unsigned char *_aux){
+static void y4m_convert_444_420jpeg(y4m_input *_y4m, unsigned char *_dst,
+ unsigned char *_aux) {
unsigned char *tmp;
int c_w;
int c_h;
@@ -599,218 +607,247 @@ static void y4m_convert_444_420jpeg(y4m_input *_y4m,unsigned char *_dst,
int y;
int x;
/*Skip past the luma data.*/
- _dst+=_y4m->pic_w*_y4m->pic_h;
+ _dst += _y4m->pic_w * _y4m->pic_h;
/*Compute the size of each chroma plane.*/
- c_w=(_y4m->pic_w+_y4m->src_c_dec_h-1)/_y4m->src_c_dec_h;
- c_h=_y4m->pic_h;
- dst_c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
- dst_c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
- c_sz=c_w*c_h;
- dst_c_sz=dst_c_w*dst_c_h;
- tmp_sz=dst_c_w*c_h;
- tmp=_aux+2*c_sz;
- for(pli=1;pli<3;pli++){
+ c_w = (_y4m->pic_w + _y4m->src_c_dec_h - 1) / _y4m->src_c_dec_h;
+ c_h = _y4m->pic_h;
+ dst_c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
+ dst_c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
+ c_sz = c_w * c_h;
+ dst_c_sz = dst_c_w * dst_c_h;
+ tmp_sz = dst_c_w * c_h;
+ tmp = _aux + 2 * c_sz;
+ for (pli = 1; pli < 3; pli++) {
/*Filter: [3 -17 78 78 -17 3]/128, derived from a 6-tap Lanczos window.*/
- for(y=0;y<c_h;y++){
- for(x=0;x<OC_MINI(c_w,2);x+=2){
- tmp[x>>1]=OC_CLAMPI(0,(64*_aux[0]+78*_aux[OC_MINI(1,c_w-1)]
- -17*_aux[OC_MINI(2,c_w-1)]
- +3*_aux[OC_MINI(3,c_w-1)]+64)>>7,255);
+ for (y = 0; y < c_h; y++) {
+ for (x = 0; x < OC_MINI(c_w, 2); x += 2) {
+ tmp[x >> 1] = OC_CLAMPI(0, (64 * _aux[0] + 78 * _aux[OC_MINI(1, c_w - 1)]
+ - 17 * _aux[OC_MINI(2, c_w - 1)]
+ + 3 * _aux[OC_MINI(3, c_w - 1)] + 64) >> 7, 255);
}
- for(;x<c_w-3;x+=2){
- tmp[x>>1]=OC_CLAMPI(0,(3*(_aux[x-2]+_aux[x+3])
- -17*(_aux[x-1]+_aux[x+2])+78*(_aux[x]+_aux[x+1])+64)>>7,255);
+ for (; x < c_w - 3; x += 2) {
+ tmp[x >> 1] = OC_CLAMPI(0, (3 * (_aux[x - 2] + _aux[x + 3])
+ - 17 * (_aux[x - 1] + _aux[x + 2]) + 78 * (_aux[x] + _aux[x + 1]) + 64) >> 7, 255);
}
- for(;x<c_w;x+=2){
- tmp[x>>1]=OC_CLAMPI(0,(3*(_aux[x-2]+_aux[c_w-1])-
- 17*(_aux[x-1]+_aux[OC_MINI(x+2,c_w-1)])+
- 78*(_aux[x]+_aux[OC_MINI(x+1,c_w-1)])+64)>>7,255);
+ for (; x < c_w; x += 2) {
+ tmp[x >> 1] = OC_CLAMPI(0, (3 * (_aux[x - 2] + _aux[c_w - 1]) -
+ 17 * (_aux[x - 1] + _aux[OC_MINI(x + 2, c_w - 1)]) +
+ 78 * (_aux[x] + _aux[OC_MINI(x + 1, c_w - 1)]) + 64) >> 7, 255);
}
- tmp+=dst_c_w;
- _aux+=c_w;
+ tmp += dst_c_w;
+ _aux += c_w;
}
- tmp-=tmp_sz;
+ tmp -= tmp_sz;
/*Now do the vertical filtering.*/
- y4m_422jpeg_420jpeg_helper(_dst,tmp,dst_c_w,c_h);
- _dst+=dst_c_sz;
+ y4m_422jpeg_420jpeg_helper(_dst, tmp, dst_c_w, c_h);
+ _dst += dst_c_sz;
}
}
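
The 4:4:4 path is the same two-stage scheme with the decimating kernel turned sideways: the loop above halves the horizontal chroma resolution with the symmetric [3 -17 78 78 -17 3]/128 filter, then y4m_422jpeg_420jpeg_helper repeats the decimation vertically to reach 4:2:0.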
/*The image is padded with empty chroma components at 4:2:0.*/
-static void y4m_convert_mono_420jpeg(y4m_input *_y4m,unsigned char *_dst,
- unsigned char *_aux){
+static void y4m_convert_mono_420jpeg(y4m_input *_y4m, unsigned char *_dst,
+ unsigned char *_aux) {
int c_sz;
- _dst+=_y4m->pic_w*_y4m->pic_h;
- c_sz=((_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h)*
- ((_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v);
- memset(_dst,128,c_sz*2);
+ _dst += _y4m->pic_w * _y4m->pic_h;
+ c_sz = ((_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h) *
+ ((_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v);
+ memset(_dst, 128, c_sz * 2);
}
/*No conversion function needed.*/
-static void y4m_convert_null(y4m_input *_y4m,unsigned char *_dst,
- unsigned char *_aux){
+static void y4m_convert_null(y4m_input *_y4m, unsigned char *_dst,
+ unsigned char *_aux) {
}
-int y4m_input_open(y4m_input *_y4m,FILE *_fin,char *_skip,int _nskip){
+int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
+ int only_420) {
char buffer[80];
int ret;
int i;
/*Read until newline, or 80 cols, whichever happens first.*/
- for(i=0;i<79;i++){
- if(_nskip>0){
- buffer[i]=*_skip++;
+ for (i = 0; i < 79; i++) {
+ if (_nskip > 0) {
+ buffer[i] = *_skip++;
_nskip--;
+ } else {
+ ret = (int)fread(buffer + i, 1, 1, _fin);
+ if (ret < 1)return -1;
}
- else{
- ret=(int)fread(buffer+i,1,1,_fin);
- if(ret<1)return -1;
- }
- if(buffer[i]=='\n')break;
+ if (buffer[i] == '\n')break;
}
/*We skipped too much header data.*/
- if(_nskip>0)return -1;
- if(i==79){
- fprintf(stderr,"Error parsing header; not a YUV2MPEG2 file?\n");
+ if (_nskip > 0)return -1;
+ if (i == 79) {
+    fprintf(stderr, "Error parsing header; not a YUV4MPEG2 file?\n");
return -1;
}
- buffer[i]='\0';
- if(memcmp(buffer,"YUV4MPEG",8)){
- fprintf(stderr,"Incomplete magic for YUV4MPEG file.\n");
+ buffer[i] = '\0';
+ if (memcmp(buffer, "YUV4MPEG", 8)) {
+ fprintf(stderr, "Incomplete magic for YUV4MPEG file.\n");
return -1;
}
- if(buffer[8]!='2'){
- fprintf(stderr,"Incorrect YUV input file version; YUV4MPEG2 required.\n");
+ if (buffer[8] != '2') {
+ fprintf(stderr, "Incorrect YUV input file version; YUV4MPEG2 required.\n");
}
- ret=y4m_parse_tags(_y4m,buffer+5);
- if(ret<0){
- fprintf(stderr,"Error parsing YUV4MPEG2 header.\n");
+ ret = y4m_parse_tags(_y4m, buffer + 5);
+ if (ret < 0) {
+ fprintf(stderr, "Error parsing YUV4MPEG2 header.\n");
return ret;
}
- if(_y4m->interlace=='?'){
- fprintf(stderr,"Warning: Input video interlacing format unknown; "
- "assuming progressive scan.\n");
- }
- else if(_y4m->interlace!='p'){
- fprintf(stderr,"Input video is interlaced; "
- "Only progressive scan handled.\n");
+ if (_y4m->interlace == '?') {
+ fprintf(stderr, "Warning: Input video interlacing format unknown; "
+ "assuming progressive scan.\n");
+ } else if (_y4m->interlace != 'p') {
+ fprintf(stderr, "Input video is interlaced; "
+ "Only progressive scan handled.\n");
return -1;
}
- if(strcmp(_y4m->chroma_type,"420")==0||
- strcmp(_y4m->chroma_type,"420jpeg")==0){
- _y4m->src_c_dec_h=_y4m->dst_c_dec_h=_y4m->src_c_dec_v=_y4m->dst_c_dec_v=2;
- _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h
- +2*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
+ _y4m->vpx_fmt = VPX_IMG_FMT_I420;
+ _y4m->vpx_bps = 12;
+ if (strcmp(_y4m->chroma_type, "420") == 0 ||
+ strcmp(_y4m->chroma_type, "420jpeg") == 0) {
+ _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v = _y4m->dst_c_dec_v = 2;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h
+ + 2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
/*Natively supported: no conversion required.*/
- _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=0;
- _y4m->convert=y4m_convert_null;
- }
- else if(strcmp(_y4m->chroma_type,"420mpeg2")==0){
- _y4m->src_c_dec_h=_y4m->dst_c_dec_h=_y4m->src_c_dec_v=_y4m->dst_c_dec_v=2;
- _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
+ _y4m->convert = y4m_convert_null;
+ } else if (strcmp(_y4m->chroma_type, "420mpeg2") == 0) {
+ _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v = _y4m->dst_c_dec_v = 2;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
/*Chroma filter required: read into the aux buf first.*/
- _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=
- 2*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
- _y4m->convert=y4m_convert_42xmpeg2_42xjpeg;
- }
- else if(strcmp(_y4m->chroma_type,"420paldv")==0){
- _y4m->src_c_dec_h=_y4m->dst_c_dec_h=_y4m->src_c_dec_v=_y4m->dst_c_dec_v=2;
- _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz =
+ 2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
+ _y4m->convert = y4m_convert_42xmpeg2_42xjpeg;
+ } else if (strcmp(_y4m->chroma_type, "420paldv") == 0) {
+ _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v = _y4m->dst_c_dec_v = 2;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
/*Chroma filter required: read into the aux buf first.
We need to make two filter passes, so we need some extra space in the
aux buffer.*/
- _y4m->aux_buf_sz=3*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
- _y4m->aux_buf_read_sz=2*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
- _y4m->convert=y4m_convert_42xpaldv_42xjpeg;
- }
- else if(strcmp(_y4m->chroma_type,"422jpeg")==0){
- _y4m->src_c_dec_h=_y4m->dst_c_dec_h=2;
- _y4m->src_c_dec_v=1;
- _y4m->dst_c_dec_v=2;
- _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
+ _y4m->aux_buf_sz = 3 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
+ _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
+ _y4m->convert = y4m_convert_42xpaldv_42xjpeg;
+ } else if (strcmp(_y4m->chroma_type, "422jpeg") == 0) {
+ _y4m->src_c_dec_h = _y4m->dst_c_dec_h = 2;
+ _y4m->src_c_dec_v = 1;
+ _y4m->dst_c_dec_v = 2;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
/*Chroma filter required: read into the aux buf first.*/
- _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=2*((_y4m->pic_w+1)/2)*_y4m->pic_h;
- _y4m->convert=y4m_convert_422jpeg_420jpeg;
- }
- else if(strcmp(_y4m->chroma_type,"422")==0){
- _y4m->src_c_dec_h=_y4m->dst_c_dec_h=2;
- _y4m->src_c_dec_v=1;
- _y4m->dst_c_dec_v=2;
- _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
- /*Chroma filter required: read into the aux buf first.
- We need to make two filter passes, so we need some extra space in the
- aux buffer.*/
- _y4m->aux_buf_read_sz=2*((_y4m->pic_w+1)/2)*_y4m->pic_h;
- _y4m->aux_buf_sz=_y4m->aux_buf_read_sz+((_y4m->pic_w+1)/2)*_y4m->pic_h;
- _y4m->convert=y4m_convert_422_420jpeg;
- }
- else if(strcmp(_y4m->chroma_type,"411")==0){
- _y4m->src_c_dec_h=4;
- _y4m->dst_c_dec_h=2;
- _y4m->src_c_dec_v=1;
- _y4m->dst_c_dec_v=2;
- _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
- /*Chroma filter required: read into the aux buf first.
- We need to make two filter passes, so we need some extra space in the
- aux buffer.*/
- _y4m->aux_buf_read_sz=2*((_y4m->pic_w+3)/4)*_y4m->pic_h;
- _y4m->aux_buf_sz=_y4m->aux_buf_read_sz+((_y4m->pic_w+1)/2)*_y4m->pic_h;
- _y4m->convert=y4m_convert_411_420jpeg;
- }
- else if(strcmp(_y4m->chroma_type,"444")==0){
- _y4m->src_c_dec_h=1;
- _y4m->dst_c_dec_h=2;
- _y4m->src_c_dec_v=1;
- _y4m->dst_c_dec_v=2;
- _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
+ _y4m->convert = y4m_convert_422jpeg_420jpeg;
+ } else if (strcmp(_y4m->chroma_type, "422") == 0) {
+ _y4m->src_c_dec_h = 2;
+ _y4m->src_c_dec_v = 1;
+ if (only_420) {
+ _y4m->dst_c_dec_h = 2;
+ _y4m->dst_c_dec_v = 2;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
+ /*Chroma filter required: read into the aux buf first.
+ We need to make two filter passes, so we need some extra space in the
+ aux buffer.*/
+ _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz +
+ ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
+ _y4m->convert = y4m_convert_422_420jpeg;
+ } else {
+ _y4m->vpx_fmt = VPX_IMG_FMT_I422;
+ _y4m->vpx_bps = 16;
+ _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
+ _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h
+ + 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
+ /*Natively supported: no conversion required.*/
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
+ _y4m->convert = y4m_convert_null;
+ }
+ } else if (strcmp(_y4m->chroma_type, "411") == 0) {
+ _y4m->src_c_dec_h = 4;
+ _y4m->dst_c_dec_h = 2;
+ _y4m->src_c_dec_v = 1;
+ _y4m->dst_c_dec_v = 2;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
/*Chroma filter required: read into the aux buf first.
We need to make two filter passes, so we need some extra space in the
aux buffer.*/
- _y4m->aux_buf_read_sz=2*_y4m->pic_w*_y4m->pic_h;
- _y4m->aux_buf_sz=_y4m->aux_buf_read_sz+((_y4m->pic_w+1)/2)*_y4m->pic_h;
- _y4m->convert=y4m_convert_444_420jpeg;
- }
- else if(strcmp(_y4m->chroma_type,"444alpha")==0){
- _y4m->src_c_dec_h=1;
- _y4m->dst_c_dec_h=2;
- _y4m->src_c_dec_v=1;
- _y4m->dst_c_dec_v=2;
- _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
- /*Chroma filter required: read into the aux buf first.
- We need to make two filter passes, so we need some extra space in the
- aux buffer.
- The extra plane also gets read into the aux buf.
- It will be discarded.*/
- _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=3*_y4m->pic_w*_y4m->pic_h;
- _y4m->convert=y4m_convert_444_420jpeg;
- }
- else if(strcmp(_y4m->chroma_type,"mono")==0){
- _y4m->src_c_dec_h=_y4m->src_c_dec_v=0;
- _y4m->dst_c_dec_h=_y4m->dst_c_dec_v=2;
- _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
+ _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 3) / 4) * _y4m->pic_h;
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz + ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
+ _y4m->convert = y4m_convert_411_420jpeg;
+ } else if (strcmp(_y4m->chroma_type, "444") == 0) {
+ _y4m->src_c_dec_h = 1;
+ _y4m->src_c_dec_v = 1;
+ if (only_420) {
+ _y4m->dst_c_dec_h = 2;
+ _y4m->dst_c_dec_v = 2;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
+ /*Chroma filter required: read into the aux buf first.
+ We need to make two filter passes, so we need some extra space in the
+ aux buffer.*/
+ _y4m->aux_buf_read_sz = 2 * _y4m->pic_w * _y4m->pic_h;
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz +
+ ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
+ _y4m->convert = y4m_convert_444_420jpeg;
+ } else {
+ _y4m->vpx_fmt = VPX_IMG_FMT_I444;
+ _y4m->vpx_bps = 24;
+ _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
+ _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
+ _y4m->dst_buf_read_sz = 3 * _y4m->pic_w * _y4m->pic_h;
+ /*Natively supported: no conversion required.*/
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
+ _y4m->convert = y4m_convert_null;
+ }
+ } else if (strcmp(_y4m->chroma_type, "444alpha") == 0) {
+ _y4m->src_c_dec_h = 1;
+ _y4m->src_c_dec_v = 1;
+ if (only_420) {
+ _y4m->dst_c_dec_h = 2;
+ _y4m->dst_c_dec_v = 2;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
+ /*Chroma filter required: read into the aux buf first.
+ We need to make two filter passes, so we need some extra space in the
+ aux buffer.
+ The extra plane also gets read into the aux buf.
+ It will be discarded.*/
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 3 * _y4m->pic_w * _y4m->pic_h;
+ _y4m->convert = y4m_convert_444_420jpeg;
+ } else {
+ _y4m->vpx_fmt = VPX_IMG_FMT_444A;
+ _y4m->vpx_bps = 32;
+ _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
+ _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
+ _y4m->dst_buf_read_sz = 4 * _y4m->pic_w * _y4m->pic_h;
+ /*Natively supported: no conversion required.*/
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
+ _y4m->convert = y4m_convert_null;
+ }
+ } else if (strcmp(_y4m->chroma_type, "mono") == 0) {
+ _y4m->src_c_dec_h = _y4m->src_c_dec_v = 0;
+ _y4m->dst_c_dec_h = _y4m->dst_c_dec_v = 2;
+ _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
/*No extra space required, but we need to clear the chroma planes.*/
- _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=0;
- _y4m->convert=y4m_convert_mono_420jpeg;
- }
- else{
- fprintf(stderr,"Unknown chroma sampling type: %s\n",_y4m->chroma_type);
+ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
+ _y4m->convert = y4m_convert_mono_420jpeg;
+ } else {
+ fprintf(stderr, "Unknown chroma sampling type: %s\n", _y4m->chroma_type);
return -1;
}
/*The size of the final frame buffers is always computed from the
destination chroma decimation type.*/
- _y4m->dst_buf_sz=_y4m->pic_w*_y4m->pic_h
- +2*((_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h)*
- ((_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v);
- _y4m->dst_buf=(unsigned char *)malloc(_y4m->dst_buf_sz);
- _y4m->aux_buf=(unsigned char *)malloc(_y4m->aux_buf_sz);
+ _y4m->dst_buf_sz = _y4m->pic_w * _y4m->pic_h
+ + 2 * ((_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h) *
+ ((_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v);
+ _y4m->dst_buf = (unsigned char *)malloc(_y4m->dst_buf_sz);
+ _y4m->aux_buf = (unsigned char *)malloc(_y4m->aux_buf_sz);
return 0;
}
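
A hedged usage sketch of the widened entry point (the wrapper, file name, and error handling are illustrative, not part of the patch): passing only_420 = 1 preserves the old behavior of converting everything to 4:2:0 for VP8, while 0 lets 422/444/444alpha streams pass through in their native format for VP9.

    #include <stdio.h>
    #include "y4minput.h"

    /* Open a Y4M stream; only_420 selects between forced 4:2:0
     * conversion (VP8) and native pass-through (VP9). Passing a NULL
     * skip buffer with _nskip = 0 is safe: the skip branch is only
     * taken while _nskip > 0. */
    static int open_y4m(y4m_input *y4m, const char *path, int only_420) {
      FILE *fin = fopen(path, "rb");
      if (!fin)
        return -1;
      return y4m_input_open(y4m, fin, NULL, 0, only_420);
    }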
-void y4m_input_close(y4m_input *_y4m){
+void y4m_input_close(y4m_input *_y4m) {
free(_y4m->dst_buf);
free(_y4m->aux_buf);
}
-int y4m_input_fetch_frame(y4m_input *_y4m,FILE *_fin,vpx_image_t *_img){
+int y4m_input_fetch_frame(y4m_input *_y4m, FILE *_fin, vpx_image_t *_img) {
char frame[6];
int pic_sz;
int c_w;
@@ -818,54 +855,55 @@ int y4m_input_fetch_frame(y4m_input *_y4m,FILE *_fin,vpx_image_t *_img){
int c_sz;
int ret;
/*Read and skip the frame header.*/
- ret=(int)fread(frame,1,6,_fin);
- if(ret<6)return 0;
- if(memcmp(frame,"FRAME",5)){
- fprintf(stderr,"Loss of framing in Y4M input data\n");
+ ret = (int)fread(frame, 1, 6, _fin);
+ if (ret < 6)return 0;
+ if (memcmp(frame, "FRAME", 5)) {
+ fprintf(stderr, "Loss of framing in Y4M input data\n");
return -1;
}
- if(frame[5]!='\n'){
+ if (frame[5] != '\n') {
char c;
int j;
- for(j=0;j<79&&fread(&c,1,1,_fin)&&c!='\n';j++);
- if(j==79){
- fprintf(stderr,"Error parsing Y4M frame header\n");
+ for (j = 0; j < 79 && fread(&c, 1, 1, _fin) && c != '\n'; j++);
+ if (j == 79) {
+ fprintf(stderr, "Error parsing Y4M frame header\n");
return -1;
}
}
/*Read the frame data that needs no conversion.*/
- if(fread(_y4m->dst_buf,1,_y4m->dst_buf_read_sz,_fin)!=_y4m->dst_buf_read_sz){
- fprintf(stderr,"Error reading Y4M frame data.\n");
+ if (fread(_y4m->dst_buf, 1, _y4m->dst_buf_read_sz, _fin) != _y4m->dst_buf_read_sz) {
+ fprintf(stderr, "Error reading Y4M frame data.\n");
return -1;
}
/*Read the frame data that does need conversion.*/
- if(fread(_y4m->aux_buf,1,_y4m->aux_buf_read_sz,_fin)!=_y4m->aux_buf_read_sz){
- fprintf(stderr,"Error reading Y4M frame data.\n");
+ if (fread(_y4m->aux_buf, 1, _y4m->aux_buf_read_sz, _fin) != _y4m->aux_buf_read_sz) {
+ fprintf(stderr, "Error reading Y4M frame data.\n");
return -1;
}
/*Now convert the just read frame.*/
- (*_y4m->convert)(_y4m,_y4m->dst_buf,_y4m->aux_buf);
+ (*_y4m->convert)(_y4m, _y4m->dst_buf, _y4m->aux_buf);
/*Fill in the frame buffer pointers.
We don't use vpx_img_wrap() because it forces padding for odd picture
sizes, which would require a separate fread call for every row.*/
- memset(_img,0,sizeof(*_img));
+ memset(_img, 0, sizeof(*_img));
/*Y4M has the planes in Y'CbCr order, which libvpx calls Y, U, and V.*/
- _img->fmt=IMG_FMT_I420;
- _img->w=_img->d_w=_y4m->pic_w;
- _img->h=_img->d_h=_y4m->pic_h;
- /*This is hard-coded to 4:2:0 for now, as that's all VP8 supports.*/
- _img->x_chroma_shift=1;
- _img->y_chroma_shift=1;
- _img->bps=12;
+ _img->fmt = _y4m->vpx_fmt;
+ _img->w = _img->d_w = _y4m->pic_w;
+ _img->h = _img->d_h = _y4m->pic_h;
+ _img->x_chroma_shift = _y4m->dst_c_dec_h >> 1;
+ _img->y_chroma_shift = _y4m->dst_c_dec_v >> 1;
+ _img->bps = _y4m->vpx_bps;
+
/*Set up the buffer pointers.*/
- pic_sz=_y4m->pic_w*_y4m->pic_h;
- c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
- c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
- c_sz=c_w*c_h;
- _img->stride[PLANE_Y]=_y4m->pic_w;
- _img->stride[PLANE_U]=_img->stride[PLANE_V]=c_w;
- _img->planes[PLANE_Y]=_y4m->dst_buf;
- _img->planes[PLANE_U]=_y4m->dst_buf+pic_sz;
- _img->planes[PLANE_V]=_y4m->dst_buf+pic_sz+c_sz;
+ pic_sz = _y4m->pic_w * _y4m->pic_h;
+ c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
+ c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
+ c_sz = c_w * c_h;
+ _img->stride[PLANE_Y] = _img->stride[PLANE_ALPHA] = _y4m->pic_w;
+ _img->stride[PLANE_U] = _img->stride[PLANE_V] = c_w;
+ _img->planes[PLANE_Y] = _y4m->dst_buf;
+ _img->planes[PLANE_U] = _y4m->dst_buf + pic_sz;
+ _img->planes[PLANE_V] = _y4m->dst_buf + pic_sz + c_sz;
+ _img->planes[PLANE_ALPHA] = _y4m->dst_buf + pic_sz + 2 * c_sz;
return 1;
}
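
The plane pointers set up above describe one contiguous planar buffer: Y at offset 0 (pic_sz bytes), U at pic_sz, V at pic_sz + c_sz, and, for 444alpha input, the alpha plane at pic_sz + 2 * c_sz. For a 640x480 4:2:0 frame, for instance, pic_sz = 307200 and c_sz = 76800, so U starts at byte 307200 and V at byte 384000.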
diff --git a/libvpx/y4minput.h b/libvpx/y4minput.h
index 1a01bcd..b2a390c 100644
--- a/libvpx/y4minput.h
+++ b/libvpx/y4minput.h
@@ -23,11 +23,11 @@ typedef struct y4m_input y4m_input;
/*The function used to perform chroma conversion.*/
typedef void (*y4m_convert_func)(y4m_input *_y4m,
- unsigned char *_dst,unsigned char *_src);
+ unsigned char *_dst, unsigned char *_src);
-struct y4m_input{
+struct y4m_input {
int pic_w;
int pic_h;
int fps_n;
@@ -51,10 +51,13 @@ struct y4m_input{
y4m_convert_func convert;
unsigned char *dst_buf;
unsigned char *aux_buf;
+ enum vpx_img_fmt vpx_fmt;
+ int vpx_bps;
};
-int y4m_input_open(y4m_input *_y4m,FILE *_fin,char *_skip,int _nskip);
+int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
+ int only_420);
void y4m_input_close(y4m_input *_y4m);
-int y4m_input_fetch_frame(y4m_input *_y4m,FILE *_fin,vpx_image_t *img);
+int y4m_input_fetch_frame(y4m_input *_y4m, FILE *_fin, vpx_image_t *img);
#endif
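
Together with the widened y4m_input_open() above, the two new struct fields let the reader report each stream's native libvpx pixel format (vpx_fmt) and bits per pixel (vpx_bps) instead of hard-coding I420 at 12 bpp; this is what allows the caller to feed 4:2:2 and 4:4:4 material straight to the VP9 encoder.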
diff --git a/mips-dspr2/libvpx_srcs.txt b/mips-dspr2/libvpx_srcs.txt
index 8b5b727..e74102e 100644
--- a/mips-dspr2/libvpx_srcs.txt
+++ b/mips-dspr2/libvpx_srcs.txt
@@ -4,7 +4,6 @@ CHANGELOG
libs.mk
vp8/common/alloccommon.c
vp8/common/alloccommon.h
-vp8/common/asm_com_offsets.c
vp8/common/blockd.c
vp8/common/blockd.h
vp8/common/coefupdateprobs.h
@@ -67,8 +66,8 @@ vp8/common/treecoder.c
vp8/common/treecoder.h
vp8/common/variance_c.c
vp8/common/variance.h
+vp8/common/vp8_asm_com_offsets.c
vp8/common/vp8_entropymodedata.h
-vp8/decoder/asm_dec_offsets.c
vp8/decoder/dboolhuff.c
vp8/decoder/dboolhuff.h
vp8/decoder/decodemv.c
@@ -81,7 +80,7 @@ vp8/decoder/onyxd_if.c
vp8/decoder/onyxd_int.h
vp8/decoder/threading.c
vp8/decoder/treereader.h
-vp8/encoder/asm_enc_offsets.c
+vp8/decoder/vp8_asm_dec_offsets.c
vp8/encoder/bitstream.c
vp8/encoder/bitstream.h
vp8/encoder/block.h
@@ -128,11 +127,91 @@ vp8/encoder/tokenize.c
vp8/encoder/tokenize.h
vp8/encoder/treewriter.c
vp8/encoder/treewriter.h
+vp8/encoder/vp8_asm_enc_offsets.c
vp8/vp8_common.mk
vp8/vp8_cx_iface.c
vp8/vp8cx.mk
vp8/vp8_dx_iface.c
vp8/vp8dx.mk
+vp9/common/generic/vp9_systemdependent.c
+vp9/common/vp9_alloccommon.c
+vp9/common/vp9_alloccommon.h
+vp9/common/vp9_asm_com_offsets.c
+vp9/common/vp9_blockd.h
+vp9/common/vp9_common.h
+vp9/common/vp9_convolve.c
+vp9/common/vp9_convolve.h
+vp9/common/vp9_debugmodes.c
+vp9/common/vp9_default_coef_probs.h
+vp9/common/vp9_entropy.c
+vp9/common/vp9_entropy.h
+vp9/common/vp9_entropymode.c
+vp9/common/vp9_entropymode.h
+vp9/common/vp9_entropymv.c
+vp9/common/vp9_entropymv.h
+vp9/common/vp9_enums.h
+vp9/common/vp9_extend.c
+vp9/common/vp9_extend.h
+vp9/common/vp9_filter.c
+vp9/common/vp9_filter.h
+vp9/common/vp9_findnearmv.c
+vp9/common/vp9_findnearmv.h
+vp9/common/vp9_idct.c
+vp9/common/vp9_idct.h
+vp9/common/vp9_loopfilter.c
+vp9/common/vp9_loopfilter_filters.c
+vp9/common/vp9_loopfilter.h
+vp9/common/vp9_mbpitch.c
+vp9/common/vp9_modecont.c
+vp9/common/vp9_modecontext.c
+vp9/common/vp9_modecont.h
+vp9/common/vp9_mv.h
+vp9/common/vp9_mvref_common.c
+vp9/common/vp9_mvref_common.h
+vp9/common/vp9_onyxc_int.h
+vp9/common/vp9_onyx.h
+vp9/common/vp9_ppflags.h
+vp9/common/vp9_pragmas.h
+vp9/common/vp9_pred_common.c
+vp9/common/vp9_pred_common.h
+vp9/common/vp9_quant_common.c
+vp9/common/vp9_quant_common.h
+vp9/common/vp9_reconinter.c
+vp9/common/vp9_reconinter.h
+vp9/common/vp9_reconintra.c
+vp9/common/vp9_reconintra.h
+vp9/common/vp9_rtcd.c
+vp9/common/vp9_rtcd_defs.sh
+vp9/common/vp9_sadmxn.h
+vp9/common/vp9_seg_common.c
+vp9/common/vp9_seg_common.h
+vp9/common/vp9_subpelvar.h
+vp9/common/vp9_systemdependent.h
+vp9/common/vp9_textblit.h
+vp9/common/vp9_tile_common.c
+vp9/common/vp9_tile_common.h
+vp9/common/vp9_treecoder.c
+vp9/common/vp9_treecoder.h
+vp9/decoder/vp9_asm_dec_offsets.c
+vp9/decoder/vp9_dboolhuff.c
+vp9/decoder/vp9_dboolhuff.h
+vp9/decoder/vp9_decodemv.c
+vp9/decoder/vp9_decodemv.h
+vp9/decoder/vp9_decodframe.c
+vp9/decoder/vp9_decodframe.h
+vp9/decoder/vp9_detokenize.c
+vp9/decoder/vp9_detokenize.h
+vp9/decoder/vp9_idct_blk.c
+vp9/decoder/vp9_idct_blk.h
+vp9/decoder/vp9_onyxd.h
+vp9/decoder/vp9_onyxd_if.c
+vp9/decoder/vp9_onyxd_int.h
+vp9/decoder/vp9_read_bit_buffer.h
+vp9/decoder/vp9_treereader.h
+vp9/vp9_common.mk
+vp9/vp9_dx_iface.c
+vp9/vp9dx.mk
+vp9/vp9_iface_common.h
vpx_config.c
vpx/internal/vpx_codec_internal.h
vpx_mem/include/vpx_mem_intrnl.h
@@ -140,17 +219,20 @@ vpx_mem/vpx_mem.c
vpx_mem/vpx_mem.h
vpx_mem/vpx_mem.mk
vpx_ports/asm_offsets.h
+vpx_ports/emmintrin_compat.h
vpx_ports/mem.h
+vpx_ports/vpx_once.h
vpx_ports/vpx_ports.mk
vpx_ports/vpx_timer.h
vpx_scale/generic/gen_scalers.c
-vpx_scale/generic/vpxscale.c
+vpx_scale/generic/vpx_scale.c
vpx_scale/generic/yv12config.c
vpx_scale/generic/yv12extend.c
-vpx_scale/generic/yv12extend_generic.h
-vpx_scale/scale_mode.h
-vpx_scale/vpxscale.h
+vpx_scale/vpx_scale_asm_offsets.c
+vpx_scale/vpx_scale.h
vpx_scale/vpx_scale.mk
+vpx_scale/vpx_scale_rtcd.c
+vpx_scale/vpx_scale_rtcd.sh
vpx_scale/yv12config.h
vpx/src/vpx_codec.c
vpx/src/vpx_decoder.c
diff --git a/mips-dspr2/vpx_rtcd.h b/mips-dspr2/vp8_rtcd.h
index 4e212f8..b9fc986 100644
--- a/mips-dspr2/vpx_rtcd.h
+++ b/mips-dspr2/vp8_rtcd.h
@@ -1,5 +1,5 @@
-#ifndef VPX_RTCD_
-#define VPX_RTCD_
+#ifndef VP8_RTCD_H_
+#define VP8_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
@@ -7,7 +7,9 @@
#define RTCD_EXTERN extern
#endif
-#include "vp8/common/blockd.h"
+/*
+ * VP8
+ */
struct blockd;
struct macroblockd;
@@ -20,6 +22,9 @@ struct variance_vtable;
union int_mv;
struct yv12_buffer_config;
+void vp8_clear_system_state_c();
+#define vp8_clear_system_state vp8_clear_system_state_c
+
void vp8_dequantize_b_c(struct blockd*, short *dqc);
#define vp8_dequantize_b vp8_dequantize_b_c
@@ -97,7 +102,7 @@ void vp8_build_intra_predictors_mby_s_c(struct macroblockd *x, unsigned char * y
void vp8_build_intra_predictors_mbuv_s_c(struct macroblockd *x, unsigned char * uabove_row, unsigned char * vabove_row, unsigned char *uleft, unsigned char *vleft, int left_stride, unsigned char * upred_ptr, unsigned char * vpred_ptr, int pred_stride);
#define vp8_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s_c
-void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
+void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, int b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
#define vp8_intra4x4_predict vp8_intra4x4_predict_c
void vp8_sixtap_predict16x16_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
@@ -302,82 +307,7 @@ void vp8_yv12_copy_partial_frame_c(struct yv12_buffer_config *src_ybc, struct yv
int vp8_denoiser_filter_c(struct yv12_buffer_config* mc_running_avg, struct yv12_buffer_config* running_avg, struct macroblock* signal, unsigned int motion_magnitude2, int y_offset, int uv_offset);
#define vp8_denoiser_filter vp8_denoiser_filter_c
-void vp8_horizontal_line_4_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_4_5_scale vp8_horizontal_line_4_5_scale_c
-
-void vp8_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_4_5_scale vp8_vertical_band_4_5_scale_c
-
-void vp8_last_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_4_5_scale vp8_last_vertical_band_4_5_scale_c
-
-void vp8_horizontal_line_2_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_3_scale vp8_horizontal_line_2_3_scale_c
-
-void vp8_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_3_scale vp8_vertical_band_2_3_scale_c
-
-void vp8_last_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_2_3_scale vp8_last_vertical_band_2_3_scale_c
-
-void vp8_horizontal_line_3_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_5_scale vp8_horizontal_line_3_5_scale_c
-
-void vp8_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_5_scale vp8_vertical_band_3_5_scale_c
-
-void vp8_last_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_5_scale vp8_last_vertical_band_3_5_scale_c
-
-void vp8_horizontal_line_3_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_4_scale vp8_horizontal_line_3_4_scale_c
-
-void vp8_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_4_scale vp8_vertical_band_3_4_scale_c
-
-void vp8_last_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_4_scale vp8_last_vertical_band_3_4_scale_c
-
-void vp8_horizontal_line_1_2_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_1_2_scale vp8_horizontal_line_1_2_scale_c
-
-void vp8_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_1_2_scale vp8_vertical_band_1_2_scale_c
-
-void vp8_last_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_1_2_scale vp8_last_vertical_band_1_2_scale_c
-
-void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
-
-void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
-
-void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
-
-void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
-
-void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
-
-void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
-#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c
-
-void vp8_yv12_copy_frame_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c
-
-void vp8_yv12_copy_y_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_y vp8_yv12_copy_y_c
-
-void vpx_rtcd(void);
+void vp8_rtcd(void);
#include "vpx_config.h"
#ifdef RTCD_C
diff --git a/mips-dspr2/vp9_rtcd.h b/mips-dspr2/vp9_rtcd.h
new file mode 100644
index 0000000..2905eae
--- /dev/null
+++ b/mips-dspr2/vp9_rtcd.h
@@ -0,0 +1,191 @@
+#ifndef VP9_RTCD_H_
+#define VP9_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+/*
+ * VP9
+ */
+
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_enums.h"
+
+struct loop_filter_info;
+struct macroblockd;
+struct loop_filter_info;
+
+/* Encoder forward decls */
+struct macroblock;
+struct vp9_variance_vtable;
+
+#define DEC_MVCOSTS int *mvjcost, int *mvcost[2]
+union int_mv;
+struct yv12_buffer_config;
+
+void vp9_idct_add_16x16_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_16x16 vp9_idct_add_16x16_c
+
+void vp9_idct_add_8x8_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_8x8 vp9_idct_add_8x8_c
+
+void vp9_idct_add_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add vp9_idct_add_c
+
+void vp9_idct_add_32x32_c(int16_t *q, uint8_t *dst, int stride, int eob);
+#define vp9_idct_add_32x32 vp9_idct_add_32x32_c
+
+void vp9_copy_mem16x16_c(const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch);
+void vp9_copy_mem16x16_dspr2(const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch);
+#define vp9_copy_mem16x16 vp9_copy_mem16x16_dspr2
+
+void vp9_copy_mem8x8_c(const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch);
+void vp9_copy_mem8x8_dspr2(const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch);
+#define vp9_copy_mem8x8 vp9_copy_mem8x8_dspr2
+
+void vp9_copy_mem8x4_c(const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch);
+#define vp9_copy_mem8x4 vp9_copy_mem8x4_c
+
+void vp9_build_intra_predictors_c(uint8_t *src, int src_stride, uint8_t *pred, int y_stride, int mode, int bw, int bh, int up_available, int left_available, int right_available);
+#define vp9_build_intra_predictors vp9_build_intra_predictors_c
+
+void vp9_build_intra_predictors_sby_s_c(struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize);
+#define vp9_build_intra_predictors_sby_s vp9_build_intra_predictors_sby_s_c
+
+void vp9_build_intra_predictors_sbuv_s_c(struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize);
+#define vp9_build_intra_predictors_sbuv_s vp9_build_intra_predictors_sbuv_s_c
+
+void vp9_intra4x4_predict_c(struct macroblockd *xd, int block, enum BLOCK_SIZE_TYPE bsize, int b_mode, uint8_t *predictor, int pre_stride);
+#define vp9_intra4x4_predict vp9_intra4x4_predict_c
+
+void vp9_add_constant_residual_8x8_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_8x8 vp9_add_constant_residual_8x8_c
+
+void vp9_add_constant_residual_16x16_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_16x16 vp9_add_constant_residual_16x16_c
+
+void vp9_add_constant_residual_32x32_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_32x32 vp9_add_constant_residual_32x32_c
+
+void vp9_mb_lpf_vertical_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
+#define vp9_mb_lpf_vertical_edge_w vp9_mb_lpf_vertical_edge_w_c
+
+void vp9_mbloop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_vertical_edge vp9_mbloop_filter_vertical_edge_c
+
+void vp9_loop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_vertical_edge vp9_loop_filter_vertical_edge_c
+
+void vp9_mb_lpf_horizontal_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
+#define vp9_mb_lpf_horizontal_edge_w vp9_mb_lpf_horizontal_edge_w_c
+
+void vp9_mbloop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_horizontal_edge vp9_mbloop_filter_horizontal_edge_c
+
+void vp9_loop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_horizontal_edge vp9_loop_filter_horizontal_edge_c
+
+void vp9_blend_mb_inner_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_inner vp9_blend_mb_inner_c
+
+void vp9_blend_mb_outer_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_outer vp9_blend_mb_outer_c
+
+void vp9_blend_b_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_b vp9_blend_b_c
+
+void vp9_convolve8_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8 vp9_convolve8_c
+
+void vp9_convolve8_horiz_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_horiz vp9_convolve8_horiz_c
+
+void vp9_convolve8_vert_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_vert vp9_convolve8_vert_c
+
+void vp9_convolve8_avg_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg vp9_convolve8_avg_c
+
+void vp9_convolve8_avg_horiz_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_horiz vp9_convolve8_avg_horiz_c
+
+void vp9_convolve8_avg_vert_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_vert vp9_convolve8_avg_vert_c
+
+void vp9_short_idct4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_1_add vp9_short_idct4x4_1_add_c
+
+void vp9_short_idct4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_add vp9_short_idct4x4_add_c
+
+void vp9_short_idct8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct8x8_add vp9_short_idct8x8_add_c
+
+void vp9_short_idct10_8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_8x8_add vp9_short_idct10_8x8_add_c
+
+void vp9_short_idct1_8x8_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_8x8 vp9_short_idct1_8x8_c
+
+void vp9_short_idct16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct16x16_add vp9_short_idct16x16_add_c
+
+void vp9_short_idct10_16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_16x16_add vp9_short_idct10_16x16_add_c
+
+void vp9_short_idct1_16x16_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_16x16 vp9_short_idct1_16x16_c
+
+void vp9_short_idct32x32_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct32x32_add vp9_short_idct32x32_add_c
+
+void vp9_short_idct1_32x32_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_32x32 vp9_short_idct1_32x32_c
+
+void vp9_short_idct10_32x32_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_32x32_add vp9_short_idct10_32x32_add_c
+
+void vp9_short_iht4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht4x4_add vp9_short_iht4x4_add_c
+
+void vp9_short_iht8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht8x8_add vp9_short_iht8x8_add_c
+
+void vp9_short_iht16x16_add_c(int16_t *input, uint8_t *output, int pitch, int tx_type);
+#define vp9_short_iht16x16_add vp9_short_iht16x16_add_c
+
+void vp9_idct4_1d_c(int16_t *input, int16_t *output);
+#define vp9_idct4_1d vp9_idct4_1d_c
+
+void vp9_dc_only_idct_add_c(int input_dc, uint8_t *pred_ptr, uint8_t *dst_ptr, int pitch, int stride);
+#define vp9_dc_only_idct_add vp9_dc_only_idct_add_c
+
+void vp9_short_iwalsh4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_1_add vp9_short_iwalsh4x4_1_add_c
+
+void vp9_short_iwalsh4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_add vp9_short_iwalsh4x4_add_c
+
+unsigned int vp9_sad32x3_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, int max_sad);
+#define vp9_sad32x3 vp9_sad32x3_c
+
+unsigned int vp9_sad3x32_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, int max_sad);
+#define vp9_sad3x32 vp9_sad3x32_c
+
+void vp9_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+static void setup_rtcd_internal(void)
+{
+
+#if HAVE_DSPR2
+void dsputil_static_init();
+dsputil_static_init();
+#endif
+}
+#endif
+#endif
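
This generated header follows the standard RTCD pattern: the single translation unit that defines RTCD_C gets the definitions (RTCD_EXTERN expands to nothing), while every other includer sees extern declarations. Because this build passes --disable-runtime-cpu-detect, every symbol is pinned to one implementation with #define, and setup_rtcd_internal() only performs the one-time HAVE_DSPR2 init. For contrast, a sketch of the same machinery when runtime detection is enabled, using a hypothetical vp9_example_fn:

    /* Sketch of RTCD with runtime CPU detection enabled (not this build).
     * vp9_example_fn and its variants are hypothetical. */
    #include "vpx_config.h"        /* provides HAVE_DSPR2 */

    #ifdef RTCD_C
    #define RTCD_EXTERN            /* defining TU owns the pointer */
    #else
    #define RTCD_EXTERN extern     /* everyone else references it  */
    #endif

    void vp9_example_fn_c(int n);
    void vp9_example_fn_dspr2(int n);
    RTCD_EXTERN void (*vp9_example_fn)(int n);

    #ifdef RTCD_C
    static void setup_rtcd_internal(void)
    {
        vp9_example_fn = vp9_example_fn_c;     /* portable fallback */
    #if HAVE_DSPR2
        vp9_example_fn = vp9_example_fn_dspr2; /* DSPr2-optimized path */
    #endif
    }
    #endif
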
diff --git a/mips-dspr2/vpx_config.c b/mips-dspr2/vpx_config.c
index 42fc4cb..cf19239 100644
--- a/mips-dspr2/vpx_config.c
+++ b/mips-dspr2/vpx_config.c
@@ -5,5 +5,5 @@
/* tree. An additional intellectual property rights grant can be found */
/* in the file PATENTS. All contributing project authors may */
/* be found in the AUTHORS file in the root of the source tree. */
-static const char* const cfg = "--force-target=mips32-android-gcc --disable-runtime-cpu-detect --sdk-path=/usr/local/google/home/johannkoenig/android-ndk --disable-examples --disable-docs --enable-dspr2 --enable-realtime-only";
+static const char* const cfg = "--force-target=mips32-android-gcc --disable-runtime-cpu-detect --sdk-path=/usr/local/google/home/johannkoenig/android-ndk --disable-vp9-encoder --enable-dspr2 --disable-examples --disable-docs --enable-realtime-only";
const char *vpx_codec_build_config(void) {return cfg;}
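
The cfg string above is the exact configure line baked into the object, and vpx_codec_build_config() simply returns it, which makes it easy to verify at runtime that the linked library really was built decoder-only for VP9. A minimal check, assuming the standard declaration in vpx/vpx_codec.h:

    /* Print and inspect the configure flags of the linked libvpx. */
    #include <stdio.h>
    #include <string.h>
    #include "vpx/vpx_codec.h"

    int main(void)
    {
        const char *cfg = vpx_codec_build_config();
        printf("built with: %s\n", cfg);
        if (strstr(cfg, "--disable-vp9-encoder") != NULL)
            printf("VP9 is decode-only in this build\n");
        return 0;
    }
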
diff --git a/mips-dspr2/vpx_config.h b/mips-dspr2/vpx_config.h
index b03bd7c..0ca4657 100644
--- a/mips-dspr2/vpx_config.h
+++ b/mips-dspr2/vpx_config.h
@@ -9,6 +9,7 @@
#ifndef VPX_CONFIG_H
#define VPX_CONFIG_H
#define RESTRICT
+#define INLINE __inline__ __attribute__((always_inline))
#define ARCH_ARM 0
#define ARCH_MIPS 1
#define ARCH_X86 0
@@ -34,7 +35,7 @@
#define HAVE_SYS_MMAN_H 1
#define HAVE_UNISTD_H 1
#define CONFIG_EXTERNAL_BUILD 0
-#define CONFIG_INSTALL_DOCS 1
+#define CONFIG_INSTALL_DOCS 0
#define CONFIG_INSTALL_BINS 1
#define CONFIG_INSTALL_LIBS 1
#define CONFIG_INSTALL_SRCS 0
@@ -61,7 +62,10 @@
#define CONFIG_INTERNAL_STATS 0
#define CONFIG_VP8_ENCODER 1
#define CONFIG_VP8_DECODER 1
+#define CONFIG_VP9_ENCODER 0
+#define CONFIG_VP9_DECODER 1
#define CONFIG_VP8 1
+#define CONFIG_VP9 1
#define CONFIG_ENCODERS 1
#define CONFIG_DECODERS 1
#define CONFIG_STATIC_MSVCRT 0
@@ -77,4 +81,11 @@
#define CONFIG_UNIT_TESTS 0
#define CONFIG_MULTI_RES_ENCODING 0
#define CONFIG_TEMPORAL_DENOISING 1
+#define CONFIG_EXPERIMENTAL 0
+#define CONFIG_DECRYPT 0
+#define CONFIG_ONESHOTQ 0
+#define CONFIG_MULTIPLE_ARF 0
+#define CONFIG_NON420 0
+#define CONFIG_ALPHA 0
+#define CONFIG_BALANCED_COEFTREE 0
#endif /* VPX_CONFIG_H */
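
These CONFIG_* macros are what the rest of the tree keys off: CONFIG_VP9 and CONFIG_VP9_DECODER are now 1 while CONFIG_VP9_ENCODER stays 0, matching the --disable-vp9-encoder configure flag. A hedged sketch of the typical compile-time gating (register_available_codecs() is illustrative, not a libvpx function):

    /* Sketch: gating codec support on vpx_config.h macros. */
    #include "vpx_config.h"

    static void register_available_codecs(void)
    {
    #if CONFIG_VP8_DECODER
        /* VP8 decode path compiled in */
    #endif
    #if CONFIG_VP9_DECODER
        /* VP9 decode path compiled in -- enabled by this update */
    #endif
    #if CONFIG_VP9_ENCODER
        /* excluded: CONFIG_VP9_ENCODER is 0 in this configuration */
    #endif
    }
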
diff --git a/mips-dspr2/vpx_scale_rtcd.h b/mips-dspr2/vpx_scale_rtcd.h
new file mode 100644
index 0000000..7af466a
--- /dev/null
+++ b/mips-dspr2/vpx_scale_rtcd.h
@@ -0,0 +1,58 @@
+#ifndef VPX_SCALE_RTCD_H_
+#define VPX_SCALE_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+struct yv12_buffer_config;
+
+void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
+
+void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
+
+void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
+
+void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
+
+void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
+
+void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
+#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c
+
+void vp8_yv12_copy_frame_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c
+
+void vp8_yv12_copy_y_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vp8_yv12_copy_y vp8_yv12_copy_y_c
+
+void vp9_extend_frame_borders_c(struct yv12_buffer_config *ybf, int subsampling_x, int subsampling_y);
+#define vp9_extend_frame_borders vp9_extend_frame_borders_c
+
+void vpx_scale_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+static void setup_rtcd_internal(void)
+{
+
+#if HAVE_DSPR2
+void dsputil_static_init();
+dsputil_static_init();
+#endif
+}
+#endif
+#endif
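
The fixed-ratio scalers declared above encode their ratio in the name; assuming the A_B convention maps A source pixels onto B destination pixels (so 5_4, 5_3 and 2_1 are all downscalers), a driver fragment might look like the following hypothetical sketch:

    /* Hypothetical fragment: one line of a 5:4 horizontal downscale,
     * assuming the A_B naming means A source pixels -> B dest pixels. */
    #include "vpx_scale_rtcd.h"

    static void scale_line_5_to_4(const unsigned char *src,
                                  unsigned int src_width,
                                  unsigned char *dst)
    {
        unsigned int dst_width = src_width * 4 / 5;
        vp8_horizontal_line_5_4_scale(src, src_width, dst, dst_width);
    }
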
diff --git a/mips-dspr2/vpx_version.h b/mips-dspr2/vpx_version.h
index 663dd49..512851c 100644
--- a/mips-dspr2/vpx_version.h
+++ b/mips-dspr2/vpx_version.h
@@ -1,7 +1,7 @@
#define VERSION_MAJOR 1
-#define VERSION_MINOR 1
+#define VERSION_MINOR 2
#define VERSION_PATCH 0
#define VERSION_EXTRA ""
#define VERSION_PACKED ((VERSION_MAJOR<<16)|(VERSION_MINOR<<8)|(VERSION_PATCH))
-#define VERSION_STRING_NOSP "v1.1.0"
-#define VERSION_STRING " v1.1.0"
+#define VERSION_STRING_NOSP "v1.2.0"
+#define VERSION_STRING " v1.2.0"
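
VERSION_PACKED folds the three components into one comparable integer: v1.2.0 packs to (1<<16)|(2<<8)|0 = 0x010200, strictly greater than v1.1.0's 0x010100, so "is the library at least vX.Y.Z?" becomes a single comparison. A quick self-contained illustration:

    /* VERSION_PACKED arithmetic from vpx_version.h, worked through. */
    #include <stdio.h>

    #define PACK(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat))

    int main(void)
    {
        printf("v1.2.0 -> 0x%06x\n", PACK(1, 2, 0));            /* 0x010200 */
        printf("v1.1.0 -> 0x%06x\n", PACK(1, 1, 0));            /* 0x010100 */
        printf("upgraded: %d\n", PACK(1, 2, 0) > PACK(1, 1, 0)); /* 1 */
        return 0;
    }
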
diff --git a/mips/.bins b/mips/.bins
deleted file mode 100644
index e69de29..0000000
--- a/mips/.bins
+++ /dev/null
diff --git a/mips/.docs b/mips/.docs
deleted file mode 100644
index e69de29..0000000
--- a/mips/.docs
+++ /dev/null
diff --git a/mips/.libs b/mips/.libs
deleted file mode 100644
index e69de29..0000000
--- a/mips/.libs
+++ /dev/null
diff --git a/mips/libvpx_srcs.txt b/mips/libvpx_srcs.txt
index 5756427..8c1ec80 100644
--- a/mips/libvpx_srcs.txt
+++ b/mips/libvpx_srcs.txt
@@ -4,7 +4,6 @@ CHANGELOG
libs.mk
vp8/common/alloccommon.c
vp8/common/alloccommon.h
-vp8/common/asm_com_offsets.c
vp8/common/blockd.c
vp8/common/blockd.h
vp8/common/coefupdateprobs.h
@@ -61,8 +60,8 @@ vp8/common/treecoder.c
vp8/common/treecoder.h
vp8/common/variance_c.c
vp8/common/variance.h
+vp8/common/vp8_asm_com_offsets.c
vp8/common/vp8_entropymodedata.h
-vp8/decoder/asm_dec_offsets.c
vp8/decoder/dboolhuff.c
vp8/decoder/dboolhuff.h
vp8/decoder/decodemv.c
@@ -75,7 +74,7 @@ vp8/decoder/onyxd_if.c
vp8/decoder/onyxd_int.h
vp8/decoder/threading.c
vp8/decoder/treereader.h
-vp8/encoder/asm_enc_offsets.c
+vp8/decoder/vp8_asm_dec_offsets.c
vp8/encoder/bitstream.c
vp8/encoder/bitstream.h
vp8/encoder/block.h
@@ -122,11 +121,91 @@ vp8/encoder/tokenize.c
vp8/encoder/tokenize.h
vp8/encoder/treewriter.c
vp8/encoder/treewriter.h
+vp8/encoder/vp8_asm_enc_offsets.c
vp8/vp8_common.mk
vp8/vp8_cx_iface.c
vp8/vp8cx.mk
vp8/vp8_dx_iface.c
vp8/vp8dx.mk
+vp9/common/generic/vp9_systemdependent.c
+vp9/common/vp9_alloccommon.c
+vp9/common/vp9_alloccommon.h
+vp9/common/vp9_asm_com_offsets.c
+vp9/common/vp9_blockd.h
+vp9/common/vp9_common.h
+vp9/common/vp9_convolve.c
+vp9/common/vp9_convolve.h
+vp9/common/vp9_debugmodes.c
+vp9/common/vp9_default_coef_probs.h
+vp9/common/vp9_entropy.c
+vp9/common/vp9_entropy.h
+vp9/common/vp9_entropymode.c
+vp9/common/vp9_entropymode.h
+vp9/common/vp9_entropymv.c
+vp9/common/vp9_entropymv.h
+vp9/common/vp9_enums.h
+vp9/common/vp9_extend.c
+vp9/common/vp9_extend.h
+vp9/common/vp9_filter.c
+vp9/common/vp9_filter.h
+vp9/common/vp9_findnearmv.c
+vp9/common/vp9_findnearmv.h
+vp9/common/vp9_idct.c
+vp9/common/vp9_idct.h
+vp9/common/vp9_loopfilter.c
+vp9/common/vp9_loopfilter_filters.c
+vp9/common/vp9_loopfilter.h
+vp9/common/vp9_mbpitch.c
+vp9/common/vp9_modecont.c
+vp9/common/vp9_modecontext.c
+vp9/common/vp9_modecont.h
+vp9/common/vp9_mv.h
+vp9/common/vp9_mvref_common.c
+vp9/common/vp9_mvref_common.h
+vp9/common/vp9_onyxc_int.h
+vp9/common/vp9_onyx.h
+vp9/common/vp9_ppflags.h
+vp9/common/vp9_pragmas.h
+vp9/common/vp9_pred_common.c
+vp9/common/vp9_pred_common.h
+vp9/common/vp9_quant_common.c
+vp9/common/vp9_quant_common.h
+vp9/common/vp9_reconinter.c
+vp9/common/vp9_reconinter.h
+vp9/common/vp9_reconintra.c
+vp9/common/vp9_reconintra.h
+vp9/common/vp9_rtcd.c
+vp9/common/vp9_rtcd_defs.sh
+vp9/common/vp9_sadmxn.h
+vp9/common/vp9_seg_common.c
+vp9/common/vp9_seg_common.h
+vp9/common/vp9_subpelvar.h
+vp9/common/vp9_systemdependent.h
+vp9/common/vp9_textblit.h
+vp9/common/vp9_tile_common.c
+vp9/common/vp9_tile_common.h
+vp9/common/vp9_treecoder.c
+vp9/common/vp9_treecoder.h
+vp9/decoder/vp9_asm_dec_offsets.c
+vp9/decoder/vp9_dboolhuff.c
+vp9/decoder/vp9_dboolhuff.h
+vp9/decoder/vp9_decodemv.c
+vp9/decoder/vp9_decodemv.h
+vp9/decoder/vp9_decodframe.c
+vp9/decoder/vp9_decodframe.h
+vp9/decoder/vp9_detokenize.c
+vp9/decoder/vp9_detokenize.h
+vp9/decoder/vp9_idct_blk.c
+vp9/decoder/vp9_idct_blk.h
+vp9/decoder/vp9_onyxd.h
+vp9/decoder/vp9_onyxd_if.c
+vp9/decoder/vp9_onyxd_int.h
+vp9/decoder/vp9_read_bit_buffer.h
+vp9/decoder/vp9_treereader.h
+vp9/vp9_common.mk
+vp9/vp9_dx_iface.c
+vp9/vp9dx.mk
+vp9/vp9_iface_common.h
vpx_config.c
vpx/internal/vpx_codec_internal.h
vpx_mem/include/vpx_mem_intrnl.h
@@ -134,17 +213,20 @@ vpx_mem/vpx_mem.c
vpx_mem/vpx_mem.h
vpx_mem/vpx_mem.mk
vpx_ports/asm_offsets.h
+vpx_ports/emmintrin_compat.h
vpx_ports/mem.h
+vpx_ports/vpx_once.h
vpx_ports/vpx_ports.mk
vpx_ports/vpx_timer.h
vpx_scale/generic/gen_scalers.c
-vpx_scale/generic/vpxscale.c
+vpx_scale/generic/vpx_scale.c
vpx_scale/generic/yv12config.c
vpx_scale/generic/yv12extend.c
-vpx_scale/generic/yv12extend_generic.h
-vpx_scale/scale_mode.h
-vpx_scale/vpxscale.h
+vpx_scale/vpx_scale_asm_offsets.c
+vpx_scale/vpx_scale.h
vpx_scale/vpx_scale.mk
+vpx_scale/vpx_scale_rtcd.c
+vpx_scale/vpx_scale_rtcd.sh
vpx_scale/yv12config.h
vpx/src/vpx_codec.c
vpx/src/vpx_decoder.c
diff --git a/mips/vpx_rtcd.h b/mips/vp8_rtcd.h
index fe84d62..5f63677 100644
--- a/mips/vpx_rtcd.h
+++ b/mips/vp8_rtcd.h
@@ -1,5 +1,5 @@
-#ifndef VPX_RTCD_
-#define VPX_RTCD_
+#ifndef VP8_RTCD_H_
+#define VP8_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
@@ -7,7 +7,9 @@
#define RTCD_EXTERN extern
#endif
-#include "vp8/common/blockd.h"
+/*
+ * VP8
+ */
struct blockd;
struct macroblockd;
@@ -20,6 +22,9 @@ struct variance_vtable;
union int_mv;
struct yv12_buffer_config;
+void vp8_clear_system_state_c();
+#define vp8_clear_system_state vp8_clear_system_state_c
+
void vp8_dequantize_b_c(struct blockd*, short *dqc);
#define vp8_dequantize_b vp8_dequantize_b_c
@@ -83,7 +88,7 @@ void vp8_build_intra_predictors_mby_s_c(struct macroblockd *x, unsigned char * y
void vp8_build_intra_predictors_mbuv_s_c(struct macroblockd *x, unsigned char * uabove_row, unsigned char * vabove_row, unsigned char *uleft, unsigned char *vleft, int left_stride, unsigned char * upred_ptr, unsigned char * vpred_ptr, int pred_stride);
#define vp8_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s_c
-void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
+void vp8_intra4x4_predict_c(unsigned char *Above, unsigned char *yleft, int left_stride, int b_mode, unsigned char *dst, int dst_stride, unsigned char top_left);
#define vp8_intra4x4_predict vp8_intra4x4_predict_c
void vp8_sixtap_predict16x16_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
@@ -284,82 +289,7 @@ void vp8_yv12_copy_partial_frame_c(struct yv12_buffer_config *src_ybc, struct yv
int vp8_denoiser_filter_c(struct yv12_buffer_config* mc_running_avg, struct yv12_buffer_config* running_avg, struct macroblock* signal, unsigned int motion_magnitude2, int y_offset, int uv_offset);
#define vp8_denoiser_filter vp8_denoiser_filter_c
-void vp8_horizontal_line_4_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_4_5_scale vp8_horizontal_line_4_5_scale_c
-
-void vp8_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_4_5_scale vp8_vertical_band_4_5_scale_c
-
-void vp8_last_vertical_band_4_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_4_5_scale vp8_last_vertical_band_4_5_scale_c
-
-void vp8_horizontal_line_2_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_3_scale vp8_horizontal_line_2_3_scale_c
-
-void vp8_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_3_scale vp8_vertical_band_2_3_scale_c
-
-void vp8_last_vertical_band_2_3_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_2_3_scale vp8_last_vertical_band_2_3_scale_c
-
-void vp8_horizontal_line_3_5_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_5_scale vp8_horizontal_line_3_5_scale_c
-
-void vp8_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_5_scale vp8_vertical_band_3_5_scale_c
-
-void vp8_last_vertical_band_3_5_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_5_scale vp8_last_vertical_band_3_5_scale_c
-
-void vp8_horizontal_line_3_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_3_4_scale vp8_horizontal_line_3_4_scale_c
-
-void vp8_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_3_4_scale vp8_vertical_band_3_4_scale_c
-
-void vp8_last_vertical_band_3_4_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_3_4_scale vp8_last_vertical_band_3_4_scale_c
-
-void vp8_horizontal_line_1_2_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_1_2_scale vp8_horizontal_line_1_2_scale_c
-
-void vp8_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_1_2_scale vp8_vertical_band_1_2_scale_c
-
-void vp8_last_vertical_band_1_2_scale_c(unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_last_vertical_band_1_2_scale vp8_last_vertical_band_1_2_scale_c
-
-void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
-
-void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
-
-void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
-
-void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
-
-void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
-#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
-
-void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
-#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
-
-void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
-#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c
-
-void vp8_yv12_copy_frame_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c
-
-void vp8_yv12_copy_y_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
-#define vp8_yv12_copy_y vp8_yv12_copy_y_c
-
-void vpx_rtcd(void);
+void vp8_rtcd(void);
#include "vpx_config.h"
#ifdef RTCD_C
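
Among the additions here is vp8_clear_system_state(), which binds to a no-op C version in this build; on x86 builds it maps to an implementation that resets MMX/x87 state before floating-point code runs. A hedged usage fragment (the surrounding function is illustrative):

    /* Illustrative only: call vp8_clear_system_state() before FP-sensitive
     * work. On this MIPS build it binds to the no-op C version. */
    #include "vp8_rtcd.h"

    static double mean_of(const int *v, int n)
    {
        int i, sum = 0;
        vp8_clear_system_state(); /* reset CPU state on targets that need it */
        for (i = 0; i < n; ++i)
            sum += v[i];
        return n ? (double)sum / n : 0.0;
    }
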
diff --git a/mips/vp9_rtcd.h b/mips/vp9_rtcd.h
new file mode 100644
index 0000000..1d7b4d2
--- /dev/null
+++ b/mips/vp9_rtcd.h
@@ -0,0 +1,189 @@
+#ifndef VP9_RTCD_H_
+#define VP9_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+/*
+ * VP9
+ */
+
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_enums.h"
+
+struct loop_filter_info;
+struct macroblockd;
+struct loop_filter_info;
+
+/* Encoder forward decls */
+struct macroblock;
+struct vp9_variance_vtable;
+
+#define DEC_MVCOSTS int *mvjcost, int *mvcost[2]
+union int_mv;
+struct yv12_buffer_config;
+
+void vp9_idct_add_16x16_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_16x16 vp9_idct_add_16x16_c
+
+void vp9_idct_add_8x8_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add_8x8 vp9_idct_add_8x8_c
+
+void vp9_idct_add_c(int16_t *input, uint8_t *dest, int stride, int eob);
+#define vp9_idct_add vp9_idct_add_c
+
+void vp9_idct_add_32x32_c(int16_t *q, uint8_t *dst, int stride, int eob);
+#define vp9_idct_add_32x32 vp9_idct_add_32x32_c
+
+void vp9_copy_mem16x16_c(const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch);
+#define vp9_copy_mem16x16 vp9_copy_mem16x16_c
+
+void vp9_copy_mem8x8_c(const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch);
+#define vp9_copy_mem8x8 vp9_copy_mem8x8_c
+
+void vp9_copy_mem8x4_c(const uint8_t *src, int src_pitch, uint8_t *dst, int dst_pitch);
+#define vp9_copy_mem8x4 vp9_copy_mem8x4_c
+
+void vp9_build_intra_predictors_c(uint8_t *src, int src_stride, uint8_t *pred, int y_stride, int mode, int bw, int bh, int up_available, int left_available, int right_available);
+#define vp9_build_intra_predictors vp9_build_intra_predictors_c
+
+void vp9_build_intra_predictors_sby_s_c(struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize);
+#define vp9_build_intra_predictors_sby_s vp9_build_intra_predictors_sby_s_c
+
+void vp9_build_intra_predictors_sbuv_s_c(struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize);
+#define vp9_build_intra_predictors_sbuv_s vp9_build_intra_predictors_sbuv_s_c
+
+void vp9_intra4x4_predict_c(struct macroblockd *xd, int block, enum BLOCK_SIZE_TYPE bsize, int b_mode, uint8_t *predictor, int pre_stride);
+#define vp9_intra4x4_predict vp9_intra4x4_predict_c
+
+void vp9_add_constant_residual_8x8_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_8x8 vp9_add_constant_residual_8x8_c
+
+void vp9_add_constant_residual_16x16_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_16x16 vp9_add_constant_residual_16x16_c
+
+void vp9_add_constant_residual_32x32_c(const int16_t diff, uint8_t *dest, int stride);
+#define vp9_add_constant_residual_32x32 vp9_add_constant_residual_32x32_c
+
+void vp9_mb_lpf_vertical_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
+#define vp9_mb_lpf_vertical_edge_w vp9_mb_lpf_vertical_edge_w_c
+
+void vp9_mbloop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_vertical_edge vp9_mbloop_filter_vertical_edge_c
+
+void vp9_loop_filter_vertical_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_vertical_edge vp9_loop_filter_vertical_edge_c
+
+void vp9_mb_lpf_horizontal_edge_w_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
+#define vp9_mb_lpf_horizontal_edge_w vp9_mb_lpf_horizontal_edge_w_c
+
+void vp9_mbloop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_mbloop_filter_horizontal_edge vp9_mbloop_filter_horizontal_edge_c
+
+void vp9_loop_filter_horizontal_edge_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count);
+#define vp9_loop_filter_horizontal_edge vp9_loop_filter_horizontal_edge_c
+
+void vp9_blend_mb_inner_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_inner vp9_blend_mb_inner_c
+
+void vp9_blend_mb_outer_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_mb_outer vp9_blend_mb_outer_c
+
+void vp9_blend_b_c(uint8_t *y, uint8_t *u, uint8_t *v, int y1, int u1, int v1, int alpha, int stride);
+#define vp9_blend_b vp9_blend_b_c
+
+void vp9_convolve8_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8 vp9_convolve8_c
+
+void vp9_convolve8_horiz_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_horiz vp9_convolve8_horiz_c
+
+void vp9_convolve8_vert_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_vert vp9_convolve8_vert_c
+
+void vp9_convolve8_avg_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg vp9_convolve8_avg_c
+
+void vp9_convolve8_avg_horiz_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_horiz vp9_convolve8_avg_horiz_c
+
+void vp9_convolve8_avg_vert_c(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
+#define vp9_convolve8_avg_vert vp9_convolve8_avg_vert_c
+
+void vp9_short_idct4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_1_add vp9_short_idct4x4_1_add_c
+
+void vp9_short_idct4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct4x4_add vp9_short_idct4x4_add_c
+
+void vp9_short_idct8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct8x8_add vp9_short_idct8x8_add_c
+
+void vp9_short_idct10_8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_8x8_add vp9_short_idct10_8x8_add_c
+
+void vp9_short_idct1_8x8_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_8x8 vp9_short_idct1_8x8_c
+
+void vp9_short_idct16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct16x16_add vp9_short_idct16x16_add_c
+
+void vp9_short_idct10_16x16_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_16x16_add vp9_short_idct10_16x16_add_c
+
+void vp9_short_idct1_16x16_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_16x16 vp9_short_idct1_16x16_c
+
+void vp9_short_idct32x32_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct32x32_add vp9_short_idct32x32_add_c
+
+void vp9_short_idct1_32x32_c(int16_t *input, int16_t *output);
+#define vp9_short_idct1_32x32 vp9_short_idct1_32x32_c
+
+void vp9_short_idct10_32x32_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_idct10_32x32_add vp9_short_idct10_32x32_add_c
+
+void vp9_short_iht4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht4x4_add vp9_short_iht4x4_add_c
+
+void vp9_short_iht8x8_add_c(int16_t *input, uint8_t *dest, int dest_stride, int tx_type);
+#define vp9_short_iht8x8_add vp9_short_iht8x8_add_c
+
+void vp9_short_iht16x16_add_c(int16_t *input, uint8_t *output, int pitch, int tx_type);
+#define vp9_short_iht16x16_add vp9_short_iht16x16_add_c
+
+void vp9_idct4_1d_c(int16_t *input, int16_t *output);
+#define vp9_idct4_1d vp9_idct4_1d_c
+
+void vp9_dc_only_idct_add_c(int input_dc, uint8_t *pred_ptr, uint8_t *dst_ptr, int pitch, int stride);
+#define vp9_dc_only_idct_add vp9_dc_only_idct_add_c
+
+void vp9_short_iwalsh4x4_1_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_1_add vp9_short_iwalsh4x4_1_add_c
+
+void vp9_short_iwalsh4x4_add_c(int16_t *input, uint8_t *dest, int dest_stride);
+#define vp9_short_iwalsh4x4_add vp9_short_iwalsh4x4_add_c
+
+unsigned int vp9_sad32x3_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, int max_sad);
+#define vp9_sad32x3 vp9_sad32x3_c
+
+unsigned int vp9_sad3x32_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, int max_sad);
+#define vp9_sad3x32 vp9_sad3x32_c
+
+void vp9_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+static void setup_rtcd_internal(void)
+{
+
+#if HAVE_DSPR2
+void dsputil_static_init();
+dsputil_static_init();
+#endif
+}
+#endif
+#endif
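
Unlike the mips-dspr2 header earlier in this change, this plain-MIPS variant binds every symbol to its _c fallback; the dspr2 build rebinds vp9_copy_mem16x16 and vp9_copy_mem8x8 to their _dspr2 implementations. Callers are unaffected either way, since dispatch happens in the #define. A hypothetical caller fragment:

    /* Static dispatch through the generated #define: the same caller code
     * resolves to vp9_copy_mem16x16_c here and to vp9_copy_mem16x16_dspr2
     * in the mips-dspr2 build. copy_mb() is illustrative. */
    #include "vp9_rtcd.h"   /* also pulls in vpx/vpx_integer.h for uint8_t */

    static void copy_mb(const uint8_t *src, int src_pitch,
                        uint8_t *dst, int dst_pitch)
    {
        vp9_copy_mem16x16(src, src_pitch, dst, dst_pitch);
    }
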
diff --git a/mips/vpx_config.c b/mips/vpx_config.c
index fa93c2c..84f0e8b 100644
--- a/mips/vpx_config.c
+++ b/mips/vpx_config.c
@@ -5,5 +5,5 @@
/* tree. An additional intellectual property rights grant can be found */
/* in the file PATENTS. All contributing project authors may */
/* be found in the AUTHORS file in the root of the source tree. */
-static const char* const cfg = "--force-target=mips32-android-gcc --disable-runtime-cpu-detect --sdk-path=/usr/local/google/home/johannkoenig/android-ndk --disable-examples --disable-docs --enable-realtime-only";
+static const char* const cfg = "--force-target=mips32-android-gcc --disable-runtime-cpu-detect --sdk-path=/usr/local/google/home/johannkoenig/android-ndk --disable-vp9-encoder --disable-examples --disable-docs --enable-realtime-only";
const char *vpx_codec_build_config(void) {return cfg;}
diff --git a/mips/vpx_config.h b/mips/vpx_config.h
index 9f51b09..49eab1e 100644
--- a/mips/vpx_config.h
+++ b/mips/vpx_config.h
@@ -9,6 +9,7 @@
#ifndef VPX_CONFIG_H
#define VPX_CONFIG_H
#define RESTRICT
+#define INLINE __inline__ __attribute__((always_inline))
#define ARCH_ARM 0
#define ARCH_MIPS 1
#define ARCH_X86 0
@@ -34,7 +35,7 @@
#define HAVE_SYS_MMAN_H 1
#define HAVE_UNISTD_H 1
#define CONFIG_EXTERNAL_BUILD 0
-#define CONFIG_INSTALL_DOCS 1
+#define CONFIG_INSTALL_DOCS 0
#define CONFIG_INSTALL_BINS 1
#define CONFIG_INSTALL_LIBS 1
#define CONFIG_INSTALL_SRCS 0
@@ -61,7 +62,10 @@
#define CONFIG_INTERNAL_STATS 0
#define CONFIG_VP8_ENCODER 1
#define CONFIG_VP8_DECODER 1
+#define CONFIG_VP9_ENCODER 0
+#define CONFIG_VP9_DECODER 1
#define CONFIG_VP8 1
+#define CONFIG_VP9 1
#define CONFIG_ENCODERS 1
#define CONFIG_DECODERS 1
#define CONFIG_STATIC_MSVCRT 0
@@ -77,4 +81,11 @@
#define CONFIG_UNIT_TESTS 0
#define CONFIG_MULTI_RES_ENCODING 0
#define CONFIG_TEMPORAL_DENOISING 1
+#define CONFIG_EXPERIMENTAL 0
+#define CONFIG_DECRYPT 0
+#define CONFIG_ONESHOTQ 0
+#define CONFIG_MULTIPLE_ARF 0
+#define CONFIG_NON420 0
+#define CONFIG_ALPHA 0
+#define CONFIG_BALANCED_COEFTREE 0
#endif /* VPX_CONFIG_H */
diff --git a/mips/vpx_scale_rtcd.h b/mips/vpx_scale_rtcd.h
new file mode 100644
index 0000000..7af466a
--- /dev/null
+++ b/mips/vpx_scale_rtcd.h
@@ -0,0 +1,58 @@
+#ifndef VPX_SCALE_RTCD_H_
+#define VPX_SCALE_RTCD_H_
+
+#ifdef RTCD_C
+#define RTCD_EXTERN
+#else
+#define RTCD_EXTERN extern
+#endif
+
+struct yv12_buffer_config;
+
+void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_4_scale vp8_horizontal_line_5_4_scale_c
+
+void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_4_scale vp8_vertical_band_5_4_scale_c
+
+void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_5_3_scale vp8_horizontal_line_5_3_scale_c
+
+void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_5_3_scale vp8_vertical_band_5_3_scale_c
+
+void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned int source_width, unsigned char *dest, unsigned int dest_width);
+#define vp8_horizontal_line_2_1_scale vp8_horizontal_line_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale vp8_vertical_band_2_1_scale_c
+
+void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width);
+#define vp8_vertical_band_2_1_scale_i vp8_vertical_band_2_1_scale_i_c
+
+void vp8_yv12_extend_frame_borders_c(struct yv12_buffer_config *ybf);
+#define vp8_yv12_extend_frame_borders vp8_yv12_extend_frame_borders_c
+
+void vp8_yv12_copy_frame_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vp8_yv12_copy_frame vp8_yv12_copy_frame_c
+
+void vp8_yv12_copy_y_c(struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc);
+#define vp8_yv12_copy_y vp8_yv12_copy_y_c
+
+void vp9_extend_frame_borders_c(struct yv12_buffer_config *ybf, int subsampling_x, int subsampling_y);
+#define vp9_extend_frame_borders vp9_extend_frame_borders_c
+
+void vpx_scale_rtcd(void);
+#include "vpx_config.h"
+
+#ifdef RTCD_C
+static void setup_rtcd_internal(void)
+{
+
+#if HAVE_DSPR2
+void dsputil_static_init();
+dsputil_static_init();
+#endif
+}
+#endif
+#endif
diff --git a/mips/vpx_version.h b/mips/vpx_version.h
index 663dd49..512851c 100644
--- a/mips/vpx_version.h
+++ b/mips/vpx_version.h
@@ -1,7 +1,7 @@
#define VERSION_MAJOR 1
-#define VERSION_MINOR 1
+#define VERSION_MINOR 2
#define VERSION_PATCH 0
#define VERSION_EXTRA ""
#define VERSION_PACKED ((VERSION_MAJOR<<16)|(VERSION_MINOR<<8)|(VERSION_PATCH))
-#define VERSION_STRING_NOSP "v1.1.0"
-#define VERSION_STRING " v1.1.0"
+#define VERSION_STRING_NOSP "v1.2.0"
+#define VERSION_STRING " v1.2.0"