Diffstat (limited to 'libvpx/vp8')
-rw-r--r--  libvpx/vp8/common/filter.c            |  2
-rw-r--r--  libvpx/vp8/common/filter.h            |  6
-rw-r--r--  libvpx/vp8/common/findnearmv.h        |  2
-rw-r--r--  libvpx/vp8/common/idct_blk.c          |  7
-rw-r--r--  libvpx/vp8/common/reconinter.c        | 20
-rw-r--r--  libvpx/vp8/common/x86/filter_x86.c    |  2
-rw-r--r--  libvpx/vp8/common/x86/filter_x86.h    | 10
-rw-r--r--  libvpx/vp8/common/x86/idct_blk_mmx.c  | 17
-rw-r--r--  libvpx/vp8/common/x86/vp8_asm_stubs.c |  8
-rw-r--r--  libvpx/vp8/decoder/decodemv.c         | 12
-rw-r--r--  libvpx/vp8/decoder/decodframe.c       | 17
-rw-r--r--  libvpx/vp8/decoder/threading.c        | 13
-rw-r--r--  libvpx/vp8/encoder/bitstream.c        |  8
-rw-r--r--  libvpx/vp8/encoder/dct.c              | 16
-rw-r--r--  libvpx/vp8/encoder/firstpass.c        | 18
-rw-r--r--  libvpx/vp8/encoder/mcomp.c            | 38
-rw-r--r--  libvpx/vp8/encoder/onyx_if.c          |  3
-rw-r--r--  libvpx/vp8/encoder/ratectrl.c         | 15
-rw-r--r--  libvpx/vp8/encoder/rdopt.c            |  2
19 files changed, 110 insertions(+), 106 deletions(-)
diff --git a/libvpx/vp8/common/filter.c b/libvpx/vp8/common/filter.c
index 1901ea3..25266f8 100644
--- a/libvpx/vp8/common/filter.c
+++ b/libvpx/vp8/common/filter.c
@@ -9,9 +9,7 @@
*/
-#include <stdlib.h>
#include "filter.h"
-#include "vpx_ports/mem.h"
DECLARE_ALIGNED(16, const short, vp8_bilinear_filters[8][2]) =
{
diff --git a/libvpx/vp8/common/filter.h b/libvpx/vp8/common/filter.h
index b7591f2..ccda7c8 100644
--- a/libvpx/vp8/common/filter.h
+++ b/libvpx/vp8/common/filter.h
@@ -12,11 +12,13 @@
#ifndef FILTER_H
#define FILTER_H
+#include "vpx_ports/mem.h"
+
#define BLOCK_HEIGHT_WIDTH 4
#define VP8_FILTER_WEIGHT 128
#define VP8_FILTER_SHIFT 7
-extern const short vp8_bilinear_filters[8][2];
-extern const short vp8_sub_pel_filters[8][6];
+extern DECLARE_ALIGNED(16, const short, vp8_bilinear_filters[8][2]);
+extern DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters[8][6]);
#endif
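
Note: the x86 code loads these coefficient tables with aligned SIMD
instructions, so the extern declarations must advertise the same
16-byte alignment as the definitions in filter.c; that is why the
vpx_ports/mem.h include moves into the header (the same pattern is
applied to filter_x86.h below, and the stdlib.h include was simply
unused). As a rough sketch of what DECLARE_ALIGNED provides (the real
macro lives in vpx_ports/mem.h; this expansion is illustrative, not
the verbatim libvpx definition):

    #if defined(_MSC_VER)
    #define DECLARE_ALIGNED(n, typ, val) __declspec(align(n)) typ val
    #else
    #define DECLARE_ALIGNED(n, typ, val) typ val __attribute__((aligned(n)))
    #endif

    /* Declaration and definition now agree on alignment, so an aligned
     * SIMD load (e.g. movdqa) from the table is valid in every TU. */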
diff --git a/libvpx/vp8/common/findnearmv.h b/libvpx/vp8/common/findnearmv.h
index 06ef060..c60e463 100644
--- a/libvpx/vp8/common/findnearmv.h
+++ b/libvpx/vp8/common/findnearmv.h
@@ -124,7 +124,7 @@ static int above_block_mv(const MODE_INFO *cur_mb, int b, int mi_stride)
b += 16;
}
- return (cur_mb->bmi + b - 4)->mv.as_int;
+ return (cur_mb->bmi + (b - 4))->mv.as_int;
}
static B_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb, int b)
{
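
Note: this reassociation is not cosmetic. When b refers to the row
above, it has just been bumped by 16, so cur_mb->bmi + b would form a
pointer past the end of the 16-entry bmi array before the - 4 is
applied; merely forming such a pointer is undefined behavior in C.
Computing (b - 4) first keeps every intermediate pointer in bounds. A
minimal sketch with plain ints standing in for the bmi array:

    int bmi[16];
    int b = 3 + 16;              /* above-row case: b was bumped by 16 */
    /* int *bad = bmi + b - 4;      forms bmi + 19 first: undefined    */
    int *good = bmi + (b - 4);   /* bmi + 15: stays inside the array   */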
diff --git a/libvpx/vp8/common/idct_blk.c b/libvpx/vp8/common/idct_blk.c
index 8edfffb..65d5002 100644
--- a/libvpx/vp8/common/idct_blk.c
+++ b/libvpx/vp8/common/idct_blk.c
@@ -10,6 +10,7 @@
#include "vpx_config.h"
#include "vp8_rtcd.h"
+#include "vpx_mem/vpx_mem.h"
void vp8_dequant_idct_add_c(short *input, short *dq,
unsigned char *dest, int stride);
@@ -32,7 +33,7 @@ void vp8_dequant_idct_add_y_block_c
else
{
vp8_dc_only_idct_add_c (q[0]*dq[0], dst, stride, dst, stride);
- ((int *)q)[0] = 0;
+ vpx_memset(q, 0, 2 * sizeof(q[0]));
}
q += 16;
@@ -58,7 +59,7 @@ void vp8_dequant_idct_add_uv_block_c
else
{
vp8_dc_only_idct_add_c (q[0]*dq[0], dstu, stride, dstu, stride);
- ((int *)q)[0] = 0;
+ vpx_memset(q, 0, 2 * sizeof(q[0]));
}
q += 16;
@@ -77,7 +78,7 @@ void vp8_dequant_idct_add_uv_block_c
else
{
vp8_dc_only_idct_add_c (q[0]*dq[0], dstv, stride, dstv, stride);
- ((int *)q)[0] = 0;
+ vpx_memset(q, 0, 2 * sizeof(q[0]));
}
q += 16;
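
Note: ((int *)q)[0] = 0; cleared the first two short coefficients
through an int lvalue, which violates strict aliasing and assumes a
32-bit, suitably aligned int. The memset form writes the same four
bytes portably; the same substitution recurs in idct_blk_mmx.c,
decodframe.c, and threading.c below. A standalone equivalent (plain
memset standing in for libvpx's vpx_memset wrapper):

    #include <string.h>

    static void clear_dc_pair(short *q)
    {
        /* old, type-punned: ((int *)q)[0] = 0; */
        memset(q, 0, 2 * sizeof(q[0]));  /* clears q[0] and q[1] portably */
    }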
diff --git a/libvpx/vp8/common/reconinter.c b/libvpx/vp8/common/reconinter.c
index 43f84d0..bac3c94 100644
--- a/libvpx/vp8/common/reconinter.c
+++ b/libvpx/vp8/common/reconinter.c
@@ -138,14 +138,10 @@ void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre,
{
for (r = 0; r < 4; r++)
{
-#if !(CONFIG_FAST_UNALIGNED)
pred_ptr[0] = ptr[0];
pred_ptr[1] = ptr[1];
pred_ptr[2] = ptr[2];
pred_ptr[3] = ptr[3];
-#else
- *(uint32_t *)pred_ptr = *(uint32_t *)ptr ;
-#endif
pred_ptr += pitch;
ptr += pre_stride;
}
@@ -196,16 +192,12 @@ static void build_inter_predictors_b(BLOCKD *d, unsigned char *dst, int dst_stri
{
for (r = 0; r < 4; r++)
{
-#if !(CONFIG_FAST_UNALIGNED)
dst[0] = ptr[0];
dst[1] = ptr[1];
dst[2] = ptr[2];
dst[3] = ptr[3];
-#else
- *(uint32_t *)dst = *(uint32_t *)ptr ;
-#endif
- dst += dst_stride;
- ptr += pre_stride;
+ dst += dst_stride;
+ ptr += pre_stride;
}
}
}
@@ -270,7 +262,7 @@ void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x)
+ x->block[yoffset+4].bmi.mv.as_mv.row
+ x->block[yoffset+5].bmi.mv.as_mv.row;
- temp += 4 + ((temp >> (sizeof(int) * CHAR_BIT - 1)) << 3);
+ temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);
x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;
@@ -279,7 +271,7 @@ void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x)
+ x->block[yoffset+4].bmi.mv.as_mv.col
+ x->block[yoffset+5].bmi.mv.as_mv.col;
- temp += 4 + ((temp >> (sizeof(int) * CHAR_BIT - 1)) << 3);
+ temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);
x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;
@@ -558,7 +550,7 @@ void build_4x4uvmvs(MACROBLOCKD *x)
+ x->mode_info_context->bmi[yoffset + 4].mv.as_mv.row
+ x->mode_info_context->bmi[yoffset + 5].mv.as_mv.row;
- temp += 4 + ((temp >> (sizeof(int) * CHAR_BIT - 1)) << 3);
+ temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);
x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;
@@ -567,7 +559,7 @@ void build_4x4uvmvs(MACROBLOCKD *x)
+ x->mode_info_context->bmi[yoffset + 4].mv.as_mv.col
+ x->mode_info_context->bmi[yoffset + 5].mv.as_mv.col;
- temp += 4 + ((temp >> (sizeof(int) * CHAR_BIT - 1)) << 3);
+ temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);
x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;
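
Note: this idiom is a round-to-nearest division by 8 on a value that
may be negative. temp >> (sizeof(temp) * CHAR_BIT - 1) yields -1 for
negative temp and 0 otherwise (an arithmetic right shift, which the
two's-complement targets libvpx supports provide), so 4 + sign * 8
adds +4 or -4 before the truncating division. Replacing << 3 with * 8
matters because left-shifting a negative value is undefined behavior,
while the multiplication is well defined; sizeof(temp) also tracks the
variable's actual type. A self-contained sketch:

    #include <limits.h>

    static int round_div8(int temp)
    {
        int sign = temp >> (sizeof(temp) * CHAR_BIT - 1); /* -1 or 0 */
        temp += 4 + sign * 8;   /* +4 if temp >= 0, -4 if temp < 0   */
        return temp / 8;        /* truncation now rounds to nearest  */
    }
    /* round_div8(11) == 1, round_div8(13) == 2, round_div8(-13) == -2 */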
diff --git a/libvpx/vp8/common/x86/filter_x86.c b/libvpx/vp8/common/x86/filter_x86.c
index ebab814..7f496ed 100644
--- a/libvpx/vp8/common/x86/filter_x86.c
+++ b/libvpx/vp8/common/x86/filter_x86.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "vpx_ports/mem.h"
+#include "vp8/common/x86/filter_x86.h"
DECLARE_ALIGNED(16, const short, vp8_bilinear_filters_x86_4[8][8]) =
{
diff --git a/libvpx/vp8/common/x86/filter_x86.h b/libvpx/vp8/common/x86/filter_x86.h
index efcc4dc..cfadaee 100644
--- a/libvpx/vp8/common/x86/filter_x86.h
+++ b/libvpx/vp8/common/x86/filter_x86.h
@@ -11,9 +11,15 @@
#ifndef FILTER_X86_H
#define FILTER_X86_H
+#include "vpx_ports/mem.h"
+
/* x86 assembly specific copy of vp8/common/filter.c:vp8_bilinear_filters with
* duplicated values */
-extern const short vp8_bilinear_filters_x86_4[8][8]; /* duplicated 4x */
-extern const short vp8_bilinear_filters_x86_8[8][16]; /* duplicated 8x */
+
+/* duplicated 4x */
+extern DECLARE_ALIGNED(16, const short, vp8_bilinear_filters_x86_4[8][8]);
+
+/* duplicated 8x */
+extern DECLARE_ALIGNED(16, const short, vp8_bilinear_filters_x86_8[8][16]);
#endif /* FILTER_X86_H */
diff --git a/libvpx/vp8/common/x86/idct_blk_mmx.c b/libvpx/vp8/common/x86/idct_blk_mmx.c
index 49b2013..a1e4ce6 100644
--- a/libvpx/vp8/common/x86/idct_blk_mmx.c
+++ b/libvpx/vp8/common/x86/idct_blk_mmx.c
@@ -11,6 +11,7 @@
#include "vpx_config.h"
#include "vp8_rtcd.h"
#include "vp8/common/blockd.h"
+#include "vpx_mem/vpx_mem.h"
extern void vp8_dequantize_b_impl_mmx(short *sq, short *dq, short *q);
@@ -35,7 +36,7 @@ void vp8_dequant_idct_add_y_block_mmx
else if (eobs[0] == 1)
{
vp8_dc_only_idct_add_mmx (q[0]*dq[0], dst, stride, dst, stride);
- ((int *)q)[0] = 0;
+ vpx_memset(q, 0, 2 * sizeof(q[0]));
}
if (eobs[1] > 1)
@@ -44,7 +45,7 @@ void vp8_dequant_idct_add_y_block_mmx
{
vp8_dc_only_idct_add_mmx (q[16]*dq[0], dst+4, stride,
dst+4, stride);
- ((int *)(q+16))[0] = 0;
+ vpx_memset(q + 16, 0, 2 * sizeof(q[0]));
}
if (eobs[2] > 1)
@@ -53,7 +54,7 @@ void vp8_dequant_idct_add_y_block_mmx
{
vp8_dc_only_idct_add_mmx (q[32]*dq[0], dst+8, stride,
dst+8, stride);
- ((int *)(q+32))[0] = 0;
+ vpx_memset(q + 32, 0, 2 * sizeof(q[0]));
}
if (eobs[3] > 1)
@@ -62,7 +63,7 @@ void vp8_dequant_idct_add_y_block_mmx
{
vp8_dc_only_idct_add_mmx (q[48]*dq[0], dst+12, stride,
dst+12, stride);
- ((int *)(q+48))[0] = 0;
+ vpx_memset(q + 48, 0, 2 * sizeof(q[0]));
}
q += 64;
@@ -84,7 +85,7 @@ void vp8_dequant_idct_add_uv_block_mmx
else if (eobs[0] == 1)
{
vp8_dc_only_idct_add_mmx (q[0]*dq[0], dstu, stride, dstu, stride);
- ((int *)q)[0] = 0;
+ vpx_memset(q, 0, 2 * sizeof(q[0]));
}
if (eobs[1] > 1)
@@ -93,7 +94,7 @@ void vp8_dequant_idct_add_uv_block_mmx
{
vp8_dc_only_idct_add_mmx (q[16]*dq[0], dstu+4, stride,
dstu+4, stride);
- ((int *)(q+16))[0] = 0;
+ vpx_memset(q + 16, 0, 2 * sizeof(q[0]));
}
q += 32;
@@ -108,7 +109,7 @@ void vp8_dequant_idct_add_uv_block_mmx
else if (eobs[0] == 1)
{
vp8_dc_only_idct_add_mmx (q[0]*dq[0], dstv, stride, dstv, stride);
- ((int *)q)[0] = 0;
+ vpx_memset(q, 0, 2 * sizeof(q[0]));
}
if (eobs[1] > 1)
@@ -117,7 +118,7 @@ void vp8_dequant_idct_add_uv_block_mmx
{
vp8_dc_only_idct_add_mmx (q[16]*dq[0], dstv+4, stride,
dstv+4, stride);
- ((int *)(q+16))[0] = 0;
+ vpx_memset(q + 16, 0, 2 * sizeof(q[0]));
}
q += 32;
diff --git a/libvpx/vp8/common/x86/vp8_asm_stubs.c b/libvpx/vp8/common/x86/vp8_asm_stubs.c
index c0416b7..b409293 100644
--- a/libvpx/vp8/common/x86/vp8_asm_stubs.c
+++ b/libvpx/vp8/common/x86/vp8_asm_stubs.c
@@ -611,16 +611,12 @@ void vp8_sixtap_predict4x4_ssse3
for (r = 0; r < 4; r++)
{
- #if !(CONFIG_FAST_UNALIGNED)
dst_ptr[0] = src_ptr[0];
dst_ptr[1] = src_ptr[1];
dst_ptr[2] = src_ptr[2];
dst_ptr[3] = src_ptr[3];
- #else
- *(uint32_t *)dst_ptr = *(uint32_t *)src_ptr ;
- #endif
- dst_ptr += dst_pitch;
- src_ptr += src_pixels_per_line;
+ dst_ptr += dst_pitch;
+ src_ptr += src_pixels_per_line;
}
}
}
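
Note: the deleted CONFIG_FAST_UNALIGNED path copied four pixels with a
single *(uint32_t *) load/store, which is both a strict-aliasing
violation and a potentially misaligned access. The byte-wise copy is
defined everywhere, and where a word copy is really wanted, memcpy
expresses it without UB; compilers emit a single 32-bit move on
targets that permit it. A sketch:

    #include <string.h>

    static void copy4(unsigned char *dst, const unsigned char *src)
    {
        /* UB: *(uint32_t *)dst = *(uint32_t *)src; (aliasing/alignment) */
        memcpy(dst, src, 4);   /* well defined; usually one 32-bit move  */
    }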
diff --git a/libvpx/vp8/decoder/decodemv.c b/libvpx/vp8/decoder/decodemv.c
index 8027a07..35a22c7 100644
--- a/libvpx/vp8/decoder/decodemv.c
+++ b/libvpx/vp8/decoder/decodemv.c
@@ -110,8 +110,8 @@ static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc)
static void read_mv(vp8_reader *r, MV *mv, const MV_CONTEXT *mvc)
{
- mv->row = (short)(read_mvcomponent(r, mvc) << 1);
- mv->col = (short)(read_mvcomponent(r, ++mvc) << 1);
+ mv->row = (short)(read_mvcomponent(r, mvc) * 2);
+ mv->col = (short)(read_mvcomponent(r, ++mvc) * 2);
}
@@ -292,9 +292,9 @@ static void decode_split_mv(vp8_reader *const bc, MODE_INFO *mi,
blockmv.as_int = 0;
if( vp8_read(bc, prob[2]) )
{
- blockmv.as_mv.row = read_mvcomponent(bc, &mvc[0]) << 1;
+ blockmv.as_mv.row = read_mvcomponent(bc, &mvc[0]) * 2;
blockmv.as_mv.row += best_mv.as_mv.row;
- blockmv.as_mv.col = read_mvcomponent(bc, &mvc[1]) << 1;
+ blockmv.as_mv.col = read_mvcomponent(bc, &mvc[1]) * 2;
blockmv.as_mv.col += best_mv.as_mv.col;
}
}
@@ -512,15 +512,15 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi)
else
{
mbmi->mode = NEARMV;
- vp8_clamp_mv2(&near_mvs[CNT_NEAR], &pbi->mb);
mbmi->mv.as_int = near_mvs[CNT_NEAR].as_int;
+ vp8_clamp_mv2(&mbmi->mv, &pbi->mb);
}
}
else
{
mbmi->mode = NEARESTMV;
- vp8_clamp_mv2(&near_mvs[CNT_NEAREST], &pbi->mb);
mbmi->mv.as_int = near_mvs[CNT_NEAREST].as_int;
+ vp8_clamp_mv2(&mbmi->mv, &pbi->mb);
}
}
else
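
Note: alongside the shift-to-multiply fixes (read_mvcomponent can
return a negative value, and left-shifting a negative value is
undefined), the NEAR/NEAREST hunks swap the copy and the clamp. The
old code clamped the entry inside the shared near_mvs candidate array
in place; the new code copies the candidate into mbmi->mv first and
clamps only that copy, leaving the candidate list exactly as derived
from the bitstream. Schematically:

    /* old: clamp mutates the shared candidate array
     *     vp8_clamp_mv2(&near_mvs[CNT_NEAR], &pbi->mb);
     *     mbmi->mv.as_int = near_mvs[CNT_NEAR].as_int;
     * new: copy first, then clamp the macroblock's private copy
     *     mbmi->mv.as_int = near_mvs[CNT_NEAR].as_int;
     *     vp8_clamp_mv2(&mbmi->mv, &pbi->mb);              */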
diff --git a/libvpx/vp8/decoder/decodframe.c b/libvpx/vp8/decoder/decodframe.c
index 51eeb02..16da78a 100644
--- a/libvpx/vp8/decoder/decodframe.c
+++ b/libvpx/vp8/decoder/decodframe.c
@@ -211,7 +211,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
(b->qcoeff[0] * DQC[0],
dst, dst_stride,
dst, dst_stride);
- ((int *)b->qcoeff)[0] = 0;
+ vpx_memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
}
}
}
@@ -248,21 +248,14 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
vp8_short_inv_walsh4x4(&b->dqcoeff[0],
xd->qcoeff);
- ((int *)b->qcoeff)[0] = 0;
- ((int *)b->qcoeff)[1] = 0;
- ((int *)b->qcoeff)[2] = 0;
- ((int *)b->qcoeff)[3] = 0;
- ((int *)b->qcoeff)[4] = 0;
- ((int *)b->qcoeff)[5] = 0;
- ((int *)b->qcoeff)[6] = 0;
- ((int *)b->qcoeff)[7] = 0;
+ vpx_memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0]));
}
else
{
b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0];
vp8_short_inv_walsh4x4_1(&b->dqcoeff[0],
xd->qcoeff);
- ((int *)b->qcoeff)[0] = 0;
+ vpx_memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
}
/* override the dc dequant constant in order to preserve the
@@ -576,7 +569,7 @@ static void decode_mb_rows(VP8D_COMP *pbi)
xd->left_available = 0;
- xd->mb_to_top_edge = -((mb_row * 16)) << 3;
+ xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
xd->recon_above[0] = dst_buffer[0] + recon_yoffset;
@@ -1026,7 +1019,7 @@ int vp8_decode_frame(VP8D_COMP *pbi)
const unsigned char *clear = data;
if (pbi->decrypt_cb)
{
- int n = data_end - data;
+ int n = (int)(data_end - data);
if (n > 10) n = 10;
pbi->decrypt_cb(pbi->decrypt_state, data, clear_buffer, n);
clear = clear_buffer;
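
Note: two fixes here. -((mb_row * 16)) << 3 negated before shifting,
left-shifting a negative value (undefined behavior); shifting the
non-negative product and then negating is value-identical with defined
semantics. And data_end - data has type ptrdiff_t, so the (int) cast
makes the narrowing explicit on 64-bit builds (n is clamped to at most
10 immediately afterwards). A minimal check:

    static int top_edge(int mb_row)
    {
        /* old: -((mb_row * 16)) << 3 shifted a negative value: UB */
        return -((mb_row * 16) << 3);  /* top_edge(2) == -256      */
    }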
diff --git a/libvpx/vp8/decoder/threading.c b/libvpx/vp8/decoder/threading.c
index 7303189..fe290cf 100644
--- a/libvpx/vp8/decoder/threading.c
+++ b/libvpx/vp8/decoder/threading.c
@@ -227,7 +227,7 @@ static void mt_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
{
vp8_dc_only_idct_add(b->qcoeff[0] * DQC[0],
dst, dst_stride, dst, dst_stride);
- ((int *)b->qcoeff)[0] = 0;
+ vpx_memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
}
}
}
@@ -264,21 +264,14 @@ static void mt_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
vp8_short_inv_walsh4x4(&b->dqcoeff[0],
xd->qcoeff);
- ((int *)b->qcoeff)[0] = 0;
- ((int *)b->qcoeff)[1] = 0;
- ((int *)b->qcoeff)[2] = 0;
- ((int *)b->qcoeff)[3] = 0;
- ((int *)b->qcoeff)[4] = 0;
- ((int *)b->qcoeff)[5] = 0;
- ((int *)b->qcoeff)[6] = 0;
- ((int *)b->qcoeff)[7] = 0;
+ vpx_memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0]));
}
else
{
b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0];
vp8_short_inv_walsh4x4_1(&b->dqcoeff[0],
xd->qcoeff);
- ((int *)b->qcoeff)[0] = 0;
+ vpx_memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
}
/* override the dc dequant constant in order to preserve the
diff --git a/libvpx/vp8/encoder/bitstream.c b/libvpx/vp8/encoder/bitstream.c
index 5f0c1f7..78e54e2 100644
--- a/libvpx/vp8/encoder/bitstream.c
+++ b/libvpx/vp8/encoder/bitstream.c
@@ -432,7 +432,7 @@ static void write_mv_ref
assert(NEARESTMV <= m && m <= SPLITMV);
#endif
vp8_write_token(w, vp8_mv_ref_tree, p,
- vp8_mv_ref_encoding_array - NEARESTMV + m);
+ vp8_mv_ref_encoding_array + (m - NEARESTMV));
}
static void write_sub_mv_ref
@@ -444,7 +444,7 @@ static void write_sub_mv_ref
assert(LEFT4X4 <= m && m <= NEW4X4);
#endif
vp8_write_token(w, vp8_sub_mv_ref_tree, p,
- vp8_sub_mv_ref_encoding_array - LEFT4X4 + m);
+ vp8_sub_mv_ref_encoding_array + (m - LEFT4X4));
}
static void write_mv
@@ -577,7 +577,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
*/
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
- xd->mb_to_top_edge = -((mb_row * 16)) << 3;
+ xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
#ifdef VP8_ENTROPY_STATS
@@ -1062,7 +1062,7 @@ int vp8_update_coef_context(VP8_COMP *cpi)
if (cpi->common.frame_type == KEY_FRAME)
{
/* Reset to default counts/probabilities at key frames */
- vp8_copy(cpi->coef_counts, default_coef_counts);
+ vp8_copy(cpi->mb.coef_counts, default_coef_counts);
}
if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS)
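
Note: vp8_mv_ref_encoding_array - NEARESTMV + m evaluated the
subtraction first, forming a pointer before the start of the array
whenever NEARESTMV > 0; as with the findnearmv.h change, forming the
out-of-bounds pointer is itself UB. The assert guarantees
m >= NEARESTMV, so adding the non-negative index (m - NEARESTMV) stays
in bounds. A sketch with generic stand-ins (BASE for NEARESTMV):

    static const int table[4] = { 0, 1, 2, 3 };
    enum { BASE = 1 };                  /* stands in for NEARESTMV */

    static const int *entry(int m)      /* caller asserts m >= BASE */
    {
        /* old form: table - BASE + m forms table - 1 first: UB     */
        return table + (m - BASE);      /* index computed first     */
    }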
diff --git a/libvpx/vp8/encoder/dct.c b/libvpx/vp8/encoder/dct.c
index b5a11ae..091554a 100644
--- a/libvpx/vp8/encoder/dct.c
+++ b/libvpx/vp8/encoder/dct.c
@@ -20,10 +20,10 @@ void vp8_short_fdct4x4_c(short *input, short *output, int pitch)
for (i = 0; i < 4; i++)
{
- a1 = ((ip[0] + ip[3])<<3);
- b1 = ((ip[1] + ip[2])<<3);
- c1 = ((ip[1] - ip[2])<<3);
- d1 = ((ip[0] - ip[3])<<3);
+ a1 = ((ip[0] + ip[3]) * 8);
+ b1 = ((ip[1] + ip[2]) * 8);
+ c1 = ((ip[1] - ip[2]) * 8);
+ d1 = ((ip[0] - ip[3]) * 8);
op[0] = a1 + b1;
op[2] = a1 - b1;
@@ -72,10 +72,10 @@ void vp8_short_walsh4x4_c(short *input, short *output, int pitch)
for (i = 0; i < 4; i++)
{
- a1 = ((ip[0] + ip[2])<<2);
- d1 = ((ip[1] + ip[3])<<2);
- c1 = ((ip[1] - ip[3])<<2);
- b1 = ((ip[0] - ip[2])<<2);
+ a1 = ((ip[0] + ip[2]) * 4);
+ d1 = ((ip[1] + ip[3]) * 4);
+ c1 = ((ip[1] - ip[3]) * 4);
+ b1 = ((ip[0] - ip[2]) * 4);
op[0] = a1 + d1 + (a1!=0);
op[1] = b1 + c1;
diff --git a/libvpx/vp8/encoder/firstpass.c b/libvpx/vp8/encoder/firstpass.c
index ded0c43..968c7f3 100644
--- a/libvpx/vp8/encoder/firstpass.c
+++ b/libvpx/vp8/encoder/firstpass.c
@@ -711,8 +711,8 @@ skip_motion_search:
neutral_count++;
}
- d->bmi.mv.as_mv.row <<= 3;
- d->bmi.mv.as_mv.col <<= 3;
+ d->bmi.mv.as_mv.row *= 8;
+ d->bmi.mv.as_mv.col *= 8;
this_error = motion_error;
vp8_set_mbmode_and_mvs(x, NEWMV, &d->bmi.mv);
vp8_encode_inter16x16y(x);
@@ -909,13 +909,16 @@ extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
static double bitcost( double prob )
{
- return -(log( prob ) / log( 2.0 ));
+ if (prob > 0.000122)
+ return -log(prob) / log(2.0);
+ else
+ return 13.0;
}
static int64_t estimate_modemvcost(VP8_COMP *cpi,
FIRSTPASS_STATS * fpstats)
{
int mv_cost;
- int mode_cost;
+ int64_t mode_cost;
double av_pct_inter = fpstats->pcnt_inter / fpstats->count;
double av_pct_motion = fpstats->pcnt_motion / fpstats->count;
@@ -937,10 +940,9 @@ static int64_t estimate_modemvcost(VP8_COMP *cpi,
/* Crude estimate of overhead cost from modes
* << 9 is the normalization to (bits * 512) used in vp8_bits_per_mb
*/
- mode_cost =
- (int)( ( ((av_pct_inter - av_pct_motion) * zz_cost) +
- (av_pct_motion * motion_cost) +
- (av_intra * intra_cost) ) * cpi->common.MBs ) << 9;
+ mode_cost = ((((av_pct_inter - av_pct_motion) * zz_cost) +
+ (av_pct_motion * motion_cost) +
+ (av_intra * intra_cost)) * cpi->common.MBs) * 512;
return mv_cost + mode_cost;
}
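
Note: bitcost() is the bits-for-probability function -log2(p); as p
approaches 0 it diverges, and p == 0 produced inf/NaN that poisoned
the cost totals. The new floor of 13.0 bits engages below roughly
2^-13 ~= 0.000122, where -log2(0.000122) ~= 13.0008, so the curve is
continuous to within a thousandth of a bit. The mode_cost hunk
likewise replaces (int)(...) << 9 with a multiplication by 512
accumulated in an int64_t, so the bits * 512 normalization cannot
overflow a 32-bit int. A quick check:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        /* value at the cutover matches the 13.0 floor */
        printf("%f\n", -log(0.000122) / log(2.0));  /* ~13.0008 */
        return 0;
    }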
diff --git a/libvpx/vp8/encoder/mcomp.c b/libvpx/vp8/encoder/mcomp.c
index 83c3989..0b11ea6 100644
--- a/libvpx/vp8/encoder/mcomp.c
+++ b/libvpx/vp8/encoder/mcomp.c
@@ -210,7 +210,7 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
unsigned char *z = (*(b->base_src) + b->src);
int rr = ref_mv->as_mv.row >> 1, rc = ref_mv->as_mv.col >> 1;
- int br = bestmv->as_mv.row << 2, bc = bestmv->as_mv.col << 2;
+ int br = bestmv->as_mv.row * 4, bc = bestmv->as_mv.col * 4;
int tr = br, tc = bc;
unsigned int besterr;
unsigned int left, right, up, down, diag;
@@ -220,10 +220,14 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
unsigned int quarteriters = 4;
int thismse;
- int minc = MAX(x->mv_col_min << 2, (ref_mv->as_mv.col >> 1) - ((1 << mvlong_width) - 1));
- int maxc = MIN(x->mv_col_max << 2, (ref_mv->as_mv.col >> 1) + ((1 << mvlong_width) - 1));
- int minr = MAX(x->mv_row_min << 2, (ref_mv->as_mv.row >> 1) - ((1 << mvlong_width) - 1));
- int maxr = MIN(x->mv_row_max << 2, (ref_mv->as_mv.row >> 1) + ((1 << mvlong_width) - 1));
+ int minc = MAX(x->mv_col_min * 4,
+ (ref_mv->as_mv.col >> 1) - ((1 << mvlong_width) - 1));
+ int maxc = MIN(x->mv_col_max * 4,
+ (ref_mv->as_mv.col >> 1) + ((1 << mvlong_width) - 1));
+ int minr = MAX(x->mv_row_min * 4,
+ (ref_mv->as_mv.row >> 1) - ((1 << mvlong_width) - 1));
+ int maxr = MIN(x->mv_row_max * 4,
+ (ref_mv->as_mv.row >> 1) + ((1 << mvlong_width) - 1));
int y_stride;
int offset;
@@ -254,8 +258,8 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
/* central mv */
- bestmv->as_mv.row <<= 3;
- bestmv->as_mv.col <<= 3;
+ bestmv->as_mv.row *= 8;
+ bestmv->as_mv.col *= 8;
/* calculate central point error */
besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);
@@ -337,8 +341,8 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
tc = bc;
}
- bestmv->as_mv.row = br << 1;
- bestmv->as_mv.col = bc << 1;
+ bestmv->as_mv.row = br * 2;
+ bestmv->as_mv.col = bc * 2;
if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL<<3)) ||
(abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL<<3)))
@@ -699,8 +703,8 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
#endif
/* central mv */
- bestmv->as_mv.row <<= 3;
- bestmv->as_mv.col <<= 3;
+ bestmv->as_mv.row *= 8;
+ bestmv->as_mv.col *= 8;
startmv = *bestmv;
/* calculate central point error */
@@ -1315,8 +1319,8 @@ int vp8_diamond_search_sadx4
(*num00)++;
}
- this_mv.as_mv.row = best_mv->as_mv.row << 3;
- this_mv.as_mv.col = best_mv->as_mv.col << 3;
+ this_mv.as_mv.row = best_mv->as_mv.row * 8;
+ this_mv.as_mv.col = best_mv->as_mv.col * 8;
return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad)
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
@@ -1709,8 +1713,8 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
}
}
- this_mv.as_mv.row = best_mv->as_mv.row << 3;
- this_mv.as_mv.col = best_mv->as_mv.col << 3;
+ this_mv.as_mv.row = best_mv->as_mv.row * 8;
+ this_mv.as_mv.col = best_mv->as_mv.col * 8;
return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, &thissad)
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
@@ -1905,8 +1909,8 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
}
- this_mv.as_mv.row = ref_mv->as_mv.row << 3;
- this_mv.as_mv.col = ref_mv->as_mv.col << 3;
+ this_mv.as_mv.row = ref_mv->as_mv.row * 8;
+ this_mv.as_mv.col = ref_mv->as_mv.col * 8;
return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad)
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
diff --git a/libvpx/vp8/encoder/onyx_if.c b/libvpx/vp8/encoder/onyx_if.c
index 7c07975..4b60cfd 100644
--- a/libvpx/vp8/encoder/onyx_if.c
+++ b/libvpx/vp8/encoder/onyx_if.c
@@ -3574,7 +3574,8 @@ static void encode_frame_to_data_rate
for (i=cpi->current_layer+1; i<cpi->oxcf.number_of_layers; i++)
{
LAYER_CONTEXT *lc = &cpi->layer_context[i];
- lc->bits_off_target += cpi->av_per_frame_bandwidth;
+ lc->bits_off_target += (int)(lc->target_bandwidth /
+ lc->framerate);
if (lc->bits_off_target > lc->maximum_buffer_size)
lc->bits_off_target = lc->maximum_buffer_size;
lc->buffer_level = lc->bits_off_target;
diff --git a/libvpx/vp8/encoder/ratectrl.c b/libvpx/vp8/encoder/ratectrl.c
index 1e8259c..fe4db13 100644
--- a/libvpx/vp8/encoder/ratectrl.c
+++ b/libvpx/vp8/encoder/ratectrl.c
@@ -956,6 +956,21 @@ static void calc_pframe_target_size(VP8_COMP *cpi)
if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size)
cpi->bits_off_target = (int)cpi->oxcf.maximum_buffer_size;
cpi->buffer_level = cpi->bits_off_target;
+
+ if (cpi->oxcf.number_of_layers > 1) {
+ unsigned int i;
+
+ /* Propagate bits saved by dropping the frame to higher layers. */
+ for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers;
+ i++) {
+ LAYER_CONTEXT *lc = &cpi->layer_context[i];
+ lc->bits_off_target += (int)(lc->target_bandwidth /
+ lc->framerate);
+ if (lc->bits_off_target > lc->maximum_buffer_size)
+ lc->bits_off_target = lc->maximum_buffer_size;
+ lc->buffer_level = lc->bits_off_target;
+ }
+ }
}
}
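
Note: this mirrors the onyx_if.c hunk above. When a frame is dropped,
each higher temporal layer is credited with its own per-frame budget,
target_bandwidth / framerate, rather than the base layer's
av_per_frame_bandwidth, and each layer's buffer is clipped to its own
maximum. A quick numeric check with hypothetical layer settings:

    /* hypothetical enhancement layer: 256 kbps at 30 fps       */
    double target_bandwidth = 256000.0, framerate = 30.0;
    int credit = (int)(target_bandwidth / framerate);  /* 8533 */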
diff --git a/libvpx/vp8/encoder/rdopt.c b/libvpx/vp8/encoder/rdopt.c
index 521e84f..5016cc4 100644
--- a/libvpx/vp8/encoder/rdopt.c
+++ b/libvpx/vp8/encoder/rdopt.c
@@ -935,7 +935,7 @@ int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4])
assert(NEARESTMV <= m && m <= SPLITMV);
vp8_mv_ref_probs(p, near_mv_ref_ct);
return vp8_cost_token(vp8_mv_ref_tree, p,
- vp8_mv_ref_encoding_array - NEARESTMV + m);
+ vp8_mv_ref_encoding_array + (m - NEARESTMV));
}
void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv)