summary | refs | log | tree | commit | diff | stats
path: root/libvpx/vp9/encoder/vp9_bitstream.c
diff options
context:
space:
mode:
authorhkuang <hkuang@google.com>2013-07-25 11:11:39 -0700
committerhkuang <hkuang@google.com>2013-07-25 12:03:12 -0700
commit91037db265ecdd914a26e056cf69207b4f50924e (patch)
treec78c618cf6d0ffb187e2734d524bca19698b3c0d /libvpx/vp9/encoder/vp9_bitstream.c
parentba164dffc5a6795bce97fae02b51ccf3330e15e4 (diff)
downloadandroid_external_libvpx-91037db265ecdd914a26e056cf69207b4f50924e.tar.gz
android_external_libvpx-91037db265ecdd914a26e056cf69207b4f50924e.tar.bz2
android_external_libvpx-91037db265ecdd914a26e056cf69207b4f50924e.zip
Roll latest libvpx into Android.
Make the VP9 decoding 2X faster than the old one. The checkout is from the master branch (hash: 242157c756314827ad9244952c7253e8900b9626). Change-Id: Ibe67b3ee19f82b87df2416826b63a67f7f79b63a
Diffstat (limited to 'libvpx/vp9/encoder/vp9_bitstream.c')
-rw-r--r--libvpx/vp9/encoder/vp9_bitstream.c868
1 file changed, 284 insertions(+), 584 deletions(-)
diff --git a/libvpx/vp9/encoder/vp9_bitstream.c b/libvpx/vp9/encoder/vp9_bitstream.c
index 09ab2db..ad0f6c5 100644
--- a/libvpx/vp9/encoder/vp9_bitstream.c
+++ b/libvpx/vp9/encoder/vp9_bitstream.c
@@ -32,6 +32,7 @@
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_bitstream.h"
#include "vp9/encoder/vp9_segmentation.h"
+#include "vp9/encoder/vp9_subexp.h"
#include "vp9/encoder/vp9_write_bit_buffer.h"
@@ -48,8 +49,6 @@ vp9_coeff_stats tree_update_hist[TX_SIZE_MAX_SB][BLOCK_TYPES];
extern unsigned int active_section;
#endif
-#define vp9_cost_upd ((int)(vp9_cost_one(upd) - vp9_cost_zero(upd)) >> 8)
-#define vp9_cost_upd256 ((int)(vp9_cost_one(upd) - vp9_cost_zero(upd)))
#ifdef MODE_STATS
int64_t tx_count_32x32p_stats[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB];
@@ -155,8 +154,6 @@ void write_switchable_interp_stats() {
}
#endif
-static int update_bits[255];
-
static INLINE void write_be32(uint8_t *p, int value) {
p[0] = value >> 24;
p[1] = value >> 16;
@@ -164,248 +161,11 @@ static INLINE void write_be32(uint8_t *p, int value) {
p[3] = value;
}
-
-
-int recenter_nonneg(int v, int m) {
- if (v > (m << 1))
- return v;
- else if (v >= m)
- return ((v - m) << 1);
- else
- return ((m - v) << 1) - 1;
-}
-
-static int get_unsigned_bits(unsigned num_values) {
- int cat = 0;
- if ((num_values--) <= 1) return 0;
- while (num_values > 0) {
- cat++;
- num_values >>= 1;
- }
- return cat;
-}
-
void vp9_encode_unsigned_max(struct vp9_write_bit_buffer *wb,
int data, int max) {
vp9_wb_write_literal(wb, data, get_unsigned_bits(max));
}
-void encode_uniform(vp9_writer *w, int v, int n) {
- int l = get_unsigned_bits(n);
- int m;
- if (l == 0)
- return;
- m = (1 << l) - n;
- if (v < m) {
- vp9_write_literal(w, v, l - 1);
- } else {
- vp9_write_literal(w, m + ((v - m) >> 1), l - 1);
- vp9_write_literal(w, (v - m) & 1, 1);
- }
-}
-
-int count_uniform(int v, int n) {
- int l = get_unsigned_bits(n);
- int m;
- if (l == 0) return 0;
- m = (1 << l) - n;
- if (v < m)
- return l - 1;
- else
- return l;
-}
-
-void encode_term_subexp(vp9_writer *w, int word, int k, int num_syms) {
- int i = 0;
- int mk = 0;
- while (1) {
- int b = (i ? k + i - 1 : k);
- int a = (1 << b);
- if (num_syms <= mk + 3 * a) {
- encode_uniform(w, word - mk, num_syms - mk);
- break;
- } else {
- int t = (word >= mk + a);
- vp9_write_literal(w, t, 1);
- if (t) {
- i = i + 1;
- mk += a;
- } else {
- vp9_write_literal(w, word - mk, b);
- break;
- }
- }
- }
-}
-
-int count_term_subexp(int word, int k, int num_syms) {
- int count = 0;
- int i = 0;
- int mk = 0;
- while (1) {
- int b = (i ? k + i - 1 : k);
- int a = (1 << b);
- if (num_syms <= mk + 3 * a) {
- count += count_uniform(word - mk, num_syms - mk);
- break;
- } else {
- int t = (word >= mk + a);
- count++;
- if (t) {
- i = i + 1;
- mk += a;
- } else {
- count += b;
- break;
- }
- }
- }
- return count;
-}
-
-static void compute_update_table() {
- int i;
- for (i = 0; i < 254; i++)
- update_bits[i] = count_term_subexp(i, SUBEXP_PARAM, 255);
-}
-
-static int split_index(int i, int n, int modulus) {
- int max1 = (n - 1 - modulus / 2) / modulus + 1;
- if (i % modulus == modulus / 2) i = i / modulus;
- else i = max1 + i - (i + modulus - modulus / 2) / modulus;
- return i;
-}
-
-static int remap_prob(int v, int m) {
- const int n = 255;
- const int modulus = MODULUS_PARAM;
- int i;
- v--;
- m--;
- if ((m << 1) <= n)
- i = recenter_nonneg(v, m) - 1;
- else
- i = recenter_nonneg(n - 1 - v, n - 1 - m) - 1;
-
- i = split_index(i, n - 1, modulus);
- return i;
-}
-
-static void write_prob_diff_update(vp9_writer *w,
- vp9_prob newp, vp9_prob oldp) {
- int delp = remap_prob(newp, oldp);
- encode_term_subexp(w, delp, SUBEXP_PARAM, 255);
-}
-
-static int prob_diff_update_cost(vp9_prob newp, vp9_prob oldp) {
- int delp = remap_prob(newp, oldp);
- return update_bits[delp] * 256;
-}
-
-static int prob_update_savings(const unsigned int *ct,
- const vp9_prob oldp, const vp9_prob newp,
- const vp9_prob upd) {
- const int old_b = cost_branch256(ct, oldp);
- const int new_b = cost_branch256(ct, newp);
- const int update_b = 2048 + vp9_cost_upd256;
- return old_b - new_b - update_b;
-}
-
-static int prob_diff_update_savings_search(const unsigned int *ct,
- const vp9_prob oldp, vp9_prob *bestp,
- const vp9_prob upd) {
- const int old_b = cost_branch256(ct, oldp);
- int new_b, update_b, savings, bestsavings, step;
- vp9_prob newp, bestnewp;
-
- bestsavings = 0;
- bestnewp = oldp;
-
- step = (*bestp > oldp ? -1 : 1);
- for (newp = *bestp; newp != oldp; newp += step) {
- new_b = cost_branch256(ct, newp);
- update_b = prob_diff_update_cost(newp, oldp) + vp9_cost_upd256;
- savings = old_b - new_b - update_b;
- if (savings > bestsavings) {
- bestsavings = savings;
- bestnewp = newp;
- }
- }
- *bestp = bestnewp;
- return bestsavings;
-}
-
-static int prob_diff_update_savings_search_model(const unsigned int *ct,
- const vp9_prob *oldp,
- vp9_prob *bestp,
- const vp9_prob upd,
- int b, int r) {
- int i, old_b, new_b, update_b, savings, bestsavings, step;
- int newp;
- vp9_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
- vp9_model_to_full_probs(oldp, oldplist);
- vpx_memcpy(newplist, oldp, sizeof(vp9_prob) * UNCONSTRAINED_NODES);
- for (i = UNCONSTRAINED_NODES, old_b = 0; i < ENTROPY_NODES; ++i)
- old_b += cost_branch256(ct + 2 * i, oldplist[i]);
- old_b += cost_branch256(ct + 2 * PIVOT_NODE, oldplist[PIVOT_NODE]);
-
- bestsavings = 0;
- bestnewp = oldp[PIVOT_NODE];
-
- step = (*bestp > oldp[PIVOT_NODE] ? -1 : 1);
- newp = *bestp;
- for (; newp != oldp[PIVOT_NODE]; newp += step) {
- if (newp < 1 || newp > 255) continue;
- newplist[PIVOT_NODE] = newp;
- vp9_model_to_full_probs(newplist, newplist);
- for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i)
- new_b += cost_branch256(ct + 2 * i, newplist[i]);
- new_b += cost_branch256(ct + 2 * PIVOT_NODE, newplist[PIVOT_NODE]);
- update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) +
- vp9_cost_upd256;
- savings = old_b - new_b - update_b;
- if (savings > bestsavings) {
- bestsavings = savings;
- bestnewp = newp;
- }
- }
- *bestp = bestnewp;
- return bestsavings;
-}
-
-static void vp9_cond_prob_update(vp9_writer *bc, vp9_prob *oldp, vp9_prob upd,
- unsigned int *ct) {
- vp9_prob newp;
- int savings;
- newp = get_binary_prob(ct[0], ct[1]);
- assert(newp >= 1);
- savings = prob_update_savings(ct, *oldp, newp, upd);
- if (savings > 0) {
- vp9_write(bc, 1, upd);
- vp9_write_prob(bc, newp);
- *oldp = newp;
- } else {
- vp9_write(bc, 0, upd);
- }
-}
-
-static void vp9_cond_prob_diff_update(vp9_writer *bc, vp9_prob *oldp,
- vp9_prob upd,
- unsigned int *ct) {
- vp9_prob newp;
- int savings;
- newp = get_binary_prob(ct[0], ct[1]);
- assert(newp >= 1);
- savings = prob_diff_update_savings_search(ct, *oldp, &newp, upd);
- if (savings > 0) {
- vp9_write(bc, 1, upd);
- write_prob_diff_update(bc, newp, *oldp);
- *oldp = newp;
- } else {
- vp9_write(bc, 0, upd);
- }
-}
-
static void update_mode(
vp9_writer *w,
int n,
@@ -440,16 +200,39 @@ static void update_mbintra_mode_probs(VP9_COMP* const cpi,
(unsigned int *)cpi->y_mode_count[j]);
}
-void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *bc) {
- VP9_COMMON *const pc = &cpi->common;
- int k;
+static void write_selected_txfm_size(const VP9_COMP *cpi, TX_SIZE tx_size,
+ BLOCK_SIZE_TYPE bsize, vp9_writer *w) {
+ const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ const vp9_prob *tx_probs = get_tx_probs2(xd, &cpi->common.fc.tx_probs);
+ vp9_write(w, tx_size != TX_4X4, tx_probs[0]);
+ if (bsize >= BLOCK_SIZE_MB16X16 && tx_size != TX_4X4) {
+ vp9_write(w, tx_size != TX_8X8, tx_probs[1]);
+ if (bsize >= BLOCK_SIZE_SB32X32 && tx_size != TX_8X8)
+ vp9_write(w, tx_size != TX_16X16, tx_probs[2]);
+ }
+}
- for (k = 0; k < MBSKIP_CONTEXTS; ++k) {
- vp9_cond_prob_diff_update(bc, &pc->fc.mbskip_probs[k],
- VP9_MODE_UPDATE_PROB, pc->fc.mbskip_count[k]);
+static int write_skip_coeff(const VP9_COMP *cpi, int segment_id, MODE_INFO *m,
+ vp9_writer *w) {
+ const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ if (vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_SKIP)) {
+ return 1;
+ } else {
+ const int skip_coeff = m->mbmi.mb_skip_coeff;
+ vp9_write(w, skip_coeff, vp9_get_pred_prob_mbskip(&cpi->common, xd));
+ return skip_coeff;
}
}
+void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *w) {
+ VP9_COMMON *cm = &cpi->common;
+ int k;
+
+ for (k = 0; k < MBSKIP_CONTEXTS; ++k)
+ vp9_cond_prob_diff_update(w, &cm->fc.mbskip_probs[k],
+ VP9_MODE_UPDATE_PROB, cm->counts.mbskip[k]);
+}
+
static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) {
write_token(bc, vp9_intra_mode_tree, p, vp9_intra_mode_encodings + m);
}
@@ -465,7 +248,7 @@ static void update_switchable_interp_probs(VP9_COMP *const cpi,
vp9_tree_probs_from_distribution(
vp9_switchable_interp_tree,
new_prob[j], branch_ct[j],
- pc->fc.switchable_interp_count[j], 0);
+ pc->counts.switchable_interp[j], 0);
}
for (j = 0; j <= VP9_SWITCHABLE_FILTERS; ++j) {
for (i = 0; i < VP9_SWITCHABLE_FILTERS - 1; ++i) {
@@ -486,7 +269,7 @@ static void update_inter_mode_probs(VP9_COMMON *pc, vp9_writer* const bc) {
for (j = 0; j < VP9_INTER_MODES - 1; j++) {
vp9_cond_prob_diff_update(bc, &pc->fc.inter_mode_probs[i][j],
VP9_MODE_UPDATE_PROB,
- pc->fc.inter_mode_counts[i][j]);
+ pc->counts.inter_mode[i][j]);
}
}
}
@@ -519,22 +302,13 @@ static void pack_mb_tokens(vp9_writer* const bc,
assert(pp != 0);
/* skip one or two nodes */
-#if !CONFIG_BALANCED_COEFTREE
if (p->skip_eob_node) {
n -= p->skip_eob_node;
i = 2 * p->skip_eob_node;
}
-#endif
do {
const int bb = (v >> --n) & 1;
-#if CONFIG_BALANCED_COEFTREE
- if (i == 2 && p->skip_eob_node) {
- i += 2;
- assert(bb == 1);
- continue;
- }
-#endif
vp9_write(bc, bb, pp[i >> 1]);
i = vp9_coef_tree[i + bb];
} while (n);
@@ -563,22 +337,18 @@ static void pack_mb_tokens(vp9_writer* const bc,
*tp = p;
}
-static void write_sb_mv_ref(vp9_writer *bc, MB_PREDICTION_MODE m,
+static void write_sb_mv_ref(vp9_writer *w, MB_PREDICTION_MODE mode,
const vp9_prob *p) {
-#if CONFIG_DEBUG
- assert(NEARESTMV <= m && m <= NEWMV);
-#endif
- write_token(bc, vp9_sb_mv_ref_tree, p,
- vp9_sb_mv_ref_encoding_array - NEARESTMV + m);
+ assert(is_inter_mode(mode));
+ write_token(w, vp9_inter_mode_tree, p,
+ &vp9_inter_mode_encodings[mode - NEARESTMV]);
}
-// This function writes the current macro block's segnment id to the bitstream
-// It should only be called if a segment map update is indicated.
-static void write_mb_segid(vp9_writer *bc,
- const MB_MODE_INFO *mi, const MACROBLOCKD *xd) {
- if (xd->segmentation_enabled && xd->update_mb_segmentation_map)
- treed_write(bc, vp9_segment_tree, xd->mb_segment_tree_probs,
- mi->segment_id, 3);
+
+static void write_segment_id(vp9_writer *w, const struct segmentation *seg,
+ int segment_id) {
+ if (seg->enabled && seg->update_map)
+ treed_write(w, vp9_segment_tree, seg->tree_probs, segment_id, 3);
}
// This function encodes the reference frame
@@ -588,7 +358,7 @@ static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) {
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mi = &xd->mode_info_context->mbmi;
const int segment_id = mi->segment_id;
- int seg_ref_active = vp9_segfeature_active(xd, segment_id,
+ int seg_ref_active = vp9_segfeature_active(&xd->seg, segment_id,
SEG_LVL_REF_FRAME);
// If segment level coding of this signal is disabled...
// or the segment allows multiple reference frame options
@@ -597,7 +367,7 @@ static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) {
// (if not specified at the frame/segment level)
if (pc->comp_pred_mode == HYBRID_PREDICTION) {
vp9_write(bc, mi->ref_frame[1] > INTRA_FRAME,
- vp9_get_pred_prob(pc, xd, PRED_COMP_INTER_INTER));
+ vp9_get_pred_prob_comp_inter_inter(pc, xd));
} else {
assert((mi->ref_frame[1] <= INTRA_FRAME) ==
(pc->comp_pred_mode == SINGLE_PREDICTION_ONLY));
@@ -605,17 +375,17 @@ static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) {
if (mi->ref_frame[1] > INTRA_FRAME) {
vp9_write(bc, mi->ref_frame[0] == GOLDEN_FRAME,
- vp9_get_pred_prob(pc, xd, PRED_COMP_REF_P));
+ vp9_get_pred_prob_comp_ref_p(pc, xd));
} else {
vp9_write(bc, mi->ref_frame[0] != LAST_FRAME,
- vp9_get_pred_prob(pc, xd, PRED_SINGLE_REF_P1));
+ vp9_get_pred_prob_single_ref_p1(pc, xd));
if (mi->ref_frame[0] != LAST_FRAME)
vp9_write(bc, mi->ref_frame[0] != GOLDEN_FRAME,
- vp9_get_pred_prob(pc, xd, PRED_SINGLE_REF_P2));
+ vp9_get_pred_prob_single_ref_p2(pc, xd));
}
} else {
assert(mi->ref_frame[1] <= INTRA_FRAME);
- assert(vp9_get_segdata(xd, segment_id, SEG_LVL_REF_FRAME) ==
+ assert(vp9_get_segdata(&xd->seg, segment_id, SEG_LVL_REF_FRAME) ==
mi->ref_frame[0]);
}
@@ -629,60 +399,42 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
const nmv_context *nmvc = &pc->fc.nmvc;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
+ struct segmentation *seg = &xd->seg;
MB_MODE_INFO *const mi = &m->mbmi;
const MV_REFERENCE_FRAME rf = mi->ref_frame[0];
const MB_PREDICTION_MODE mode = mi->mode;
const int segment_id = mi->segment_id;
int skip_coeff;
+ const BLOCK_SIZE_TYPE bsize = mi->sb_type;
- xd->prev_mode_info_context = pc->prev_mi + (m - pc->mi);
x->partition_info = x->pi + (m - pc->mi);
#ifdef ENTROPY_STATS
active_section = 9;
#endif
- if (cpi->mb.e_mbd.update_mb_segmentation_map) {
- // Is temporal coding of the segment map enabled
- if (pc->temporal_update) {
- unsigned char prediction_flag = vp9_get_pred_flag(xd, PRED_SEG_ID);
- vp9_prob pred_prob = vp9_get_pred_prob(pc, xd, PRED_SEG_ID);
-
- // Code the segment id prediction flag for this mb
- vp9_write(bc, prediction_flag, pred_prob);
-
- // If the mb segment id wasn't predicted code explicitly
- if (!prediction_flag)
- write_mb_segid(bc, mi, &cpi->mb.e_mbd);
+ if (seg->update_map) {
+ if (seg->temporal_update) {
+ const int pred_flag = mi->seg_id_predicted;
+ vp9_prob pred_prob = vp9_get_pred_prob_seg_id(xd);
+ vp9_write(bc, pred_flag, pred_prob);
+ if (!pred_flag)
+ write_segment_id(bc, seg, segment_id);
} else {
- // Normal unpredicted coding
- write_mb_segid(bc, mi, &cpi->mb.e_mbd);
+ write_segment_id(bc, seg, segment_id);
}
}
- if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
- skip_coeff = 1;
- } else {
- skip_coeff = m->mbmi.mb_skip_coeff;
- vp9_write(bc, skip_coeff,
- vp9_get_pred_prob(pc, xd, PRED_MBSKIP));
- }
+ skip_coeff = write_skip_coeff(cpi, segment_id, m, bc);
- if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME))
+ if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
vp9_write(bc, rf != INTRA_FRAME,
- vp9_get_pred_prob(pc, xd, PRED_INTRA_INTER));
+ vp9_get_pred_prob_intra_inter(pc, xd));
- if (mi->sb_type >= BLOCK_SIZE_SB8X8 && pc->txfm_mode == TX_MODE_SELECT &&
+ if (bsize >= BLOCK_SIZE_SB8X8 && pc->tx_mode == TX_MODE_SELECT &&
!(rf != INTRA_FRAME &&
- (skip_coeff || vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)))) {
- TX_SIZE sz = mi->txfm_size;
- const vp9_prob *tx_probs = vp9_get_pred_probs(pc, xd, PRED_TX_SIZE);
- vp9_write(bc, sz != TX_4X4, tx_probs[0]);
- if (mi->sb_type >= BLOCK_SIZE_MB16X16 && sz != TX_4X4) {
- vp9_write(bc, sz != TX_8X8, tx_probs[1]);
- if (mi->sb_type >= BLOCK_SIZE_SB32X32 && sz != TX_8X8)
- vp9_write(bc, sz != TX_16X16, tx_probs[2]);
- }
+ (skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
+ write_selected_txfm_size(cpi, mi->txfm_size, bsize, bc);
}
if (rf == INTRA_FRAME) {
@@ -690,28 +442,24 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
active_section = 6;
#endif
- if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
- const BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
+ if (bsize >= BLOCK_SIZE_SB8X8) {
const int bwl = b_width_log2(bsize), bhl = b_height_log2(bsize);
const int bsl = MIN(bwl, bhl);
write_intra_mode(bc, mode, pc->fc.y_mode_prob[MIN(3, bsl)]);
} else {
int idx, idy;
- int bw = 1 << b_width_log2(mi->sb_type);
- int bh = 1 << b_height_log2(mi->sb_type);
- for (idy = 0; idy < 2; idy += bh)
- for (idx = 0; idx < 2; idx += bw) {
- MB_PREDICTION_MODE bm = m->bmi[idy * 2 + idx].as_mode.first;
+ int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+ int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
+ for (idy = 0; idy < 2; idy += num_4x4_blocks_high)
+ for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
+ const MB_PREDICTION_MODE bm = m->bmi[idy * 2 + idx].as_mode;
write_intra_mode(bc, bm, pc->fc.y_mode_prob[0]);
}
}
- write_intra_mode(bc, mi->uv_mode,
- pc->fc.uv_mode_prob[mode]);
+ write_intra_mode(bc, mi->uv_mode, pc->fc.uv_mode_prob[mode]);
} else {
vp9_prob *mv_ref_p;
-
encode_ref_frame(cpi, bc);
-
mv_ref_p = cpi->common.fc.inter_mode_probs[mi->mb_mode_context[rf]];
#ifdef ENTROPY_STATS
@@ -719,8 +467,8 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
#endif
// If segment skip is not enabled code the mode.
- if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
- if (mi->sb_type >= BLOCK_SIZE_SB8X8) {
+ if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
+ if (bsize >= BLOCK_SIZE_SB8X8) {
write_sb_mv_ref(bc, mode, mv_ref_p);
vp9_accum_mv_refs(&cpi->common, mode, mi->mb_mode_context[rf]);
}
@@ -728,38 +476,37 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
if (cpi->common.mcomp_filter_type == SWITCHABLE) {
write_token(bc, vp9_switchable_interp_tree,
- vp9_get_pred_probs(&cpi->common, xd,
- PRED_SWITCHABLE_INTERP),
+ vp9_get_pred_probs_switchable_interp(&cpi->common, xd),
vp9_switchable_interp_encodings +
vp9_switchable_interp_map[mi->interp_filter]);
} else {
assert(mi->interp_filter == cpi->common.mcomp_filter_type);
}
- if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
+ if (bsize < BLOCK_SIZE_SB8X8) {
int j;
MB_PREDICTION_MODE blockmode;
int_mv blockmv;
- int bwl = b_width_log2(mi->sb_type), bw = 1 << bwl;
- int bhl = b_height_log2(mi->sb_type), bh = 1 << bhl;
+ int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+ int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
int idx, idy;
- for (idy = 0; idy < 2; idy += bh) {
- for (idx = 0; idx < 2; idx += bw) {
+ for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
+ for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
j = idy * 2 + idx;
- blockmode = cpi->mb.partition_info->bmi[j].mode;
- blockmv = cpi->mb.partition_info->bmi[j].mv;
+ blockmode = x->partition_info->bmi[j].mode;
+ blockmv = m->bmi[j].as_mv[0];
write_sb_mv_ref(bc, blockmode, mv_ref_p);
vp9_accum_mv_refs(&cpi->common, blockmode, mi->mb_mode_context[rf]);
if (blockmode == NEWMV) {
#ifdef ENTROPY_STATS
active_section = 11;
#endif
- vp9_encode_mv(bc, &blockmv.as_mv, &mi->best_mv.as_mv,
+ vp9_encode_mv(cpi, bc, &blockmv.as_mv, &mi->best_mv.as_mv,
nmvc, xd->allow_high_precision_mv);
if (mi->ref_frame[1] > INTRA_FRAME)
- vp9_encode_mv(bc,
- &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
+ vp9_encode_mv(cpi, bc,
+ &m->bmi[j].as_mv[1].as_mv,
&mi->best_second_mv.as_mv,
nmvc, xd->allow_high_precision_mv);
}
@@ -769,12 +516,12 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
#ifdef ENTROPY_STATS
active_section = 5;
#endif
- vp9_encode_mv(bc,
+ vp9_encode_mv(cpi, bc,
&mi->mv[0].as_mv, &mi->best_mv.as_mv,
nmvc, xd->allow_high_precision_mv);
if (mi->ref_frame[1] > INTRA_FRAME)
- vp9_encode_mv(bc,
+ vp9_encode_mv(cpi, bc,
&mi->mv[1].as_mv, &mi->best_second_mv.as_mv,
nmvc, xd->allow_high_precision_mv);
}
@@ -789,54 +536,40 @@ static void write_mb_modes_kf(const VP9_COMP *cpi,
const int ym = m->mbmi.mode;
const int mis = c->mode_info_stride;
const int segment_id = m->mbmi.segment_id;
- int skip_coeff;
- if (xd->update_mb_segmentation_map)
- write_mb_segid(bc, &m->mbmi, xd);
+ if (xd->seg.update_map)
+ write_segment_id(bc, &xd->seg, m->mbmi.segment_id);
- if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
- skip_coeff = 1;
- } else {
- skip_coeff = m->mbmi.mb_skip_coeff;
- vp9_write(bc, skip_coeff, vp9_get_pred_prob(c, xd, PRED_MBSKIP));
- }
+ write_skip_coeff(cpi, segment_id, m, bc);
- if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8 && c->txfm_mode == TX_MODE_SELECT) {
- TX_SIZE sz = m->mbmi.txfm_size;
- const vp9_prob *tx_probs = vp9_get_pred_probs(c, xd, PRED_TX_SIZE);
- vp9_write(bc, sz != TX_4X4, tx_probs[0]);
- if (m->mbmi.sb_type >= BLOCK_SIZE_MB16X16 && sz != TX_4X4) {
- vp9_write(bc, sz != TX_8X8, tx_probs[1]);
- if (m->mbmi.sb_type >= BLOCK_SIZE_SB32X32 && sz != TX_8X8)
- vp9_write(bc, sz != TX_16X16, tx_probs[2]);
- }
- }
+ if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8 && c->tx_mode == TX_MODE_SELECT)
+ write_selected_txfm_size(cpi, m->mbmi.txfm_size, m->mbmi.sb_type, bc);
if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis);
const MB_PREDICTION_MODE L = xd->left_available ?
left_block_mode(m, 0) : DC_PRED;
- write_intra_mode(bc, ym, c->kf_y_mode_prob[A][L]);
+ write_intra_mode(bc, ym, vp9_kf_y_mode_prob[A][L]);
} else {
int idx, idy;
- int bw = 1 << b_width_log2(m->mbmi.sb_type);
- int bh = 1 << b_height_log2(m->mbmi.sb_type);
- for (idy = 0; idy < 2; idy += bh) {
- for (idx = 0; idx < 2; idx += bw) {
+ int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[m->mbmi.sb_type];
+ int num_4x4_blocks_high = num_4x4_blocks_high_lookup[m->mbmi.sb_type];
+ for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
+ for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
int i = idy * 2 + idx;
const MB_PREDICTION_MODE A = above_block_mode(m, i, mis);
const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
left_block_mode(m, i) : DC_PRED;
- const int bm = m->bmi[i].as_mode.first;
+ const int bm = m->bmi[i].as_mode;
#ifdef ENTROPY_STATS
++intra_mode_stats[A][L][bm];
#endif
- write_intra_mode(bc, bm, c->kf_y_mode_prob[A][L]);
+ write_intra_mode(bc, bm, vp9_kf_y_mode_prob[A][L]);
}
}
}
- write_intra_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
+ write_intra_mode(bc, m->mbmi.uv_mode, vp9_kf_uv_mode_prob[ym]);
}
static void write_modes_b(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
@@ -875,30 +608,16 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
const int mis = cm->mode_info_stride;
- int bwl, bhl;
int bsl = b_width_log2(bsize);
int bs = (1 << bsl) / 4; // mode_info step for subsize
int n;
- PARTITION_TYPE partition;
+ PARTITION_TYPE partition = PARTITION_NONE;
BLOCK_SIZE_TYPE subsize;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
- bwl = b_width_log2(m->mbmi.sb_type);
- bhl = b_height_log2(m->mbmi.sb_type);
-
- // parse the partition type
- if ((bwl == bsl) && (bhl == bsl))
- partition = PARTITION_NONE;
- else if ((bwl == bsl) && (bhl < bsl))
- partition = PARTITION_HORZ;
- else if ((bwl < bsl) && (bhl == bsl))
- partition = PARTITION_VERT;
- else if ((bwl < bsl) && (bhl < bsl))
- partition = PARTITION_SPLIT;
- else
- assert(0);
+ partition = partition_lookup[bsl][m->mbmi.sb_type];
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index > 0)
@@ -906,9 +625,8 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
if (bsize >= BLOCK_SIZE_SB8X8) {
int pl;
- int idx = check_bsize_coverage(cm, xd, mi_row, mi_col, bsize);
- xd->left_seg_context = cm->left_seg_context + (mi_row & MI_MASK);
- xd->above_seg_context = cm->above_seg_context + mi_col;
+ const int idx = check_bsize_coverage(cm, xd, mi_row, mi_col, bsize);
+ set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
// encode the partition information
if (idx == 0)
@@ -968,14 +686,12 @@ static void write_modes(VP9_COMP *cpi, vp9_writer* const bc,
m_ptr += c->cur_tile_mi_col_start + c->cur_tile_mi_row_start * mis;
- for (mi_row = c->cur_tile_mi_row_start;
- mi_row < c->cur_tile_mi_row_end;
+ for (mi_row = c->cur_tile_mi_row_start; mi_row < c->cur_tile_mi_row_end;
mi_row += 8, m_ptr += 8 * mis) {
m = m_ptr;
- vpx_memset(c->left_seg_context, 0, sizeof(c->left_seg_context));
- for (mi_col = c->cur_tile_mi_col_start;
- mi_col < c->cur_tile_mi_col_end;
- mi_col += 64 / MI_SIZE, m += 64 / MI_SIZE)
+ vp9_zero(c->left_seg_context);
+ for (mi_col = c->cur_tile_mi_col_start; mi_col < c->cur_tile_mi_col_end;
+ mi_col += MI_BLOCK_SIZE, m += MI_BLOCK_SIZE)
write_modes_sb(cpi, m, bc, tok, tok_end, mi_row, mi_col,
BLOCK_SIZE_SB64X64);
}
@@ -1014,7 +730,7 @@ static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE txfm_size) {
vp9_coeff_probs_model *coef_probs = cpi->frame_coef_probs[txfm_size];
vp9_coeff_count *coef_counts = cpi->coef_counts[txfm_size];
unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] =
- cpi->common.fc.eob_branch_counts[txfm_size];
+ cpi->common.counts.eob_branch[txfm_size];
vp9_coeff_stats *coef_branch_ct = cpi->frame_branch_ct[txfm_size];
vp9_prob full_probs[ENTROPY_NODES];
int i, j, k, l;
@@ -1031,19 +747,11 @@ static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE txfm_size) {
coef_counts[i][j][k][l], 0);
vpx_memcpy(coef_probs[i][j][k][l], full_probs,
sizeof(vp9_prob) * UNCONSTRAINED_NODES);
-#if CONFIG_BALANCED_COEFTREE
- coef_branch_ct[i][j][k][l][1][1] = eob_branch_ct[i][j][k][l] -
- coef_branch_ct[i][j][k][l][1][0];
- coef_probs[i][j][k][l][1] =
- get_binary_prob(coef_branch_ct[i][j][k][l][1][0],
- coef_branch_ct[i][j][k][l][1][1]);
-#else
coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
coef_branch_ct[i][j][k][l][0][0];
coef_probs[i][j][k][l][0] =
get_binary_prob(coef_branch_ct[i][j][k][l][0][0],
coef_branch_ct[i][j][k][l][0][1]);
-#endif
#ifdef ENTROPY_STATS
if (!cpi->dummy_packing) {
int t;
@@ -1096,11 +804,11 @@ static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi,
if (l >= 3 && k == 0)
continue;
if (t == PIVOT_NODE)
- s = prob_diff_update_savings_search_model(
+ s = vp9_prob_diff_update_savings_search_model(
frame_branch_ct[i][j][k][l][0],
old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
else
- s = prob_diff_update_savings_search(
+ s = vp9_prob_diff_update_savings_search(
frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
if (s > 0 && newp != oldp)
u = 1;
@@ -1137,11 +845,11 @@ static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi,
if (l >= 3 && k == 0)
continue;
if (t == PIVOT_NODE)
- s = prob_diff_update_savings_search_model(
+ s = vp9_prob_diff_update_savings_search_model(
frame_branch_ct[i][j][k][l][0],
old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
else
- s = prob_diff_update_savings_search(
+ s = vp9_prob_diff_update_savings_search(
frame_branch_ct[i][j][k][l][t],
*oldp, &newp, upd);
if (s > 0 && newp != *oldp)
@@ -1153,7 +861,7 @@ static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi,
#endif
if (u) {
/* send/use new probability */
- write_prob_diff_update(bc, newp, *oldp);
+ vp9_write_prob_diff_update(bc, newp, *oldp);
*oldp = newp;
}
}
@@ -1164,7 +872,7 @@ static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi,
}
static void update_coef_probs(VP9_COMP* const cpi, vp9_writer* const bc) {
- const TXFM_MODE txfm_mode = cpi->common.txfm_mode;
+ const TX_MODE tx_mode = cpi->common.tx_mode;
vp9_clear_system_state();
@@ -1174,39 +882,39 @@ static void update_coef_probs(VP9_COMP* const cpi, vp9_writer* const bc) {
update_coef_probs_common(bc, cpi, TX_4X4);
// do not do this if not even allowed
- if (txfm_mode > ONLY_4X4)
+ if (tx_mode > ONLY_4X4)
update_coef_probs_common(bc, cpi, TX_8X8);
- if (txfm_mode > ALLOW_8X8)
+ if (tx_mode > ALLOW_8X8)
update_coef_probs_common(bc, cpi, TX_16X16);
- if (txfm_mode > ALLOW_16X16)
+ if (tx_mode > ALLOW_16X16)
update_coef_probs_common(bc, cpi, TX_32X32);
}
-static void encode_loopfilter(VP9_COMMON *pc, MACROBLOCKD *xd,
+static void encode_loopfilter(struct loopfilter *lf,
struct vp9_write_bit_buffer *wb) {
int i;
// Encode the loop filter level and type
- vp9_wb_write_literal(wb, pc->filter_level, 6);
- vp9_wb_write_literal(wb, pc->sharpness_level, 3);
+ vp9_wb_write_literal(wb, lf->filter_level, 6);
+ vp9_wb_write_literal(wb, lf->sharpness_level, 3);
// Write out loop filter deltas applied at the MB level based on mode or
// ref frame (if they are enabled).
- vp9_wb_write_bit(wb, xd->mode_ref_lf_delta_enabled);
+ vp9_wb_write_bit(wb, lf->mode_ref_delta_enabled);
- if (xd->mode_ref_lf_delta_enabled) {
+ if (lf->mode_ref_delta_enabled) {
// Do the deltas need to be updated
- vp9_wb_write_bit(wb, xd->mode_ref_lf_delta_update);
- if (xd->mode_ref_lf_delta_update) {
+ vp9_wb_write_bit(wb, lf->mode_ref_delta_update);
+ if (lf->mode_ref_delta_update) {
// Send update
for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
- const int delta = xd->ref_lf_deltas[i];
+ const int delta = lf->ref_deltas[i];
// Frame level data
- if (delta != xd->last_ref_lf_deltas[i]) {
- xd->last_ref_lf_deltas[i] = delta;
+ if (delta != lf->last_ref_deltas[i]) {
+ lf->last_ref_deltas[i] = delta;
vp9_wb_write_bit(wb, 1);
assert(delta != 0);
@@ -1219,9 +927,9 @@ static void encode_loopfilter(VP9_COMMON *pc, MACROBLOCKD *xd,
// Send update
for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
- const int delta = xd->mode_lf_deltas[i];
- if (delta != xd->last_mode_lf_deltas[i]) {
- xd->last_mode_lf_deltas[i] = delta;
+ const int delta = lf->mode_deltas[i];
+ if (delta != lf->last_mode_deltas[i]) {
+ lf->last_mode_deltas[i] = delta;
vp9_wb_write_bit(wb, 1);
assert(delta != 0);
@@ -1255,23 +963,23 @@ static void encode_quantization(VP9_COMMON *cm,
static void encode_segmentation(VP9_COMP *cpi,
- struct vp9_write_bit_buffer *wb) {
+ struct vp9_write_bit_buffer *wb) {
int i, j;
- VP9_COMMON *const cm = &cpi->common;
- MACROBLOCKD *const xd = &cpi->mb.e_mbd;
- vp9_wb_write_bit(wb, xd->segmentation_enabled);
- if (!xd->segmentation_enabled)
+ struct segmentation *seg = &cpi->mb.e_mbd.seg;
+
+ vp9_wb_write_bit(wb, seg->enabled);
+ if (!seg->enabled)
return;
// Segmentation map
- vp9_wb_write_bit(wb, xd->update_mb_segmentation_map);
- if (xd->update_mb_segmentation_map) {
+ vp9_wb_write_bit(wb, seg->update_map);
+ if (seg->update_map) {
// Select the coding strategy (temporal or spatial)
vp9_choose_segmap_coding_method(cpi);
// Write out probabilities used to decode unpredicted macro-block segments
- for (i = 0; i < MB_SEG_TREE_PROBS; i++) {
- const int prob = xd->mb_segment_tree_probs[i];
+ for (i = 0; i < SEG_TREE_PROBS; i++) {
+ const int prob = seg->tree_probs[i];
const int update = prob != MAX_PROB;
vp9_wb_write_bit(wb, update);
if (update)
@@ -1279,10 +987,10 @@ static void encode_segmentation(VP9_COMP *cpi,
}
// Write out the chosen coding method.
- vp9_wb_write_bit(wb, cm->temporal_update);
- if (cm->temporal_update) {
+ vp9_wb_write_bit(wb, seg->temporal_update);
+ if (seg->temporal_update) {
for (i = 0; i < PREDICTION_PROBS; i++) {
- const int prob = cm->segment_pred_probs[i];
+ const int prob = seg->pred_probs[i];
const int update = prob != MAX_PROB;
vp9_wb_write_bit(wb, update);
if (update)
@@ -1292,16 +1000,16 @@ static void encode_segmentation(VP9_COMP *cpi,
}
// Segmentation data
- vp9_wb_write_bit(wb, xd->update_mb_segmentation_data);
- if (xd->update_mb_segmentation_data) {
- vp9_wb_write_bit(wb, xd->mb_segment_abs_delta);
+ vp9_wb_write_bit(wb, seg->update_data);
+ if (seg->update_data) {
+ vp9_wb_write_bit(wb, seg->abs_delta);
- for (i = 0; i < MAX_MB_SEGMENTS; i++) {
+ for (i = 0; i < MAX_SEGMENTS; i++) {
for (j = 0; j < SEG_LVL_MAX; j++) {
- const int active = vp9_segfeature_active(xd, i, j);
+ const int active = vp9_segfeature_active(seg, i, j);
vp9_wb_write_bit(wb, active);
if (active) {
- const int data = vp9_get_segdata(xd, i, j);
+ const int data = vp9_get_segdata(seg, i, j);
const int data_max = vp9_seg_feature_data_max(j);
if (vp9_is_segfeature_signed(j)) {
@@ -1321,12 +1029,12 @@ static void encode_txfm_probs(VP9_COMP *cpi, vp9_writer *w) {
VP9_COMMON *const cm = &cpi->common;
// Mode
- vp9_write_literal(w, MIN(cm->txfm_mode, ALLOW_32X32), 2);
- if (cm->txfm_mode >= ALLOW_32X32)
- vp9_write_bit(w, cm->txfm_mode == TX_MODE_SELECT);
+ vp9_write_literal(w, MIN(cm->tx_mode, ALLOW_32X32), 2);
+ if (cm->tx_mode >= ALLOW_32X32)
+ vp9_write_bit(w, cm->tx_mode == TX_MODE_SELECT);
// Probabilities
- if (cm->txfm_mode == TX_MODE_SELECT) {
+ if (cm->tx_mode == TX_MODE_SELECT) {
int i, j;
unsigned int ct_8x8p[TX_SIZE_MAX_SB - 3][2];
unsigned int ct_16x16p[TX_SIZE_MAX_SB - 2][2];
@@ -1334,28 +1042,26 @@ static void encode_txfm_probs(VP9_COMP *cpi, vp9_writer *w) {
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
- tx_counts_to_branch_counts_8x8(cm->fc.tx_count_8x8p[i],
+ tx_counts_to_branch_counts_8x8(cm->counts.tx.p8x8[i],
ct_8x8p);
- for (j = 0; j < TX_SIZE_MAX_SB - 3; j++) {
- vp9_cond_prob_diff_update(w, &cm->fc.tx_probs_8x8p[i][j],
+ for (j = 0; j < TX_SIZE_MAX_SB - 3; j++)
+ vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p8x8[i][j],
VP9_MODE_UPDATE_PROB, ct_8x8p[j]);
- }
}
+
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
- tx_counts_to_branch_counts_16x16(cm->fc.tx_count_16x16p[i],
+ tx_counts_to_branch_counts_16x16(cm->counts.tx.p16x16[i],
ct_16x16p);
- for (j = 0; j < TX_SIZE_MAX_SB - 2; j++) {
- vp9_cond_prob_diff_update(w, &cm->fc.tx_probs_16x16p[i][j],
+ for (j = 0; j < TX_SIZE_MAX_SB - 2; j++)
+ vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p16x16[i][j],
VP9_MODE_UPDATE_PROB, ct_16x16p[j]);
- }
}
+
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
- tx_counts_to_branch_counts_32x32(cm->fc.tx_count_32x32p[i],
- ct_32x32p);
- for (j = 0; j < TX_SIZE_MAX_SB - 1; j++) {
- vp9_cond_prob_diff_update(w, &cm->fc.tx_probs_32x32p[i][j],
+ tx_counts_to_branch_counts_32x32(cm->counts.tx.p32x32[i], ct_32x32p);
+ for (j = 0; j < TX_SIZE_MAX_SB - 1; j++)
+ vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j],
VP9_MODE_UPDATE_PROB, ct_32x32p[j]);
- }
}
#ifdef MODE_STATS
if (!cpi->dummy_packing)
@@ -1381,7 +1087,7 @@ static void fix_mcomp_filter_type(VP9_COMP *cpi) {
for (i = 0; i < VP9_SWITCHABLE_FILTERS; ++i) {
count[i] = 0;
for (j = 0; j <= VP9_SWITCHABLE_FILTERS; ++j)
- count[i] += cm->fc.switchable_interp_count[j][i];
+ count[i] += cm->counts.switchable_interp[j][i];
c += (count[i] > 0);
}
if (c == 1) {
@@ -1397,18 +1103,18 @@ static void fix_mcomp_filter_type(VP9_COMP *cpi) {
}
static void write_tile_info(VP9_COMMON *cm, struct vp9_write_bit_buffer *wb) {
- int min_log2_tiles, delta_log2_tiles, n_tile_bits, n;
- vp9_get_tile_n_bits(cm, &min_log2_tiles, &delta_log2_tiles);
- n_tile_bits = cm->log2_tile_columns - min_log2_tiles;
- for (n = 0; n < delta_log2_tiles; n++) {
- if (n_tile_bits--) {
- vp9_wb_write_bit(wb, 1);
- } else {
- vp9_wb_write_bit(wb, 0);
- break;
- }
- }
+ int min_log2_tile_cols, max_log2_tile_cols, ones;
+ vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+ // columns
+ ones = cm->log2_tile_cols - min_log2_tile_cols;
+ while (ones--)
+ vp9_wb_write_bit(wb, 1);
+
+ if (cm->log2_tile_cols < max_log2_tile_cols)
+ vp9_wb_write_bit(wb, 0);
+
+ // rows
vp9_wb_write_bit(wb, cm->log2_tile_rows != 0);
if (cm->log2_tile_rows != 0)
vp9_wb_write_bit(wb, cm->log2_tile_rows != 1);
@@ -1449,6 +1155,57 @@ static int get_refresh_mask(VP9_COMP *cpi) {
}
}
+static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) {
+ VP9_COMMON *const cm = &cpi->common;
+ vp9_writer residual_bc;
+
+ int tile_row, tile_col;
+ TOKENEXTRA *tok[4][1 << 6], *tok_end;
+ size_t total_size = 0;
+ const int tile_cols = 1 << cm->log2_tile_cols;
+ const int tile_rows = 1 << cm->log2_tile_rows;
+
+ vpx_memset(cm->above_seg_context, 0, sizeof(PARTITION_CONTEXT) *
+ mi_cols_aligned_to_sb(cm->mi_cols));
+
+ tok[0][0] = cpi->tok;
+ for (tile_row = 0; tile_row < tile_rows; tile_row++) {
+ if (tile_row)
+ tok[tile_row][0] = tok[tile_row - 1][tile_cols - 1] +
+ cpi->tok_count[tile_row - 1][tile_cols - 1];
+
+ for (tile_col = 1; tile_col < tile_cols; tile_col++)
+ tok[tile_row][tile_col] = tok[tile_row][tile_col - 1] +
+ cpi->tok_count[tile_row][tile_col - 1];
+ }
+
+ for (tile_row = 0; tile_row < tile_rows; tile_row++) {
+ vp9_get_tile_row_offsets(cm, tile_row);
+ for (tile_col = 0; tile_col < tile_cols; tile_col++) {
+ vp9_get_tile_col_offsets(cm, tile_col);
+ tok_end = tok[tile_row][tile_col] + cpi->tok_count[tile_row][tile_col];
+
+ if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
+ vp9_start_encode(&residual_bc, data_ptr + total_size + 4);
+ else
+ vp9_start_encode(&residual_bc, data_ptr + total_size);
+
+ write_modes(cpi, &residual_bc, &tok[tile_row][tile_col], tok_end);
+ assert(tok[tile_row][tile_col] == tok_end);
+ vp9_stop_encode(&residual_bc);
+ if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
+ // size of this tile
+ write_be32(data_ptr + total_size, residual_bc.pos);
+ total_size += 4;
+ }
+
+ total_size += residual_bc.pos;
+ }
+ }
+
+ return total_size;
+}
+
static void write_display_size(VP9_COMP *cpi, struct vp9_write_bit_buffer *wb) {
VP9_COMMON *const cm = &cpi->common;
@@ -1562,7 +1319,7 @@ static void write_uncompressed_header(VP9_COMP *cpi,
int i;
vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES);
for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
- vp9_wb_write_literal(wb, refs[i], NUM_REF_FRAMES_LG2);
+ vp9_wb_write_literal(wb, refs[i], NUM_REF_FRAMES_LOG2);
vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[LAST_FRAME + i]);
}
@@ -1580,66 +1337,27 @@ static void write_uncompressed_header(VP9_COMP *cpi,
vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
}
- vp9_wb_write_literal(wb, cm->frame_context_idx, NUM_FRAME_CONTEXTS_LG2);
+ vp9_wb_write_literal(wb, cm->frame_context_idx, NUM_FRAME_CONTEXTS_LOG2);
- encode_loopfilter(cm, xd, wb);
+ encode_loopfilter(&xd->lf, wb);
encode_quantization(cm, wb);
encode_segmentation(cpi, wb);
write_tile_info(cm, wb);
}
-void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
- int i, bytes_packed;
- VP9_COMMON *const pc = &cpi->common;
- vp9_writer header_bc, residual_bc;
+static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
+ VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ FRAME_CONTEXT *const fc = &cm->fc;
+ vp9_writer header_bc;
- uint8_t *cx_data = dest;
- struct vp9_write_bit_buffer wb = {dest, 0};
- struct vp9_write_bit_buffer first_partition_size_wb;
-
- write_uncompressed_header(cpi, &wb);
- first_partition_size_wb = wb;
- vp9_wb_write_literal(&wb, 0, 16); // don't know in advance first part. size
-
- bytes_packed = vp9_rb_bytes_written(&wb);
- cx_data += bytes_packed;
+ vp9_start_encode(&header_bc, data);
- compute_update_table();
-
- vp9_start_encode(&header_bc, cx_data);
-
-#ifdef ENTROPY_STATS
- if (pc->frame_type == INTER_FRAME)
- active_section = 0;
+ if (xd->lossless)
+ cm->tx_mode = ONLY_4X4;
else
- active_section = 7;
-#endif
-
- vp9_clear_system_state(); // __asm emms;
-
- vp9_copy(pc->fc.pre_coef_probs, pc->fc.coef_probs);
- vp9_copy(pc->fc.pre_y_mode_prob, pc->fc.y_mode_prob);
- vp9_copy(pc->fc.pre_uv_mode_prob, pc->fc.uv_mode_prob);
- vp9_copy(pc->fc.pre_partition_prob, pc->fc.partition_prob[INTER_FRAME]);
- pc->fc.pre_nmvc = pc->fc.nmvc;
- vp9_copy(pc->fc.pre_switchable_interp_prob, pc->fc.switchable_interp_prob);
- vp9_copy(pc->fc.pre_inter_mode_probs, pc->fc.inter_mode_probs);
- vp9_copy(pc->fc.pre_intra_inter_prob, pc->fc.intra_inter_prob);
- vp9_copy(pc->fc.pre_comp_inter_prob, pc->fc.comp_inter_prob);
- vp9_copy(pc->fc.pre_comp_ref_prob, pc->fc.comp_ref_prob);
- vp9_copy(pc->fc.pre_single_ref_prob, pc->fc.single_ref_prob);
- vp9_copy(pc->fc.pre_tx_probs_8x8p, pc->fc.tx_probs_8x8p);
- vp9_copy(pc->fc.pre_tx_probs_16x16p, pc->fc.tx_probs_16x16p);
- vp9_copy(pc->fc.pre_tx_probs_32x32p, pc->fc.tx_probs_32x32p);
- vp9_copy(pc->fc.pre_mbskip_probs, pc->fc.mbskip_probs);
-
- if (xd->lossless) {
- pc->txfm_mode = ONLY_4X4;
- } else {
encode_txfm_probs(cpi, &header_bc);
- }
update_coef_probs(cpi, &header_bc);
@@ -1649,124 +1367,106 @@ void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
vp9_update_skip_probs(cpi, &header_bc);
- if (pc->frame_type != KEY_FRAME) {
+ if (cm->frame_type != KEY_FRAME) {
+ int i;
#ifdef ENTROPY_STATS
active_section = 1;
#endif
- update_inter_mode_probs(pc, &header_bc);
- vp9_zero(cpi->common.fc.inter_mode_counts);
+ update_inter_mode_probs(cm, &header_bc);
+ vp9_zero(cm->counts.inter_mode);
- if (pc->mcomp_filter_type == SWITCHABLE)
+ if (cm->mcomp_filter_type == SWITCHABLE)
update_switchable_interp_probs(cpi, &header_bc);
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
- vp9_cond_prob_diff_update(&header_bc, &pc->fc.intra_inter_prob[i],
+ vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
VP9_MODE_UPDATE_PROB,
cpi->intra_inter_count[i]);
- if (pc->allow_comp_inter_inter) {
+ if (cm->allow_comp_inter_inter) {
const int comp_pred_mode = cpi->common.comp_pred_mode;
- const int use_compound_pred = (comp_pred_mode != SINGLE_PREDICTION_ONLY);
- const int use_hybrid_pred = (comp_pred_mode == HYBRID_PREDICTION);
+ const int use_compound_pred = comp_pred_mode != SINGLE_PREDICTION_ONLY;
+ const int use_hybrid_pred = comp_pred_mode == HYBRID_PREDICTION;
vp9_write_bit(&header_bc, use_compound_pred);
if (use_compound_pred) {
vp9_write_bit(&header_bc, use_hybrid_pred);
- if (use_hybrid_pred) {
+ if (use_hybrid_pred)
for (i = 0; i < COMP_INTER_CONTEXTS; i++)
- vp9_cond_prob_diff_update(&header_bc, &pc->fc.comp_inter_prob[i],
+ vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
VP9_MODE_UPDATE_PROB,
cpi->comp_inter_count[i]);
- }
}
}
- if (pc->comp_pred_mode != COMP_PREDICTION_ONLY) {
+ if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) {
for (i = 0; i < REF_CONTEXTS; i++) {
- vp9_cond_prob_diff_update(&header_bc, &pc->fc.single_ref_prob[i][0],
+ vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
VP9_MODE_UPDATE_PROB,
cpi->single_ref_count[i][0]);
- vp9_cond_prob_diff_update(&header_bc, &pc->fc.single_ref_prob[i][1],
+ vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
VP9_MODE_UPDATE_PROB,
cpi->single_ref_count[i][1]);
}
}
- if (pc->comp_pred_mode != SINGLE_PREDICTION_ONLY) {
+ if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY)
for (i = 0; i < REF_CONTEXTS; i++)
- vp9_cond_prob_diff_update(&header_bc, &pc->fc.comp_ref_prob[i],
+ vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
VP9_MODE_UPDATE_PROB,
cpi->comp_ref_count[i]);
- }
update_mbintra_mode_probs(cpi, &header_bc);
for (i = 0; i < NUM_PARTITION_CONTEXTS; ++i) {
- vp9_prob Pnew[PARTITION_TYPES - 1];
+ vp9_prob pnew[PARTITION_TYPES - 1];
unsigned int bct[PARTITION_TYPES - 1][2];
update_mode(&header_bc, PARTITION_TYPES, vp9_partition_encodings,
- vp9_partition_tree, Pnew,
- pc->fc.partition_prob[pc->frame_type][i], bct,
+ vp9_partition_tree, pnew,
+ fc->partition_prob[cm->frame_type][i], bct,
(unsigned int *)cpi->partition_count[i]);
}
vp9_write_nmv_probs(cpi, xd->allow_high_precision_mv, &header_bc);
}
-
vp9_stop_encode(&header_bc);
+ assert(header_bc.pos <= 0xffff);
+ return header_bc.pos;
+}
- // first partition size
- assert(header_bc.pos <= 0xffff);
- vp9_wb_write_literal(&first_partition_size_wb, header_bc.pos, 16);
- *size = bytes_packed + header_bc.pos;
-
- {
- int tile_row, tile_col, total_size = 0;
- unsigned char *data_ptr = cx_data + header_bc.pos;
- TOKENEXTRA *tok[4][1 << 6], *tok_end;
-
- vpx_memset(cpi->common.above_seg_context, 0, sizeof(PARTITION_CONTEXT) *
- mi_cols_aligned_to_sb(&cpi->common));
- tok[0][0] = cpi->tok;
- for (tile_row = 0; tile_row < pc->tile_rows; tile_row++) {
- if (tile_row) {
- tok[tile_row][0] = tok[tile_row - 1][pc->tile_columns - 1] +
- cpi->tok_count[tile_row - 1][pc->tile_columns - 1];
- }
- for (tile_col = 1; tile_col < pc->tile_columns; tile_col++) {
- tok[tile_row][tile_col] = tok[tile_row][tile_col - 1] +
- cpi->tok_count[tile_row][tile_col - 1];
- }
- }
+void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
+ uint8_t *data = dest;
+ size_t first_part_size;
+ struct vp9_write_bit_buffer wb = {data, 0};
+ struct vp9_write_bit_buffer saved_wb;
- for (tile_row = 0; tile_row < pc->tile_rows; tile_row++) {
- vp9_get_tile_row_offsets(pc, tile_row);
- for (tile_col = 0; tile_col < pc->tile_columns; tile_col++) {
- vp9_get_tile_col_offsets(pc, tile_col);
- tok_end = tok[tile_row][tile_col] + cpi->tok_count[tile_row][tile_col];
-
- if (tile_col < pc->tile_columns - 1 || tile_row < pc->tile_rows - 1)
- vp9_start_encode(&residual_bc, data_ptr + total_size + 4);
- else
- vp9_start_encode(&residual_bc, data_ptr + total_size);
- write_modes(cpi, &residual_bc, &tok[tile_row][tile_col], tok_end);
- assert(tok[tile_row][tile_col] == tok_end);
- vp9_stop_encode(&residual_bc);
- if (tile_col < pc->tile_columns - 1 || tile_row < pc->tile_rows - 1) {
- // size of this tile
- write_be32(data_ptr + total_size, residual_bc.pos);
- total_size += 4;
- }
+ write_uncompressed_header(cpi, &wb);
+ saved_wb = wb;
+ vp9_wb_write_literal(&wb, 0, 16); // don't know in advance first part. size
- total_size += residual_bc.pos;
- }
- }
+ data += vp9_rb_bytes_written(&wb);
- *size += total_size;
- }
+ vp9_compute_update_table();
+
+#ifdef ENTROPY_STATS
+ if (pc->frame_type == INTER_FRAME)
+ active_section = 0;
+ else
+ active_section = 7;
+#endif
+
+ vp9_clear_system_state(); // __asm emms;
+
+ first_part_size = write_compressed_header(cpi, data);
+ data += first_part_size;
+ vp9_wb_write_literal(&saved_wb, first_part_size, 16);
+
+ data += encode_tiles(cpi, data);
+
+ *size = data - dest;
}
#ifdef ENTROPY_STATS