author     hkuang <hkuang@google.com>        2014-04-09 14:20:00 -0700
committer  Lajos Molnar <lajos@google.com>   2014-04-10 17:18:16 +0000
commit     4fb68e5dd4e93c7599dc905d861de11ac39c5585 (patch)
tree       a9a16f0806a169ec4291bcd60bbfefccebb338d4 /libvpx/vp9/encoder/vp9_variance.c
parent     2ec72e65689c948e92b826ae1e867bf369e72f13 (diff)
Roll latest libvpx to fix hang when doing adaptive playback.

VP9 decoder will hang when switching from frames with 2 tiles to 4 tiles
on a 4 core device.

libvpx hash: 4fffefe189a9123d4b04482c26a1be5eb632b397

(cherry picked from commit 6ac915abcdb404a00d927fe6308a47fcf09d9519)

Bug: 13931133
Change-Id: I24a51fd572ca7e872bc440491e2c645a20e9a736
Diffstat (limited to 'libvpx/vp9/encoder/vp9_variance.c')
-rw-r--r--  libvpx/vp9/encoder/vp9_variance.c  49
1 file changed, 36 insertions, 13 deletions
diff --git a/libvpx/vp9/encoder/vp9_variance.c b/libvpx/vp9/encoder/vp9_variance.c
index 8bc3850..71867a9 100644
--- a/libvpx/vp9/encoder/vp9_variance.c
+++ b/libvpx/vp9/encoder/vp9_variance.c
@@ -216,7 +216,7 @@ unsigned int vp9_sub_pixel_avg_variance64x32_c(const uint8_t *src_ptr,
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 33, 64, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 64, 64, 32, 64, vfilter);
- comp_avg_pred(temp3, second_pred, 64, 32, temp2, 64);
+ vp9_comp_avg_pred(temp3, second_pred, 64, 32, temp2, 64);
return vp9_variance64x32(temp3, 64, dst_ptr, dst_pixels_per_line, sse);
}
@@ -273,7 +273,7 @@ unsigned int vp9_sub_pixel_avg_variance32x64_c(const uint8_t *src_ptr,
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 65, 32, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 64, 32, vfilter);
- comp_avg_pred(temp3, second_pred, 32, 64, temp2, 32);
+ vp9_comp_avg_pred(temp3, second_pred, 32, 64, temp2, 32);
return vp9_variance32x64(temp3, 32, dst_ptr, dst_pixels_per_line, sse);
}
@@ -330,7 +330,7 @@ unsigned int vp9_sub_pixel_avg_variance32x16_c(const uint8_t *src_ptr,
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 17, 32, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 16, 32, vfilter);
- comp_avg_pred(temp3, second_pred, 32, 16, temp2, 32);
+ vp9_comp_avg_pred(temp3, second_pred, 32, 16, temp2, 32);
return vp9_variance32x16(temp3, 32, dst_ptr, dst_pixels_per_line, sse);
}
@@ -387,7 +387,7 @@ unsigned int vp9_sub_pixel_avg_variance16x32_c(const uint8_t *src_ptr,
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 33, 16, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 32, 16, vfilter);
- comp_avg_pred(temp3, second_pred, 16, 32, temp2, 16);
+ vp9_comp_avg_pred(temp3, second_pred, 16, 32, temp2, 16);
return vp9_variance16x32(temp3, 16, dst_ptr, dst_pixels_per_line, sse);
}
@@ -417,6 +417,12 @@ unsigned int vp9_variance32x32_c(const uint8_t *src_ptr,
return (var - (((int64_t)avg * avg) >> 10));
}
+void vp9_get_sse_sum_16x16_c(const uint8_t *src_ptr, int source_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ unsigned int *sse, int *sum) {
+ variance(src_ptr, source_stride, ref_ptr, ref_stride, 16, 16, sse, sum);
+}
+
unsigned int vp9_variance16x16_c(const uint8_t *src_ptr,
int source_stride,
const uint8_t *ref_ptr,
@@ -614,7 +620,7 @@ unsigned int vp9_sub_pixel_avg_variance4x4_c(const uint8_t *src_ptr,
// Now filter Verticaly
var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4, vfilter);
- comp_avg_pred(temp3, second_pred, 4, 4, temp2, 4);
+ vp9_comp_avg_pred(temp3, second_pred, 4, 4, temp2, 4);
return vp9_variance4x4(temp3, 4, dst_ptr, dst_pixels_per_line, sse);
}
@@ -658,7 +664,7 @@ unsigned int vp9_sub_pixel_avg_variance8x8_c(const uint8_t *src_ptr,
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 9, 8, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 8, 8, vfilter);
- comp_avg_pred(temp3, second_pred, 8, 8, temp2, 8);
+ vp9_comp_avg_pred(temp3, second_pred, 8, 8, temp2, 8);
return vp9_variance8x8(temp3, 8, dst_ptr, dst_pixels_per_line, sse);
}
@@ -703,7 +709,7 @@ unsigned int vp9_sub_pixel_avg_variance16x16_c(const uint8_t *src_ptr,
1, 17, 16, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 16, 16, vfilter);
- comp_avg_pred(temp3, second_pred, 16, 16, temp2, 16);
+ vp9_comp_avg_pred(temp3, second_pred, 16, 16, temp2, 16);
return vp9_variance16x16(temp3, 16, dst_ptr, dst_pixels_per_line, sse);
}
@@ -747,7 +753,7 @@ unsigned int vp9_sub_pixel_avg_variance64x64_c(const uint8_t *src_ptr,
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 65, 64, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 64, 64, 64, 64, vfilter);
- comp_avg_pred(temp3, second_pred, 64, 64, temp2, 64);
+ vp9_comp_avg_pred(temp3, second_pred, 64, 64, temp2, 64);
return vp9_variance64x64(temp3, 64, dst_ptr, dst_pixels_per_line, sse);
}
@@ -791,7 +797,7 @@ unsigned int vp9_sub_pixel_avg_variance32x32_c(const uint8_t *src_ptr,
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 33, 32, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 32, 32, vfilter);
- comp_avg_pred(temp3, second_pred, 32, 32, temp2, 32);
+ vp9_comp_avg_pred(temp3, second_pred, 32, 32, temp2, 32);
return vp9_variance32x32(temp3, 32, dst_ptr, dst_pixels_per_line, sse);
}
@@ -955,7 +961,7 @@ unsigned int vp9_sub_pixel_avg_variance16x8_c(const uint8_t *src_ptr,
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 9, 16, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 8, 16, vfilter);
- comp_avg_pred(temp3, second_pred, 16, 8, temp2, 16);
+ vp9_comp_avg_pred(temp3, second_pred, 16, 8, temp2, 16);
return vp9_variance16x8(temp3, 16, dst_ptr, dst_pixels_per_line, sse);
}
@@ -999,7 +1005,7 @@ unsigned int vp9_sub_pixel_avg_variance8x16_c(const uint8_t *src_ptr,
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 17, 8, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 16, 8, vfilter);
- comp_avg_pred(temp3, second_pred, 8, 16, temp2, 8);
+ vp9_comp_avg_pred(temp3, second_pred, 8, 16, temp2, 8);
return vp9_variance8x16(temp3, 8, dst_ptr, dst_pixels_per_line, sse);
}
@@ -1043,7 +1049,7 @@ unsigned int vp9_sub_pixel_avg_variance8x4_c(const uint8_t *src_ptr,
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 5, 8, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 4, 8, vfilter);
- comp_avg_pred(temp3, second_pred, 8, 4, temp2, 8);
+ vp9_comp_avg_pred(temp3, second_pred, 8, 4, temp2, 8);
return vp9_variance8x4(temp3, 8, dst_ptr, dst_pixels_per_line, sse);
}
@@ -1089,6 +1095,23 @@ unsigned int vp9_sub_pixel_avg_variance4x8_c(const uint8_t *src_ptr,
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 9, 4, hfilter);
var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 8, 4, vfilter);
- comp_avg_pred(temp3, second_pred, 4, 8, temp2, 4);
+ vp9_comp_avg_pred(temp3, second_pred, 4, 8, temp2, 4);
return vp9_variance4x8(temp3, 4, dst_ptr, dst_pixels_per_line, sse);
}
+
+
+void vp9_comp_avg_pred(uint8_t *comp_pred, const uint8_t *pred, int width,
+ int height, const uint8_t *ref, int ref_stride) {
+ int i, j;
+
+ for (i = 0; i < height; i++) {
+ for (j = 0; j < width; j++) {
+ int tmp;
+ tmp = pred[j] + ref[j];
+ comp_pred[j] = (tmp + 1) >> 1;
+ }
+ comp_pred += width;
+ pred += width;
+ ref += ref_stride;
+ }
+}
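
For context, the vp9_comp_avg_pred() exported by this change computes a per-pixel rounded average of two predictors, comp_pred[j] = (pred[j] + ref[j] + 1) >> 1, which the vp9_sub_pixel_avg_variance*_c() callers above then feed into the plain variance functions. The following standalone C sketch is not part of the commit: it uses a local copy of the averaging kernel and hypothetical 4x4 test data purely to illustrate that rounding behaviour.

/*
 * Illustration only: local copy of the averaging kernel added by this
 * change, exercised on made-up 4x4 data (not part of the commit).
 */
#include <stdint.h>
#include <stdio.h>

static void comp_avg_pred_sketch(uint8_t *comp_pred, const uint8_t *pred,
                                 int width, int height, const uint8_t *ref,
                                 int ref_stride) {
  int i, j;

  for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
      const int tmp = pred[j] + ref[j];
      comp_pred[j] = (tmp + 1) >> 1;  /* average, rounding ties upward */
    }
    comp_pred += width;
    pred += width;
    ref += ref_stride;
  }
}

int main(void) {
  /* Hypothetical blocks; a real caller passes the sub-pel filtered output
   * (temp2) and the second prediction, as in the diff above. */
  const uint8_t pred[16] = {  0,   1,   2,   3,  10,  11,  12,  13,
                             20,  21,  22,  23, 250, 251, 252, 253 };
  const uint8_t ref[16]  = {  1,   1,   1,   1,  10,  10,  10,  10,
                             23,  23,  23,  23, 255, 255, 255, 255 };
  uint8_t comp[16];
  int i;

  comp_avg_pred_sketch(comp, pred, 4, 4, ref, 4 /* ref_stride == width */);

  for (i = 0; i < 16; i++)
    printf("%3d%c", comp[i], (i % 4 == 3) ? '\n' : ' ');
  return 0;
}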