author     Steve Kondik <steve@cyngn.com>    2016-01-18 00:49:02 -0800
committer  Steve Kondik <steve@cyngn.com>    2016-01-18 00:49:02 -0800
commit     a86ba6643fd348ab473be955a0c57714665345d9 (patch)
tree       2457c1aed6ce52de181e1c9b383890734ade6c4a
parent     c09118c500be0aa1e10113d18e21d38e63722e5a (diff)
parent     b8fa3ff95d884060f39c5d50e811404fe94b9301 (diff)
Merge branch 'release/2.8' of https://github.com/FFmpeg/FFmpeg into cm-13.0 (cm-13.0-old)
Diffstat:
-rw-r--r--   Changelogs                    |  43
-rw-r--r--   RELEASE                       |   2
-rwxr-xr-x   configure                     |   2
-rw-r--r--   doc/Doxyfile                  |   2
-rw-r--r--   libavcodec/aacdec_template.c  |   4
-rw-r--r--   libavcodec/dvdec.c            |   2
-rw-r--r--   libavcodec/g2meet.c           |   7
-rw-r--r--   libavcodec/h264_slice.c       |   4
-rw-r--r--   libavcodec/ituh263enc.c       |   6
-rw-r--r--   libavcodec/mjpegdec.c         |   5
-rw-r--r--   libavcodec/motion_est.c       |  16
-rw-r--r--   libavcodec/motion_est.h       |   3
-rw-r--r--   libavcodec/mpeg12enc.c        |   6
-rw-r--r--   libavcodec/mpeg4video.h       |   2
-rw-r--r--   libavcodec/mpeg4videoenc.c    |  10
-rw-r--r--   libavcodec/mpegvideo_enc.c    |  11
-rw-r--r--   libavcodec/mss2.c             |   7
-rw-r--r--   libavcodec/pngenc.c           |  13
-rw-r--r--   libavcodec/put_bits.h         |  20
-rw-r--r--   libavcodec/snowenc.c          |   2
-rw-r--r--   libavcodec/wavpackenc.c       |   7
-rw-r--r--   libavfilter/vf_scale.c        |   2
-rw-r--r--   libavfilter/vf_zoompan.c      |   2
-rw-r--r--   libavformat/asfenc.c          |   5
-rw-r--r--   libavformat/avformat.h        |   2
-rw-r--r--   libavformat/aviobuf.c         |   2
-rw-r--r--   libavformat/hls.c             |  12
-rw-r--r--   libavformat/ivfenc.c          |   5
-rw-r--r--   libavformat/mov.c             |   2
-rw-r--r--   libavformat/utils.c           |  10
-rw-r--r--   libavutil/x86/float_dsp.asm   |   6
-rw-r--r--   libswscale/swscale_internal.h |  13
-rw-r--r--   libswscale/utils.c            |  31
-rw-r--r--   libswscale/vscale.c           |  42
-rw-r--r--   libswscale/yuv2rgb.c          |  89
35 files changed, 274 insertions(+), 123 deletions(-)
diff --git a/Changelog b/Changelog
index b7f32b1d20..a967eebcdb 100644
--- a/Changelog
+++ b/Changelog
@@ -1,6 +1,49 @@
Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.
+
+version 2.8.5
+- avformat/hls: Even stricter URL checks
+- avformat/hls: More strict url checks
+- avcodec/pngenc: Fix mixed up linesizes
+- avcodec/pngenc: Replace memcpy by av_image_copy()
+- swscale/vscale: Check that 2 tap filters are bilinear before using bilinear code
+- swscale: Move VScalerContext into vscale.c
+- swscale/utils: Detect and skip unneeded sws_setColorspaceDetails() calls
+- swscale/yuv2rgb: Increase YUV2RGB table headroom
+- swscale/yuv2rgb: Factor YUVRGB_TABLE_LUMA_HEADROOM out
+- avformat/hls: forbid all protocols except http(s) & file
+- avformat/aviobuf: Fix end check in put_str16()
+- avformat/asfenc: Check pts
+- avcodec/mpeg4video: Check time_incr
+- avcodec/wavpackenc: Check the number of channels
+- avcodec/wavpackenc: Headers are per channel
+- avcodec/aacdec_template: Check id_map
+- avcodec/dvdec: Fix "left shift of negative value -254"
+- avcodec/g2meet: Check for ff_els_decode_bit() failure in epic_decode_run_length()
+- avcodec/mjpegdec: Fix negative shift
+- avcodec/mss2: Check for repeat overflow
+- avformat: Add integer fps from 31 to 60 to get_std_framerate()
+- avformat/ivfenc: fix division by zero
+- avcodec/mpegvideo_enc: Clip bits_per_raw_sample within valid range
+- avfilter/vf_scale: set proper out frame color range
+- avcodec/motion_est: Fix mv_penalty table size
+- avcodec/h264_slice: Fix integer overflow in implicit weight computation
+- swscale/utils: Use normal bilinear scaler if fast cannot be used due to tiny dimensions
+- avcodec/put_bits: Always check buffer end before writing
+- mjpegdec: extend check for incompatible values of s->rgb and s->ls
+- swscale/utils: Fix intermediate format for cascaded alpha downscaling
+- avformat/mov: Update handbrake_version threshold for full mp3 parsing
+- x86/float_dsp: zero extend offset from ff_scalarproduct_float_sse
+- avfilter/vf_zoompan: do not free frame we pushed to lavfi
+- nuv: sanitize negative fps rate
+- nutdec: reject negative value_len in read_sm_data
+- xwddec: prevent overflow of lsize * avctx->height
+- nutdec: only copy the header if it exists
+- exr: fix out of bounds read in get_code
+- on2avc: limit number of bits to 30 in get_egolomb
+
+
version 2.8.4
- rawdec: only exempt BIT0 with need_copy from buffer sanity check
- mlvdec: check that index_entries exist
diff --git a/RELEASE b/RELEASE
index 2701a226a2..766d70806b 100644
--- a/RELEASE
+++ b/RELEASE
@@ -1 +1 @@
-2.8.4
+2.8.5
diff --git a/configure b/configure
index 7f9fed31a4..ec4ff083dd 100755
--- a/configure
+++ b/configure
@@ -6141,7 +6141,7 @@ cat > $TMPH <<EOF
#define FFMPEG_CONFIG_H
#define FFMPEG_CONFIGURATION "$(c_escape $FFMPEG_CONFIGURATION)"
#define FFMPEG_LICENSE "$(c_escape $license)"
-#define CONFIG_THIS_YEAR 2015
+#define CONFIG_THIS_YEAR 2016
#define FFMPEG_DATADIR "$(eval c_escape $datadir)"
#define AVCONV_DATADIR "$(eval c_escape $datadir)"
#define CC_IDENT "$(c_escape ${cc_ident:-Unknown compiler})"
diff --git a/doc/Doxyfile b/doc/Doxyfile
index 5c4b4660f6..61bd8cbedc 100644
--- a/doc/Doxyfile
+++ b/doc/Doxyfile
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.
-PROJECT_NUMBER = 2.8.4
+PROJECT_NUMBER = 2.8.5
# With the PROJECT_LOGO tag one can specify a logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
diff --git a/libavcodec/aacdec_template.c b/libavcodec/aacdec_template.c
index fb1ce2cb06..ec8264e5f1 100644
--- a/libavcodec/aacdec_template.c
+++ b/libavcodec/aacdec_template.c
@@ -449,6 +449,10 @@ static int output_configure(AACContext *ac,
int type = layout_map[i][0];
int id = layout_map[i][1];
id_map[type][id] = type_counts[type]++;
+ if (id_map[type][id] >= MAX_ELEM_ID) {
+ avpriv_request_sample(ac->avctx, "Remapped id too large\n");
+ return AVERROR_PATCHWELCOME;
+ }
}
// Try to sniff a reasonable channel order, otherwise output the
// channels in the order the PCE declared them.
diff --git a/libavcodec/dvdec.c b/libavcodec/dvdec.c
index 679075e6a9..1a3983b501 100644
--- a/libavcodec/dvdec.c
+++ b/libavcodec/dvdec.c
@@ -347,7 +347,7 @@ retry:
dct_mode * 22 * 64 +
(quant + ff_dv_quant_offset[class1]) * 64];
}
- dc = dc << 2;
+ dc = dc * 4;
/* convert to unsigned because 128 is not added in the
* standard IDCT */
dc += 1024;
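
A quick standalone illustration of why the shift above was replaced: left-shifting a negative value is undefined behaviour in C, while the equivalent multiply is well defined (the -254 comes from the sanitizer report quoted in the Changelog).

    #include <stdio.h>

    int main(void)
    {
        int dc = -254;
        /* int bad = dc << 2; */  /* undefined: left shift of a negative value */
        int ok = dc * 4;          /* well defined, same numeric result */
        printf("%d\n", ok);
        return 0;
    }
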
diff --git a/libavcodec/g2meet.c b/libavcodec/g2meet.c
index ba83c67337..7d052224c9 100644
--- a/libavcodec/g2meet.c
+++ b/libavcodec/g2meet.c
@@ -631,6 +631,8 @@ static int epic_decode_run_length(ePICContext *dc, int x, int y, int tile_width,
(NN != N) << 1 |
(NNW != NW);
WWneW = ff_els_decode_bit(&dc->els_ctx, &dc->W_ctx_rung[idx]);
+ if (WWneW < 0)
+ return WWneW;
}
if (WWneW)
@@ -837,10 +839,13 @@ static int epic_decode_tile(ePICContext *dc, uint8_t *out, int tile_height,
if (y < 2 || x < 2 || x == tile_width - 1) {
run = 1;
got_pixel = epic_handle_edges(dc, x, y, curr_row, above_row, &pix);
- } else
+ } else {
got_pixel = epic_decode_run_length(dc, x, y, tile_width,
curr_row, above_row,
above2_row, &pix, &run);
+ if (got_pixel < 0)
+ return got_pixel;
+ }
if (!got_pixel && !epic_predict_from_NW_NE(dc, x, y, run,
tile_width, curr_row,
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
index 8be803b7fd..63eb463e8e 100644
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -783,7 +783,7 @@ static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, in
cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
}
if (sl->ref_count[0] == 1 && sl->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
- sl->ref_list[0][0].poc + sl->ref_list[1][0].poc == 2 * cur_poc) {
+ sl->ref_list[0][0].poc + (int64_t)sl->ref_list[1][0].poc == 2 * cur_poc) {
sl->use_weight = 0;
sl->use_weight_chroma = 0;
return;
@@ -804,7 +804,7 @@ static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, in
sl->chroma_log2_weight_denom = 5;
for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
- int poc0 = sl->ref_list[0][ref0].poc;
+ int64_t poc0 = sl->ref_list[0][ref0].poc;
for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
int w = 32;
if (!sl->ref_list[0][ref0].parent->long_ref && !sl->ref_list[1][ref1].parent->long_ref) {
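
The int64_t promotions above keep the POC arithmetic from overflowing a 32-bit int when picture order counts sit near INT_MAX; a minimal sketch of the failure mode (the POC values are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int poc0 = 2147483000, poc1 = 2147483000; /* hypothetical POCs near INT_MAX */
        /* int bad = poc0 + poc1; */              /* signed overflow: undefined */
        int64_t sum = poc0 + (int64_t)poc1;       /* promoted addition, as in the patch */
        printf("%lld\n", (long long)sum);
        return 0;
    }
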
diff --git a/libavcodec/ituh263enc.c b/libavcodec/ituh263enc.c
index 03f4011bfa..d9596c9f3b 100644
--- a/libavcodec/ituh263enc.c
+++ b/libavcodec/ituh263enc.c
@@ -45,7 +45,7 @@
/**
* Table of number of bits a motion vector component needs.
*/
-static uint8_t mv_penalty[MAX_FCODE+1][MAX_MV*2+1];
+static uint8_t mv_penalty[MAX_FCODE+1][MAX_DMV*2+1];
/**
* Minimal fcode that a motion vector component would need.
@@ -678,7 +678,7 @@ static av_cold void init_mv_penalty_and_fcode(MpegEncContext *s)
int mv;
for(f_code=1; f_code<=MAX_FCODE; f_code++){
- for(mv=-MAX_MV; mv<=MAX_MV; mv++){
+ for(mv=-MAX_DMV; mv<=MAX_DMV; mv++){
int len;
if(mv==0) len= ff_mvtab[0][1];
@@ -699,7 +699,7 @@ static av_cold void init_mv_penalty_and_fcode(MpegEncContext *s)
}
}
- mv_penalty[f_code][mv+MAX_MV]= len;
+ mv_penalty[f_code][mv+MAX_DMV]= len;
}
}
diff --git a/libavcodec/mjpegdec.c b/libavcodec/mjpegdec.c
index 3f81fdfc68..ba779949a0 100644
--- a/libavcodec/mjpegdec.c
+++ b/libavcodec/mjpegdec.c
@@ -632,7 +632,8 @@ unk_pixfmt:
av_log(s->avctx, AV_LOG_DEBUG, "decode_sof0: error, len(%d) mismatch\n", len);
}
- if (s->rgb && !s->lossless && !s->ls) {
+ if ((s->rgb && !s->lossless && !s->ls) ||
+ (!s->rgb && s->ls && s->nb_components > 1)) {
av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
return AVERROR_PATCHWELCOME;
}
@@ -998,7 +999,7 @@ static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int p
return -1;
left[i] = buffer[mb_x][i] =
- mask & (pred + (dc << point_transform));
+ mask & (pred + (dc * (1 << point_transform)));
}
if (s->restart_interval && !--s->restart_count) {
diff --git a/libavcodec/motion_est.c b/libavcodec/motion_est.c
index 9f71568efd..3df8276778 100644
--- a/libavcodec/motion_est.c
+++ b/libavcodec/motion_est.c
@@ -923,7 +923,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
c->penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_cmp);
c->sub_penalty_factor= get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_sub_cmp);
c->mb_penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->mb_cmp);
- c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_MV;
+ c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_DMV;
get_limits(s, 16*mb_x, 16*mb_y);
c->skip=0;
@@ -1090,7 +1090,7 @@ int ff_pre_estimate_p_frame_motion(MpegEncContext * s,
av_assert0(s->quarter_sample==0 || s->quarter_sample==1);
c->pre_penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_pre_cmp);
- c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_MV;
+ c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_DMV;
get_limits(s, 16*mb_x, 16*mb_y);
c->skip=0;
@@ -1139,7 +1139,7 @@ static int estimate_motion_b(MpegEncContext *s, int mb_x, int mb_y,
const int shift= 1+s->quarter_sample;
const int mot_stride = s->mb_stride;
const int mot_xy = mb_y*mot_stride + mb_x;
- uint8_t * const mv_penalty= c->mv_penalty[f_code] + MAX_MV;
+ uint8_t * const mv_penalty= c->mv_penalty[f_code] + MAX_DMV;
int mv_scale;
c->penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_cmp);
@@ -1203,8 +1203,8 @@ static inline int check_bidir_mv(MpegEncContext * s,
//FIXME better f_code prediction (max mv & distance)
//FIXME pointers
MotionEstContext * const c= &s->me;
- uint8_t * const mv_penalty_f= c->mv_penalty[s->f_code] + MAX_MV; // f_code of the prev frame
- uint8_t * const mv_penalty_b= c->mv_penalty[s->b_code] + MAX_MV; // f_code of the prev frame
+ uint8_t * const mv_penalty_f= c->mv_penalty[s->f_code] + MAX_DMV; // f_code of the prev frame
+ uint8_t * const mv_penalty_b= c->mv_penalty[s->b_code] + MAX_DMV; // f_code of the prev frame
int stride= c->stride;
uint8_t *dest_y = c->scratchpad;
uint8_t *ptr;
@@ -1417,7 +1417,7 @@ static inline int direct_search(MpegEncContext * s, int mb_x, int mb_y)
int mx, my, xmin, xmax, ymin, ymax;
int16_t (*mv_table)[2]= s->b_direct_mv_table;
- c->current_mv_penalty= c->mv_penalty[1] + MAX_MV;
+ c->current_mv_penalty= c->mv_penalty[1] + MAX_DMV;
ymin= xmin=(-32)>>shift;
ymax= xmax= 31>>shift;
@@ -1553,11 +1553,11 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
//FIXME mb type penalty
c->skip=0;
- c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_MV;
+ c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_DMV;
fimin= interlaced_search(s, 0,
s->b_field_mv_table[0], s->b_field_select_table[0],
s->b_forw_mv_table[xy][0], s->b_forw_mv_table[xy][1], 0);
- c->current_mv_penalty= c->mv_penalty[s->b_code] + MAX_MV;
+ c->current_mv_penalty= c->mv_penalty[s->b_code] + MAX_DMV;
bimin= interlaced_search(s, 2,
s->b_field_mv_table[1], s->b_field_select_table[1],
s->b_back_mv_table[xy][0], s->b_back_mv_table[xy][1], 0);
diff --git a/libavcodec/motion_est.h b/libavcodec/motion_est.h
index e09f705eee..6c0c674b2e 100644
--- a/libavcodec/motion_est.h
+++ b/libavcodec/motion_est.h
@@ -30,6 +30,7 @@
struct MpegEncContext;
#define MAX_MV 4096
+#define MAX_DMV (2*MAX_MV)
#define FF_ME_ZERO 0
#define FF_ME_EPZS 1
@@ -84,7 +85,7 @@ typedef struct MotionEstContext {
op_pixels_func(*hpel_avg)[4];
qpel_mc_func(*qpel_put)[16];
qpel_mc_func(*qpel_avg)[16];
- uint8_t (*mv_penalty)[MAX_MV * 2 + 1]; ///< bit amount needed to encode a MV
+ uint8_t (*mv_penalty)[MAX_DMV * 2 + 1]; ///< bit amount needed to encode a MV
uint8_t *current_mv_penalty;
int (*sub_motion_search)(struct MpegEncContext *s,
int *mx_ptr, int *my_ptr, int dmin,
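
Presumably the penalty tables are indexed by the difference between a candidate vector and its prediction, and two vectors each bounded by MAX_MV can differ by up to 2*MAX_MV, hence the larger MAX_DMV row size; a back-of-the-envelope check with the same constants:

    #include <stdio.h>

    #define MAX_MV  4096
    #define MAX_DMV (2 * MAX_MV)

    int main(void)
    {
        /* difference of two vectors, each in [-MAX_MV, MAX_MV] */
        printf("difference range: [%d, %d]\n", -MAX_DMV, MAX_DMV);
        printf("entries per f_code row: %d\n", MAX_DMV * 2 + 1);
        return 0;
    }
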
diff --git a/libavcodec/mpeg12enc.c b/libavcodec/mpeg12enc.c
index 6f87117058..780f21f359 100644
--- a/libavcodec/mpeg12enc.c
+++ b/libavcodec/mpeg12enc.c
@@ -53,7 +53,7 @@ static const uint8_t svcd_scan_offset_placeholder[] = {
0x81, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};
-static uint8_t mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
+static uint8_t mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
static uint8_t fcode_tab[MAX_MV * 2 + 1];
static uint8_t uni_mpeg1_ac_vlc_len[64 * 64 * 2];
@@ -1053,7 +1053,7 @@ av_cold void ff_mpeg1_encode_init(MpegEncContext *s)
}
for (f_code = 1; f_code <= MAX_FCODE; f_code++)
- for (mv = -MAX_MV; mv <= MAX_MV; mv++) {
+ for (mv = -MAX_DMV; mv <= MAX_DMV; mv++) {
int len;
if (mv == 0) {
@@ -1076,7 +1076,7 @@ av_cold void ff_mpeg1_encode_init(MpegEncContext *s)
2 + bit_size;
}
- mv_penalty[f_code][mv + MAX_MV] = len;
+ mv_penalty[f_code][mv + MAX_DMV] = len;
}
diff --git a/libavcodec/mpeg4video.h b/libavcodec/mpeg4video.h
index 49bc13f87a..5998c7191c 100644
--- a/libavcodec/mpeg4video.h
+++ b/libavcodec/mpeg4video.h
@@ -140,7 +140,7 @@ void ff_mpeg4_encode_mb(MpegEncContext *s,
void ff_mpeg4_pred_ac(MpegEncContext *s, int16_t *block, int n,
int dir);
void ff_set_mpeg4_time(MpegEncContext *s);
-void ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number);
+int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number);
int ff_mpeg4_decode_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb);
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s);
diff --git a/libavcodec/mpeg4videoenc.c b/libavcodec/mpeg4videoenc.c
index ffa08beb87..b3ad4f9042 100644
--- a/libavcodec/mpeg4videoenc.c
+++ b/libavcodec/mpeg4videoenc.c
@@ -1086,7 +1086,7 @@ static void mpeg4_encode_vol_header(MpegEncContext *s,
}
/* write mpeg4 VOP header */
-void ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
+int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
{
int time_incr;
int time_div, time_mod;
@@ -1112,6 +1112,12 @@ void ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
time_mod = FFUMOD(s->time, s->avctx->time_base.den);
time_incr = time_div - s->last_time_base;
av_assert0(time_incr >= 0);
+
+ // This limits the frame duration to max 1 hour
+ if (time_incr > 3600) {
+ av_log(s->avctx, AV_LOG_ERROR, "time_incr %d too large\n", time_incr);
+ return AVERROR(EINVAL);
+ }
while (time_incr--)
put_bits(&s->pb, 1, 1);
@@ -1137,6 +1143,8 @@ void ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
put_bits(&s->pb, 3, s->f_code); /* fcode_for */
if (s->pict_type == AV_PICTURE_TYPE_B)
put_bits(&s->pb, 3, s->b_code); /* fcode_back */
+
+ return 0;
}
static av_cold void init_uni_dc_tab(void)
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index 04be96759b..ebd5abbf8a 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -76,7 +76,7 @@ static int sse_mb(MpegEncContext *s);
static void denoise_dct_c(MpegEncContext *s, int16_t *block);
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
-static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
+static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
const AVOption ff_mpv_generic_options[] = {
@@ -342,6 +342,7 @@ av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
break;
}
+ avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
s->bit_rate = avctx->bit_rate;
s->width = avctx->width;
s->height = avctx->height;
@@ -3765,9 +3766,11 @@ static int encode_picture(MpegEncContext *s, int picture_number)
ff_wmv2_encode_picture_header(s, picture_number);
else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
ff_msmpeg4_encode_picture_header(s, picture_number);
- else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
- ff_mpeg4_encode_picture_header(s, picture_number);
- else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
+ else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
+ ret = ff_mpeg4_encode_picture_header(s, picture_number);
+ if (ret < 0)
+ return ret;
+ } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
ret = ff_rv10_encode_picture_header(s, picture_number);
if (ret < 0)
return ret;
diff --git a/libavcodec/mss2.c b/libavcodec/mss2.c
index 74e52af6cd..c640934986 100644
--- a/libavcodec/mss2.c
+++ b/libavcodec/mss2.c
@@ -210,8 +210,13 @@ static int decode_555(GetByteContext *gB, uint16_t *dst, int stride,
last_symbol = b << 8 | bytestream2_get_byte(gB);
else if (b > 129) {
repeat = 0;
- while (b-- > 130)
+ while (b-- > 130) {
+ if (repeat >= (INT_MAX >> 8) - 1) {
+ av_log(NULL, AV_LOG_ERROR, "repeat overflow\n");
+ return AVERROR_INVALIDDATA;
+ }
repeat = (repeat << 8) + bytestream2_get_byte(gB) + 1;
+ }
if (last_symbol == -2) {
int skip = FFMIN((unsigned)repeat, dst + w - p);
repeat -= skip;
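
The guard added above bounds the accumulated run length before it is shifted left by 8 bits; a standalone sketch of the same check (the input byte is hypothetical):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        int repeat = 0;
        unsigned char next = 0xff;             /* hypothetical input byte */
        if (repeat >= (INT_MAX >> 8) - 1) {    /* the shift below would overflow */
            fprintf(stderr, "repeat overflow\n");
            return 1;
        }
        repeat = (repeat << 8) + next + 1;     /* stays within int range */
        printf("%d\n", repeat);
        return 0;
    }
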
diff --git a/libavcodec/pngenc.c b/libavcodec/pngenc.c
index f6ad830cd9..edc9011a9b 100644
--- a/libavcodec/pngenc.c
+++ b/libavcodec/pngenc.c
@@ -747,12 +747,11 @@ static int apng_encode_frame(AVCodecContext *avctx, const AVFrame *pict,
// Do disposal
if (last_fctl_chunk.dispose_op != APNG_DISPOSE_OP_PREVIOUS) {
- memcpy(diffFrame->data[0], s->last_frame->data[0],
- s->last_frame->linesize[0] * s->last_frame->height);
+ av_frame_copy(diffFrame, s->last_frame);
if (last_fctl_chunk.dispose_op == APNG_DISPOSE_OP_BACKGROUND) {
for (y = last_fctl_chunk.y_offset; y < last_fctl_chunk.y_offset + last_fctl_chunk.height; ++y) {
- size_t row_start = s->last_frame->linesize[0] * y + bpp * last_fctl_chunk.x_offset;
+ size_t row_start = diffFrame->linesize[0] * y + bpp * last_fctl_chunk.x_offset;
memset(diffFrame->data[0] + row_start, 0, bpp * last_fctl_chunk.width);
}
}
@@ -760,8 +759,7 @@ static int apng_encode_frame(AVCodecContext *avctx, const AVFrame *pict,
if (!s->prev_frame)
continue;
- memcpy(diffFrame->data[0], s->prev_frame->data[0],
- s->prev_frame->linesize[0] * s->prev_frame->height);
+ av_frame_copy(diffFrame, s->prev_frame);
}
// Do inverse blending
@@ -923,13 +921,12 @@ static int encode_apng(AVCodecContext *avctx, AVPacket *pkt,
}
// Do disposal, but not blending
- memcpy(s->prev_frame->data[0], s->last_frame->data[0],
- s->last_frame->linesize[0] * s->last_frame->height);
+ av_frame_copy(s->prev_frame, s->last_frame);
if (s->last_frame_fctl.dispose_op == APNG_DISPOSE_OP_BACKGROUND) {
uint32_t y;
uint8_t bpp = (s->bits_per_pixel + 7) >> 3;
for (y = s->last_frame_fctl.y_offset; y < s->last_frame_fctl.y_offset + s->last_frame_fctl.height; ++y) {
- size_t row_start = s->last_frame->linesize[0] * y + bpp * s->last_frame_fctl.x_offset;
+ size_t row_start = s->prev_frame->linesize[0] * y + bpp * s->last_frame_fctl.x_offset;
memset(s->prev_frame->data[0] + row_start, 0, bpp * s->last_frame_fctl.width);
}
}
diff --git a/libavcodec/put_bits.h b/libavcodec/put_bits.h
index 5b1bc8b8b7..0db8a033ad 100644
--- a/libavcodec/put_bits.h
+++ b/libavcodec/put_bits.h
@@ -163,9 +163,13 @@ static inline void put_bits(PutBitContext *s, int n, unsigned int value)
#ifdef BITSTREAM_WRITER_LE
bit_buf |= value << (32 - bit_left);
if (n >= bit_left) {
- av_assert2(s->buf_ptr+3<s->buf_end);
- AV_WL32(s->buf_ptr, bit_buf);
- s->buf_ptr += 4;
+ if (3 < s->buf_end - s->buf_ptr) {
+ AV_WL32(s->buf_ptr, bit_buf);
+ s->buf_ptr += 4;
+ } else {
+ av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
+ av_assert2(0);
+ }
bit_buf = value >> bit_left;
bit_left += 32;
}
@@ -177,9 +181,13 @@ static inline void put_bits(PutBitContext *s, int n, unsigned int value)
} else {
bit_buf <<= bit_left;
bit_buf |= value >> (n - bit_left);
- av_assert2(s->buf_ptr+3<s->buf_end);
- AV_WB32(s->buf_ptr, bit_buf);
- s->buf_ptr += 4;
+ if (3 < s->buf_end - s->buf_ptr) {
+ AV_WB32(s->buf_ptr, bit_buf);
+ s->buf_ptr += 4;
+ } else {
+ av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
+ av_assert2(0);
+ }
bit_left += 32 - n;
bit_buf = value;
}
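
Both hunks turn the old assert into a runtime check that at least 4 writable bytes remain before each 32-bit store; a minimal sketch of that pattern with hypothetical names (not the libavcodec API):

    #include <stdint.h>
    #include <string.h>

    /* hypothetical helper mirroring the patched bounds check in put_bits() */
    static int write_u32_checked(uint8_t **p, const uint8_t *end, uint32_t v)
    {
        if (end - *p <= 3)  /* fewer than 4 bytes left: report instead of overrunning */
            return -1;
        memcpy(*p, &v, 4);  /* stand-in for AV_WB32()/AV_WL32() */
        *p += 4;
        return 0;
    }

    int main(void)
    {
        uint8_t buf[8], *p = buf;
        return write_u32_checked(&p, buf + sizeof(buf), 0x12345678u) ? 1 : 0;
    }
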
diff --git a/libavcodec/snowenc.c b/libavcodec/snowenc.c
index 5e5dc35e86..6757971ce3 100644
--- a/libavcodec/snowenc.c
+++ b/libavcodec/snowenc.c
@@ -291,7 +291,7 @@ static int encode_q_branch(SnowContext *s, int level, int x, int y){
c->penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_cmp);
c->sub_penalty_factor= get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_sub_cmp);
c->mb_penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->mb_cmp);
- c->current_mv_penalty= c->mv_penalty[s->m.f_code=1] + MAX_MV;
+ c->current_mv_penalty= c->mv_penalty[s->m.f_code=1] + MAX_DMV;
c->xmin = - x*block_w - 16+3;
c->ymin = - y*block_w - 16+3;
diff --git a/libavcodec/wavpackenc.c b/libavcodec/wavpackenc.c
index 6091e3f39b..751d1fdf92 100644
--- a/libavcodec/wavpackenc.c
+++ b/libavcodec/wavpackenc.c
@@ -128,6 +128,11 @@ static av_cold int wavpack_encode_init(AVCodecContext *avctx)
s->avctx = avctx;
+ if (avctx->channels > 255) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid channel count: %d\n", avctx->channels);
+ return AVERROR(EINVAL);
+ }
+
if (!avctx->frame_size) {
int block_samples;
if (!(avctx->sample_rate & 1))
@@ -2878,7 +2883,7 @@ static int wavpack_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
}
buf_size = s->block_samples * avctx->channels * 8
- + 200 /* for headers */;
+ + 200 * avctx->channels /* for headers */;
if ((ret = ff_alloc_packet2(avctx, avpkt, buf_size, 0)) < 0)
return ret;
buf = avpkt->data;
diff --git a/libavfilter/vf_scale.c b/libavfilter/vf_scale.c
index a40b392c65..8780be4bae 100644
--- a/libavfilter/vf_scale.c
+++ b/libavfilter/vf_scale.c
@@ -565,6 +565,8 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
sws_setColorspaceDetails(scale->isws[1], inv_table, in_full,
table, out_full,
brightness, contrast, saturation);
+
+ av_frame_set_color_range(out, out_full ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG);
}
av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
diff --git a/libavfilter/vf_zoompan.c b/libavfilter/vf_zoompan.c
index fb6acd4da6..c65ce56602 100644
--- a/libavfilter/vf_zoompan.c
+++ b/libavfilter/vf_zoompan.c
@@ -230,9 +230,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
s->frame_count++;
ret = ff_filter_frame(outlink, out);
+ out = NULL;
if (ret < 0)
break;
- out = NULL;
sws_freeContext(s->sws);
s->sws = NULL;
diff --git a/libavformat/asfenc.c b/libavformat/asfenc.c
index 32b726bb29..94f96b4dbe 100644
--- a/libavformat/asfenc.c
+++ b/libavformat/asfenc.c
@@ -959,6 +959,11 @@ static int asf_write_packet(AVFormatContext *s, AVPacket *pkt)
pts = (pkt->pts != AV_NOPTS_VALUE) ? pkt->pts : pkt->dts;
av_assert0(pts != AV_NOPTS_VALUE);
+ if ( pts < - PREROLL_TIME
+ || pts > (INT_MAX-3)/10000LL * ASF_INDEXED_INTERVAL - PREROLL_TIME) {
+ av_log(s, AV_LOG_ERROR, "input pts %"PRId64" is invalid\n", pts);
+ return AVERROR(EINVAL);
+ }
pts *= 10000;
asf->duration = FFMAX(asf->duration, pts + pkt->duration * 10000);
diff --git a/libavformat/avformat.h b/libavformat/avformat.h
index 4068ab6a56..487f5b4b76 100644
--- a/libavformat/avformat.h
+++ b/libavformat/avformat.h
@@ -968,7 +968,7 @@ typedef struct AVStream {
/**
* Stream information used internally by av_find_stream_info()
*/
-#define MAX_STD_TIMEBASES (30*12+7+6)
+#define MAX_STD_TIMEBASES (30*12+30+3+6)
struct {
int64_t last_dts;
int64_t duration_gcd;
diff --git a/libavformat/aviobuf.c b/libavformat/aviobuf.c
index 1b3d5f5b80..4e79e3f917 100644
--- a/libavformat/aviobuf.c
+++ b/libavformat/aviobuf.c
@@ -360,6 +360,8 @@ static inline int put_str16(AVIOContext *s, const char *str, const int be)
invalid:
av_log(s, AV_LOG_ERROR, "Invaid UTF8 sequence in avio_put_str16%s\n", be ? "be" : "le");
err = AVERROR(EINVAL);
+ if (!*(q-1))
+ break;
}
if (be)
avio_wb16(s, 0);
diff --git a/libavformat/hls.c b/libavformat/hls.c
index d3cb14b591..c32ecb129d 100644
--- a/libavformat/hls.c
+++ b/libavformat/hls.c
@@ -618,6 +618,18 @@ static int open_url(HLSContext *c, URLContext **uc, const char *url, AVDictionar
{
AVDictionary *tmp = NULL;
int ret;
+ const char *proto_name = avio_find_protocol_name(url);
+
+ if (!proto_name)
+ return AVERROR_INVALIDDATA;
+
+ // only http(s) & file are allowed
+ if (!av_strstart(proto_name, "http", NULL) && !av_strstart(proto_name, "file", NULL))
+ return AVERROR_INVALIDDATA;
+ if (!strncmp(proto_name, url, strlen(proto_name)) && url[strlen(proto_name)] == ':')
+ ;
+ else if (strcmp(proto_name, "file") || !strncmp(url, "file,", 5))
+ return AVERROR_INVALIDDATA;
av_dict_copy(&tmp, c->avio_opts, 0);
av_dict_copy(&tmp, opts, 0);
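
The checks above restrict segment URLs to http(s) and file and reject nested "file," references; a simplified, hypothetical re-implementation showing which inputs would pass (not the libavformat code itself):

    #include <stdio.h>
    #include <string.h>

    static int is_allowed(const char *proto, const char *url)
    {
        if (strncmp(proto, "http", 4) && strcmp(proto, "file"))
            return 0;                              /* only http(s) and file */
        if (!strncmp(proto, url, strlen(proto)) && url[strlen(proto)] == ':')
            return 1;                              /* plain "proto:..." form */
        if (!strcmp(proto, "file") && strncmp(url, "file,", 5))
            return 1;                              /* bare path handled as file */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", is_allowed("https", "https://host/seg.ts")); /* 1 */
        printf("%d\n", is_allowed("file",  "file,evil"));           /* 0 */
        return 0;
    }
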
diff --git a/libavformat/ivfenc.c b/libavformat/ivfenc.c
index 2053c509f9..484d87d093 100644
--- a/libavformat/ivfenc.c
+++ b/libavformat/ivfenc.c
@@ -72,8 +72,9 @@ static int ivf_write_packet(AVFormatContext *s, AVPacket *pkt)
static int ivf_write_trailer(AVFormatContext *s)
{
AVIOContext *pb = s->pb;
- if (pb->seekable) {
- IVFEncContext *ctx = s->priv_data;
+ IVFEncContext *ctx = s->priv_data;
+
+ if (pb->seekable && ctx->frame_cnt > 1) {
size_t end = avio_tell(pb);
avio_seek(pb, 24, SEEK_SET);
diff --git a/libavformat/mov.c b/libavformat/mov.c
index 4ce4e2dddd..3668f2a95f 100644
--- a/libavformat/mov.c
+++ b/libavformat/mov.c
@@ -4535,7 +4535,7 @@ static int mov_read_header(AVFormatContext *s)
}
}
if (mov->handbrake_version &&
- mov->handbrake_version <= 1000000*0 + 1000*10 + 0 && // 0.10.0
+ mov->handbrake_version <= 1000000*0 + 1000*10 + 2 && // 0.10.2
st->codec->codec_id == AV_CODEC_ID_MP3
) {
av_log(s, AV_LOG_VERBOSE, "Forcing full parsing for mp3 stream\n");
diff --git a/libavformat/utils.c b/libavformat/utils.c
index 30567fa2ec..28c3bdf62c 100644
--- a/libavformat/utils.c
+++ b/libavformat/utils.c
@@ -2892,10 +2892,14 @@ static int get_std_framerate(int i)
return (i + 1) * 1001;
i -= 30*12;
- if (i < 7)
- return ((const int[]) { 40, 48, 50, 60, 80, 120, 240})[i] * 1001 * 12;
+ if (i < 30)
+ return (i + 31) * 1001 * 12;
+ i -= 30;
- i -= 7;
+ if (i < 3)
+ return ((const int[]) { 80, 120, 240})[i] * 1001 * 12;
+
+ i -= 3;
return ((const int[]) { 24, 30, 60, 12, 15, 48 })[i] * 1000 * 12;
}
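
With the extension above, indices 360..389 of get_std_framerate() now cover the integer rates 31-60 fps (dividing each return value by 12*1001 gives the frame rate that index represents); a small sanity check of the patched mapping, copied into a standalone program (the leading i < 30*12 branch is inferred from the context lines):

    #include <stdio.h>

    static int get_std_framerate(int i)
    {
        if (i < 30*12)
            return (i + 1) * 1001;
        i -= 30*12;
        if (i < 30)
            return (i + 31) * 1001 * 12;
        i -= 30;
        if (i < 3)
            return ((const int[]) { 80, 120, 240 })[i] * 1001 * 12;
        i -= 3;
        return ((const int[]) { 24, 30, 60, 12, 15, 48 })[i] * 1000 * 12;
    }

    int main(void)
    {
        printf("%g fps\n", get_std_framerate(360) / (1001.0 * 12)); /* 31 */
        printf("%g fps\n", get_std_framerate(389) / (1001.0 * 12)); /* 60 */
        printf("%g fps\n", get_std_framerate(390) / (1001.0 * 12)); /* 80 */
        return 0;
    }
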
diff --git a/libavutil/x86/float_dsp.asm b/libavutil/x86/float_dsp.asm
index 87229d4374..2800cc6cca 100644
--- a/libavutil/x86/float_dsp.asm
+++ b/libavutil/x86/float_dsp.asm
@@ -332,10 +332,10 @@ VECTOR_FMUL_REVERSE
; float scalarproduct_float_sse(const float *v1, const float *v2, int len)
INIT_XMM sse
cglobal scalarproduct_float, 3,3,2, v1, v2, offset
+ shl offsetd, 2
+ add v1q, offsetq
+ add v2q, offsetq
neg offsetq
- shl offsetq, 2
- sub v1q, offsetq
- sub v2q, offsetq
xorps xmm0, xmm0
.loop:
movaps xmm1, [v1q+offsetq]
diff --git a/libswscale/swscale_internal.h b/libswscale/swscale_internal.h
index e1591c2128..6522d58c4b 100644
--- a/libswscale/swscale_internal.h
+++ b/libswscale/swscale_internal.h
@@ -39,7 +39,8 @@
#define STR(s) AV_TOSTRING(s) // AV_STRINGIFY is too long
-#define YUVRGB_TABLE_HEADROOM 256
+#define YUVRGB_TABLE_HEADROOM 512
+#define YUVRGB_TABLE_LUMA_HEADROOM 512
#define MAX_FILTER_SIZE SWS_MAX_FILTER_SIZE
@@ -396,6 +397,7 @@ typedef struct SwsContext {
uint8_t *chrMmxextFilterCode; ///< Runtime-generated MMXEXT horizontal fast bilinear scaler code for chroma planes.
int canMMXEXTBeUsed;
+ int warned_unuseable_bilinear;
int dstY; ///< Last destination vertical line output from last slice.
int flags; ///< Flags passed by the user to select scaler algorithm, optimizations, subsampling, etc...
@@ -1002,15 +1004,6 @@ typedef struct FilterContext
int xInc;
} FilterContext;
-typedef struct VScalerContext
-{
- uint16_t *filter[2];
- int32_t *filter_pos;
- int filter_size;
- int isMMX;
- void *pfn;
-} VScalerContext;
-
// warp input lines in the form (src + width*i + j) to slice format (line[i][j])
// relative=true means first line src[x][0] otherwise first line is src[x][lum/crh Y]
int ff_init_slice_from_src(SwsSlice * s, uint8_t *src[4], int stride[4], int srcW, int lumY, int lumH, int chrY, int chrH, int relative);
diff --git a/libswscale/utils.c b/libswscale/utils.c
index 2a88dc96f7..37820f68aa 100644
--- a/libswscale/utils.c
+++ b/libswscale/utils.c
@@ -830,8 +830,6 @@ int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4],
const AVPixFmtDescriptor *desc_dst;
const AVPixFmtDescriptor *desc_src;
int need_reinit = 0;
- memmove(c->srcColorspaceTable, inv_table, sizeof(int) * 4);
- memmove(c->dstColorspaceTable, table, sizeof(int) * 4);
handle_formats(c);
desc_dst = av_pix_fmt_desc_get(c->dstFormat);
@@ -842,11 +840,24 @@ int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4],
if(!isYUV(c->srcFormat) && !isGray(c->srcFormat))
srcRange = 0;
+ if (c->srcRange != srcRange ||
+ c->dstRange != dstRange ||
+ c->brightness != brightness ||
+ c->contrast != contrast ||
+ c->saturation != saturation ||
+ memcmp(c->srcColorspaceTable, inv_table, sizeof(int) * 4) ||
+ memcmp(c->dstColorspaceTable, table, sizeof(int) * 4)
+ )
+ need_reinit = 1;
+
+ memmove(c->srcColorspaceTable, inv_table, sizeof(int) * 4);
+ memmove(c->dstColorspaceTable, table, sizeof(int) * 4);
+
+
+
c->brightness = brightness;
c->contrast = contrast;
c->saturation = saturation;
- if (c->srcRange != srcRange || c->dstRange != dstRange)
- need_reinit = 1;
c->srcRange = srcRange;
c->dstRange = dstRange;
@@ -861,6 +872,9 @@ int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4],
if (c->cascaded_context[0])
return sws_setColorspaceDetails(c->cascaded_context[0],inv_table, srcRange,table, dstRange, brightness, contrast, saturation);
+ if (!need_reinit)
+ return 0;
+
if ((isYUV(c->dstFormat) || isGray(c->dstFormat)) && (isYUV(c->srcFormat) || isGray(c->srcFormat))) {
if (!c->cascaded_context[0] &&
memcmp(c->dstColorspaceTable, c->srcColorspaceTable, sizeof(int) * 4) &&
@@ -1193,6 +1207,12 @@ av_cold int sws_init_context(SwsContext *c, SwsFilter *srcFilter,
srcW, srcH, dstW, dstH);
return AVERROR(EINVAL);
}
+ if (flags & SWS_FAST_BILINEAR) {
+ if (srcW < 8 || dstW < 8) {
+ flags ^= SWS_FAST_BILINEAR | SWS_BILINEAR;
+ c->flags = flags;
+ }
+ }
if (!dstFilter)
dstFilter = &dummyFilter;
@@ -1786,6 +1806,9 @@ fail: // FIXME replace things by appropriate error codes
int tmpH = sqrt(srcH * (int64_t)dstH);
enum AVPixelFormat tmpFormat = AV_PIX_FMT_YUV420P;
+ if (isALPHA(srcFormat))
+ tmpFormat = AV_PIX_FMT_YUVA420P;
+
if (srcW*(int64_t)srcH <= 4LL*dstW*dstH)
return AVERROR(EINVAL);
diff --git a/libswscale/vscale.c b/libswscale/vscale.c
index 3d6e81a70f..14109d8a00 100644
--- a/libswscale/vscale.c
+++ b/libswscale/vscale.c
@@ -19,6 +19,17 @@
*/
#include "swscale_internal.h"
+typedef struct VScalerContext
+{
+ uint16_t *filter[2];
+ int32_t *filter_pos;
+ int filter_size;
+ int isMMX;
+ void *pfn;
+ yuv2packedX_fn yuv2packedX;
+} VScalerContext;
+
+
static int lum_planar_vscale(SwsContext *c, SwsFilterDescriptor *desc, int sliceY, int sliceH)
{
VScalerContext *inst = desc->instance;
@@ -113,10 +124,21 @@ static int packed_vscale(SwsContext *c, SwsFilterDescriptor *desc, int sliceY, i
uint8_t **dst = desc->dst->plane[0].line + dp;
- if (c->yuv2packed1 && lum_fsize == 1 && chr_fsize <= 2) { // unscaled RGB
- int chrAlpha = chr_fsize == 1 ? 0 : chr_filter[2 * sliceY + 1];
- ((yuv2packed1_fn)inst->pfn)(c, (const int16_t*)*src0, (const int16_t**)src1, (const int16_t**)src2, (const int16_t*)(desc->alpha ? *src3 : NULL), *dst, dstW, chrAlpha, sliceY);
- } else if (c->yuv2packed2 && lum_fsize == 2 && chr_fsize == 2) { // bilinear upscale RGB
+ if (c->yuv2packed1 && lum_fsize == 1 && chr_fsize == 1) { // unscaled RGB
+ ((yuv2packed1_fn)inst->pfn)(c, (const int16_t*)*src0, (const int16_t**)src1, (const int16_t**)src2,
+ (const int16_t*)(desc->alpha ? *src3 : NULL), *dst, dstW, 0, sliceY);
+ } else if (c->yuv2packed1 && lum_fsize == 1 && chr_fsize == 2 &&
+ chr_filter[2 * sliceY + 1] + chr_filter[2 * chrSliceY] == 4096 &&
+ chr_filter[2 * sliceY + 1] <= 4096U) { // unscaled RGB
+ int chrAlpha = chr_filter[2 * sliceY + 1];
+ ((yuv2packed1_fn)inst->pfn)(c, (const int16_t*)*src0, (const int16_t**)src1, (const int16_t**)src2,
+ (const int16_t*)(desc->alpha ? *src3 : NULL), *dst, dstW, chrAlpha, sliceY);
+ } else if (c->yuv2packed2 && lum_fsize == 2 && chr_fsize == 2 &&
+ lum_filter[2 * sliceY + 1] + lum_filter[2 * sliceY] == 4096 &&
+ lum_filter[2 * sliceY + 1] <= 4096U &&
+ chr_filter[2 * chrSliceY + 1] + chr_filter[2 * chrSliceY] == 4096 &&
+ chr_filter[2 * chrSliceY + 1] <= 4096U
+ ) { // bilinear upscale RGB
int lumAlpha = lum_filter[2 * sliceY + 1];
int chrAlpha = chr_filter[2 * sliceY + 1];
c->lumMmxFilter[2] =
@@ -126,7 +148,14 @@ static int packed_vscale(SwsContext *c, SwsFilterDescriptor *desc, int sliceY, i
((yuv2packed2_fn)inst->pfn)(c, (const int16_t**)src0, (const int16_t**)src1, (const int16_t**)src2, (const int16_t**)src3,
*dst, dstW, lumAlpha, chrAlpha, sliceY);
} else { // general RGB
- ((yuv2packedX_fn)inst->pfn)(c, lum_filter + sliceY * lum_fsize,
+ if ((c->yuv2packed1 && lum_fsize == 1 && chr_fsize == 2) ||
+ (c->yuv2packed2 && lum_fsize == 2 && chr_fsize == 2)) {
+ if (!c->warned_unuseable_bilinear)
+ av_log(c, AV_LOG_INFO, "Optimized 2 tap filter code cannot be used\n");
+ c->warned_unuseable_bilinear = 1;
+ }
+
+ inst->yuv2packedX(c, lum_filter + sliceY * lum_fsize,
(const int16_t**)src0, lum_fsize, chr_filter + sliceY * chr_fsize,
(const int16_t**)src1, (const int16_t**)src2, chr_fsize, (const int16_t**)src3, *dst, dstW, sliceY);
}
@@ -277,8 +306,7 @@ void ff_init_vscale_pfn(SwsContext *c,
lumCtx->pfn = yuv2packed1;
else if (c->yuv2packed2 && c->vLumFilterSize == 2 && c->vChrFilterSize == 2)
lumCtx->pfn = yuv2packed2;
- else
- lumCtx->pfn = yuv2packedX;
+ lumCtx->yuv2packedX = yuv2packedX;
} else
lumCtx->pfn = yuv2anyX;
}
diff --git a/libswscale/yuv2rgb.c b/libswscale/yuv2rgb.c
index 1d682ba57c..e81f3f6f31 100644
--- a/libswscale/yuv2rgb.c
+++ b/libswscale/yuv2rgb.c
@@ -776,7 +776,8 @@ av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
uint16_t *y_table16;
uint32_t *y_table32;
int i, base, rbase, gbase, bbase, av_uninit(abase), needAlpha;
- const int yoffs = fullRange ? 384 : 326;
+ const int yoffs = (fullRange ? 384 : 326) + YUVRGB_TABLE_LUMA_HEADROOM;
+ const int table_plane_size = 1024 + 2*YUVRGB_TABLE_LUMA_HEADROOM;
int64_t crv = inv_table[0];
int64_t cbu = inv_table[1];
@@ -833,10 +834,10 @@ av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
return AVERROR(ENOMEM);
switch (bpp) {
case 1:
- ALLOC_YUV_TABLE(1024);
+ ALLOC_YUV_TABLE(table_plane_size);
y_table = c->yuvTable;
- yb = -(384 << 16) - oy;
- for (i = 0; i < 1024 - 110; i++) {
+ yb = -(384 << 16) - YUVRGB_TABLE_LUMA_HEADROOM*cy - oy;
+ for (i = 0; i < table_plane_size - 110; i++) {
y_table[i + 110] = av_clip_uint8((yb + 0x8000) >> 16) >> 7;
yb += cy;
}
@@ -848,60 +849,60 @@ av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
rbase = isRgb ? 3 : 0;
gbase = 1;
bbase = isRgb ? 0 : 3;
- ALLOC_YUV_TABLE(1024 * 3);
+ ALLOC_YUV_TABLE(table_plane_size * 3);
y_table = c->yuvTable;
- yb = -(384 << 16) - oy;
- for (i = 0; i < 1024 - 110; i++) {
+ yb = -(384 << 16) - YUVRGB_TABLE_LUMA_HEADROOM*cy - oy;
+ for (i = 0; i < table_plane_size - 110; i++) {
int yval = av_clip_uint8((yb + 0x8000) >> 16);
y_table[i + 110] = (yval >> 7) << rbase;
- y_table[i + 37 + 1024] = ((yval + 43) / 85) << gbase;
- y_table[i + 110 + 2048] = (yval >> 7) << bbase;
+ y_table[i + 37 + table_plane_size] = ((yval + 43) / 85) << gbase;
+ y_table[i + 110 + 2*table_plane_size] = (yval >> 7) << bbase;
yb += cy;
}
fill_table(c->table_rV, 1, crv, y_table + yoffs);
- fill_table(c->table_gU, 1, cgu, y_table + yoffs + 1024);
- fill_table(c->table_bU, 1, cbu, y_table + yoffs + 2048);
+ fill_table(c->table_gU, 1, cgu, y_table + yoffs + table_plane_size);
+ fill_table(c->table_bU, 1, cbu, y_table + yoffs + 2*table_plane_size);
fill_gv_table(c->table_gV, 1, cgv);
break;
case 8:
rbase = isRgb ? 5 : 0;
gbase = isRgb ? 2 : 3;
bbase = isRgb ? 0 : 6;
- ALLOC_YUV_TABLE(1024 * 3);
+ ALLOC_YUV_TABLE(table_plane_size * 3);
y_table = c->yuvTable;
- yb = -(384 << 16) - oy;
- for (i = 0; i < 1024 - 38; i++) {
+ yb = -(384 << 16) - YUVRGB_TABLE_LUMA_HEADROOM*cy - oy;
+ for (i = 0; i < table_plane_size - 38; i++) {
int yval = av_clip_uint8((yb + 0x8000) >> 16);
y_table[i + 16] = ((yval + 18) / 36) << rbase;
- y_table[i + 16 + 1024] = ((yval + 18) / 36) << gbase;
- y_table[i + 37 + 2048] = ((yval + 43) / 85) << bbase;
+ y_table[i + 16 + table_plane_size] = ((yval + 18) / 36) << gbase;
+ y_table[i + 37 + 2*table_plane_size] = ((yval + 43) / 85) << bbase;
yb += cy;
}
fill_table(c->table_rV, 1, crv, y_table + yoffs);
- fill_table(c->table_gU, 1, cgu, y_table + yoffs + 1024);
- fill_table(c->table_bU, 1, cbu, y_table + yoffs + 2048);
+ fill_table(c->table_gU, 1, cgu, y_table + yoffs + table_plane_size);
+ fill_table(c->table_bU, 1, cbu, y_table + yoffs + 2*table_plane_size);
fill_gv_table(c->table_gV, 1, cgv);
break;
case 12:
rbase = isRgb ? 8 : 0;
gbase = 4;
bbase = isRgb ? 0 : 8;
- ALLOC_YUV_TABLE(1024 * 3 * 2);
+ ALLOC_YUV_TABLE(table_plane_size * 3 * 2);
y_table16 = c->yuvTable;
- yb = -(384 << 16) - oy;
- for (i = 0; i < 1024; i++) {
+ yb = -(384 << 16) - YUVRGB_TABLE_LUMA_HEADROOM*cy - oy;
+ for (i = 0; i < table_plane_size; i++) {
uint8_t yval = av_clip_uint8((yb + 0x8000) >> 16);
y_table16[i] = (yval >> 4) << rbase;
- y_table16[i + 1024] = (yval >> 4) << gbase;
- y_table16[i + 2048] = (yval >> 4) << bbase;
+ y_table16[i + table_plane_size] = (yval >> 4) << gbase;
+ y_table16[i + 2*table_plane_size] = (yval >> 4) << bbase;
yb += cy;
}
if (isNotNe)
- for (i = 0; i < 1024 * 3; i++)
+ for (i = 0; i < table_plane_size * 3; i++)
y_table16[i] = av_bswap16(y_table16[i]);
fill_table(c->table_rV, 2, crv, y_table16 + yoffs);
- fill_table(c->table_gU, 2, cgu, y_table16 + yoffs + 1024);
- fill_table(c->table_bU, 2, cbu, y_table16 + yoffs + 2048);
+ fill_table(c->table_gU, 2, cgu, y_table16 + yoffs + table_plane_size);
+ fill_table(c->table_bU, 2, cbu, y_table16 + yoffs + 2*table_plane_size);
fill_gv_table(c->table_gV, 2, cgv);
break;
case 15:
@@ -909,30 +910,30 @@ av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
rbase = isRgb ? bpp - 5 : 0;
gbase = 5;
bbase = isRgb ? 0 : (bpp - 5);
- ALLOC_YUV_TABLE(1024 * 3 * 2);
+ ALLOC_YUV_TABLE(table_plane_size * 3 * 2);
y_table16 = c->yuvTable;
- yb = -(384 << 16) - oy;
- for (i = 0; i < 1024; i++) {
+ yb = -(384 << 16) - YUVRGB_TABLE_LUMA_HEADROOM*cy - oy;
+ for (i = 0; i < table_plane_size; i++) {
uint8_t yval = av_clip_uint8((yb + 0x8000) >> 16);
y_table16[i] = (yval >> 3) << rbase;
- y_table16[i + 1024] = (yval >> (18 - bpp)) << gbase;
- y_table16[i + 2048] = (yval >> 3) << bbase;
+ y_table16[i + table_plane_size] = (yval >> (18 - bpp)) << gbase;
+ y_table16[i + 2*table_plane_size] = (yval >> 3) << bbase;
yb += cy;
}
if (isNotNe)
- for (i = 0; i < 1024 * 3; i++)
+ for (i = 0; i < table_plane_size * 3; i++)
y_table16[i] = av_bswap16(y_table16[i]);
fill_table(c->table_rV, 2, crv, y_table16 + yoffs);
- fill_table(c->table_gU, 2, cgu, y_table16 + yoffs + 1024);
- fill_table(c->table_bU, 2, cbu, y_table16 + yoffs + 2048);
+ fill_table(c->table_gU, 2, cgu, y_table16 + yoffs + table_plane_size);
+ fill_table(c->table_bU, 2, cbu, y_table16 + yoffs + 2*table_plane_size);
fill_gv_table(c->table_gV, 2, cgv);
break;
case 24:
case 48:
- ALLOC_YUV_TABLE(1024);
+ ALLOC_YUV_TABLE(table_plane_size);
y_table = c->yuvTable;
- yb = -(384 << 16) - oy;
- for (i = 0; i < 1024; i++) {
+ yb = -(384 << 16) - YUVRGB_TABLE_LUMA_HEADROOM*cy - oy;
+ for (i = 0; i < table_plane_size; i++) {
y_table[i] = av_clip_uint8((yb + 0x8000) >> 16);
yb += cy;
}
@@ -951,20 +952,20 @@ av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
needAlpha = CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat);
if (!needAlpha)
abase = (base + 24) & 31;
- ALLOC_YUV_TABLE(1024 * 3 * 4);
+ ALLOC_YUV_TABLE(table_plane_size * 3 * 4);
y_table32 = c->yuvTable;
- yb = -(384 << 16) - oy;
- for (i = 0; i < 1024; i++) {
+ yb = -(384 << 16) - YUVRGB_TABLE_LUMA_HEADROOM*cy - oy;
+ for (i = 0; i < table_plane_size; i++) {
unsigned yval = av_clip_uint8((yb + 0x8000) >> 16);
y_table32[i] = (yval << rbase) +
(needAlpha ? 0 : (255u << abase));
- y_table32[i + 1024] = yval << gbase;
- y_table32[i + 2048] = yval << bbase;
+ y_table32[i + table_plane_size] = yval << gbase;
+ y_table32[i + 2*table_plane_size] = yval << bbase;
yb += cy;
}
fill_table(c->table_rV, 4, crv, y_table32 + yoffs);
- fill_table(c->table_gU, 4, cgu, y_table32 + yoffs + 1024);
- fill_table(c->table_bU, 4, cbu, y_table32 + yoffs + 2048);
+ fill_table(c->table_gU, 4, cgu, y_table32 + yoffs + table_plane_size);
+ fill_table(c->table_bU, 4, cbu, y_table32 + yoffs + 2*table_plane_size);
fill_gv_table(c->table_gV, 4, cgv);
break;
default: