Merge remote-tracking branch 'qatar/master'
author Michael Niedermayer <michaelni@gmx.at>
Thu, 24 Nov 2011 01:08:21 +0000 (02:08 +0100)
committer Michael Niedermayer <michaelni@gmx.at>
Thu, 24 Nov 2011 02:32:24 +0000 (03:32 +0100)
* qatar/master:
  libavutil: add utility functions to simplify allocation of audio buffers.
  libavutil: add planar sample formats and av_sample_fmt_is_planar()
  avconv: fix segfault at EOF with delayed pictures
  pcmdec: remove unneeded resetting of samples pointer
  avconv: remove a now unused parameter from output_packet().
  avconv: formatting fixes in output_packet()
  avconv: declare some variables in blocks where they are used
  avconv: use the same behavior when decoding audio/video/subs
  bethsoftvideo: return proper consumed size for palette packets.
  cdg: skip packets that don't contain a cdg command.
  crcenc: add flags
  avconv: use vsync 0 for AVFMT_NOTIMESTAMPS formats.
  tiffenc: add a private option for selecting compression algorithm
  md5enc: add flags
  ARM: remove needless .text/.align directives

Conflicts:
doc/APIchanges
libavcodec/tiffenc.c
libavutil/avutil.h
libavutil/samplefmt.c
libavutil/samplefmt.h
tests/ref/fate/bethsoft-vid
tests/ref/fate/cdgraphics
tests/ref/fate/film-cvid-pcm-stereo-8bit
tests/ref/fate/mpeg2-field-enc
tests/ref/fate/nuv
tests/ref/fate/tiertex-seq
tests/ref/fate/tscc-32bit
tests/ref/fate/vmnc-32bit

Merged-by: Michael Niedermayer <michaelni@gmx.at>
27 files changed:
1  2 
avconv.c
doc/APIchanges
ffmpeg.c
libavcodec/arm/dsputil_armv6.S
libavcodec/arm/dsputil_neon.S
libavcodec/arm/fft_neon.S
libavcodec/arm/fmtconvert_neon.S
libavcodec/arm/h264dsp_neon.S
libavcodec/arm/h264idct_neon.S
libavcodec/arm/int_neon.S
libavcodec/arm/mdct_neon.S
libavcodec/arm/simple_idct_arm.S
libavcodec/bethsoftvideo.c
libavcodec/pcm.c
libavcodec/tiffenc.c
libavfilter/af_aconvert.c
libavfilter/asrc_abuffer.c
libavfilter/defaults.c
libavformat/cdg.c
libavformat/crcenc.c
libavformat/framecrcenc.c
libavformat/md5enc.c
libavutil/avutil.h
libavutil/samplefmt.c
libavutil/samplefmt.h
tests/fate.mak
tests/ref/fate/nuv

diff --cc avconv.c
+++ b/avconv.c
@@@ -1819,9 -1742,10 +1818,9 @@@ static int transcode_video(InputStream 
      if (!*got_output) {
          /* no picture yet */
          av_freep(&decoded_frame);
-         return 0;
+         return ret;
      }
 -    ist->next_pts = ist->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
 -                                                 decoded_frame->pkt_dts);
 +    ist->next_pts = ist->pts = decoded_frame->best_effort_timestamp;
      if (ist->st->codec->time_base.num != 0) {
          int ticks      = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
                                             ist->st->codec->ticks_per_frame;
diff --cc doc/APIchanges
@@@ -13,12 -13,13 +13,19 @@@ libavutil:   2011-04-1
  
  API changes, most recent first:
  
 +2011-11-03 - 96949da - lavu 51.23.0
 +  Add av_strcasecmp() and av_strncasecmp() to avstring.h.
 +
 +2011-10-20 - b35e9e1 - lavu 51.22.0
 +  Add av_strtok() to avstring.h.
 +
+ 2011-xx-xx - xxxxxxx - lavu 51.18.0
+   Add av_samples_get_buffer_size(), av_samples_fill_arrays(), and
+   av_samples_alloc(), to samplefmt.h.
+ 2011-xx-xx - xxxxxxx - lavu 51.17.0
+   Add planar sample formats and av_sample_fmt_is_planar() to samplefmt.h.
  2011-xx-xx - xxxxxxx - lavc 53.21.0
    Move some AVCodecContext fields to a new private struct, AVCodecInternal,
    which is accessed from a new field, AVCodecContext.internal.
diff --cc ffmpeg.c
+++ b/ffmpeg.c
@@@ -1244,12 -1126,8 +1244,13 @@@ static void do_video_out(AVFormatContex
  
      *frame_size = 0;
  
 -    if(video_sync_method){
 -        double vdelta = sync_ipts - ost->sync_opts;
 +    format_video_sync = video_sync_method;
 +    if (format_video_sync < 0)
-         format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? 2 : 1;
++        format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? 0 :
++                            (s->oformat->flags & AVFMT_VARIABLE_FPS) ? 2 : 1;
 +
 +    if (format_video_sync) {
 +        double vdelta = sync_ipts - ost->sync_opts + duration;
          //FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
          if (vdelta < -1.1)
              nb_frames = 0;
@@@ -1536,527 -1449,454 +1537,528 @@@ static void generate_silence(uint8_t *b
      memset(buf, fill_char, size);
  }
  
 -/* pkt = NULL means EOF (needed to flush decoder buffers) */
 -static int output_packet(InputStream *ist, int ist_index,
 -                         OutputStream **ost_table, int nb_ostreams,
 -                         const AVPacket *pkt)
 +static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
  {
 -    AVFormatContext *os;
 -    OutputStream *ost;
 -    int ret, i;
 -    int got_output;
 -    AVFrame picture;
 -    void *buffer_to_free = NULL;
 -    static unsigned int samples_size= 0;
 -    AVSubtitle subtitle, *subtitle_to_free;
 -    int64_t pkt_pts = AV_NOPTS_VALUE;
 -#if CONFIG_AVFILTER
 -    int frame_available;
 -#endif
 -    float quality;
 -
 -    AVPacket avpkt;
 -    int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
 +    int i, ret;
  
 -    if(ist->next_pts == AV_NOPTS_VALUE)
 -        ist->next_pts= ist->pts;
 +    for (i = 0; i < nb_ostreams; i++) {
 +        OutputStream   *ost = &ost_table[i];
 +        AVCodecContext *enc = ost->st->codec;
 +        AVFormatContext *os = output_files[ost->file_index].ctx;
  
 -    if (pkt == NULL) {
 -        /* EOF handling */
 -        av_init_packet(&avpkt);
 -        avpkt.data = NULL;
 -        avpkt.size = 0;
 -        goto handle_eof;
 -    } else {
 -        avpkt = *pkt;
 -    }
 +        if (!ost->encoding_needed)
 +            continue;
  
 -    if(pkt->dts != AV_NOPTS_VALUE)
 -        ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
 -    if(pkt->pts != AV_NOPTS_VALUE)
 -        pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
 +        if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1)
 +            continue;
 +        if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
 +            continue;
  
 -    //while we have more to decode or while the decoder did output something on EOF
 -    while (avpkt.size > 0 || (!pkt && got_output)) {
 -        uint8_t *data_buf, *decoded_data_buf;
 -        int data_size, decoded_data_size;
 -    handle_eof:
 -        ist->pts= ist->next_pts;
 +        for(;;) {
 +            AVPacket pkt;
 +            int fifo_bytes;
 +            av_init_packet(&pkt);
 +            pkt.stream_index= ost->index;
  
 -        if(avpkt.size && avpkt.size != pkt->size &&
 -           ((!ist->showed_multi_packet_warning && verbose>0) || verbose>1)){
 -            fprintf(stderr, "Multiple frames in a packet from stream %d\n", pkt->stream_index);
 -            ist->showed_multi_packet_warning=1;
 -        }
 +            switch (ost->st->codec->codec_type) {
 +            case AVMEDIA_TYPE_AUDIO:
 +                fifo_bytes = av_fifo_size(ost->fifo);
 +                ret = 0;
 +                /* encode any samples remaining in fifo */
 +                if (fifo_bytes > 0) {
 +                    int osize = av_get_bytes_per_sample(enc->sample_fmt);
 +                    int fs_tmp = enc->frame_size;
 +
 +                    av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);
 +                    if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
 +                        enc->frame_size = fifo_bytes / (osize * enc->channels);
 +                    } else { /* pad */
 +                        int frame_bytes = enc->frame_size*osize*enc->channels;
 +                        if (allocated_audio_buf_size < frame_bytes)
 +                            exit_program(1);
 +                        generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes);
 +                    }
  
 -        /* decode the packet if needed */
 -        decoded_data_buf = NULL; /* fail safe */
 -        decoded_data_size= 0;
 -        data_buf  = avpkt.data;
 -        data_size = avpkt.size;
 -        subtitle_to_free = NULL;
 -        if (ist->decoding_needed) {
 -            switch(ist->st->codec->codec_type) {
 -            case AVMEDIA_TYPE_AUDIO:{
 -                if(pkt && samples_size < FFMAX(pkt->size*sizeof(*samples), AVCODEC_MAX_AUDIO_FRAME_SIZE)) {
 -                    samples_size = FFMAX(pkt->size*sizeof(*samples), AVCODEC_MAX_AUDIO_FRAME_SIZE);
 -                    av_free(samples);
 -                    samples= av_malloc(samples_size);
 +                    ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, (short *)audio_buf);
 +                    pkt.duration = av_rescale((int64_t)enc->frame_size*ost->st->time_base.den,
 +                                              ost->st->time_base.num, enc->sample_rate);
 +                    enc->frame_size = fs_tmp;
                  }
 -                decoded_data_size= samples_size;
 -                    /* XXX: could avoid copy if PCM 16 bits with same
 -                       endianness as CPU */
 -                ret = avcodec_decode_audio3(ist->st->codec, samples, &decoded_data_size,
 -                                            &avpkt);
 -                if (ret < 0)
 -                    return ret;
 -                avpkt.data += ret;
 -                avpkt.size -= ret;
 -                data_size   = ret;
 -                got_output  = decoded_data_size > 0;
 -                /* Some bug in mpeg audio decoder gives */
 -                /* decoded_data_size < 0, it seems they are overflows */
 -                if (!got_output) {
 -                    /* no audio frame */
 -                    continue;
 +                if (ret <= 0) {
 +                    ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
                  }
 -                decoded_data_buf = (uint8_t *)samples;
 -                ist->next_pts += ((int64_t)AV_TIME_BASE/bps * decoded_data_size) /
 -                    (ist->st->codec->sample_rate * ist->st->codec->channels);
 -                break;}
 -            case AVMEDIA_TYPE_VIDEO:
 -                    decoded_data_size = (ist->st->codec->width * ist->st->codec->height * 3) / 2;
 -                    /* XXX: allocate picture correctly */
 -                    avcodec_get_frame_defaults(&picture);
 -                    avpkt.pts = pkt_pts;
 -                    avpkt.dts = ist->pts;
 -                    pkt_pts = AV_NOPTS_VALUE;
 -
 -                    ret = avcodec_decode_video2(ist->st->codec,
 -                                                &picture, &got_output, &avpkt);
 -                    quality = same_quality ? picture.quality : 0;
 -                    if (ret < 0)
 -                        return ret;
 -                    if (!got_output) {
 -                        /* no picture yet */
 -                        goto discard_packet;
 -                    }
 -                    ist->next_pts = ist->pts = guess_correct_pts(&ist->pts_ctx, picture.pkt_pts, picture.pkt_dts);
 -                    if (ist->st->codec->time_base.num != 0) {
 -                        int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
 -                        ist->next_pts += ((int64_t)AV_TIME_BASE *
 -                                          ist->st->codec->time_base.num * ticks) /
 -                            ist->st->codec->time_base.den;
 -                    }
 -                    avpkt.size = 0;
 -                    buffer_to_free = NULL;
 -                    pre_process_video_frame(ist, (AVPicture *)&picture, &buffer_to_free);
 -                    break;
 -            case AVMEDIA_TYPE_SUBTITLE:
 -                ret = avcodec_decode_subtitle2(ist->st->codec,
 -                                               &subtitle, &got_output, &avpkt);
 -                if (ret < 0)
 -                    return ret;
 -                if (!got_output) {
 -                    goto discard_packet;
 +                if (ret < 0) {
 +                    av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
 +                    exit_program(1);
                  }
 -                subtitle_to_free = &subtitle;
 -                avpkt.size = 0;
 -                break;
 -            default:
 -                return -1;
 -            }
 -        } else {
 -            switch(ist->st->codec->codec_type) {
 -            case AVMEDIA_TYPE_AUDIO:
 -                ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
 -                    ist->st->codec->sample_rate;
 +                audio_size += ret;
 +                pkt.flags |= AV_PKT_FLAG_KEY;
                  break;
              case AVMEDIA_TYPE_VIDEO:
 -                if (ist->st->codec->time_base.num != 0) {
 -                    int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
 -                    ist->next_pts += ((int64_t)AV_TIME_BASE *
 -                                      ist->st->codec->time_base.num * ticks) /
 -                        ist->st->codec->time_base.den;
 +                ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
 +                if (ret < 0) {
 +                    av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
 +                    exit_program(1);
 +                }
 +                video_size += ret;
 +                if(enc->coded_frame && enc->coded_frame->key_frame)
 +                    pkt.flags |= AV_PKT_FLAG_KEY;
 +                if (ost->logfile && enc->stats_out) {
 +                    fprintf(ost->logfile, "%s", enc->stats_out);
                  }
                  break;
 +            default:
 +                ret=-1;
              }
 -            ret = avpkt.size;
 -            avpkt.size = 0;
 +
 +            if (ret <= 0)
 +                break;
 +            pkt.data = bit_buffer;
 +            pkt.size = ret;
 +            if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
 +                pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
 +            write_frame(os, &pkt, ost->st->codec, ost->bitstream_filters);
          }
 +    }
 +}
  
 -#if CONFIG_AVFILTER
 -        if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
 -            for (i = 0; i < nb_ostreams; i++) {
 -                ost = ost_table[i];
 -                if (ost->input_video_filter && ost->source_index == ist_index) {
 -                    AVRational sar;
 -                    if (ist->st->sample_aspect_ratio.num)
 -                        sar = ist->st->sample_aspect_ratio;
 -                    else
 -                        sar = ist->st->codec->sample_aspect_ratio;
 -                    // add it to be filtered
 -                    av_vsrc_buffer_add_frame(ost->input_video_filter, &picture,
 -                                             ist->pts,
 -                                             sar);
 -                }
 +/*
 + * Check whether a packet from ist should be written into ost at this time
 + */
 +static int check_output_constraints(InputStream *ist, OutputStream *ost)
 +{
 +    OutputFile *of = &output_files[ost->file_index];
 +    int ist_index  = ist - input_streams;
 +
 +    if (ost->source_index != ist_index)
 +        return 0;
 +
 +    if (of->start_time && ist->pts < of->start_time)
 +        return 0;
 +
 +    if (of->recording_time != INT64_MAX &&
 +        av_compare_ts(ist->pts, AV_TIME_BASE_Q, of->recording_time + of->start_time,
 +                      (AVRational){1, 1000000}) >= 0) {
 +        ost->is_past_recording_time = 1;
 +        return 0;
 +    }
 +
 +    return 1;
 +}
 +
 +static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
 +{
 +    OutputFile *of = &output_files[ost->file_index];
 +    int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
 +    AVPicture pict;
 +    AVPacket opkt;
 +
 +    av_init_packet(&opkt);
 +
 +    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
 +        !ost->copy_initial_nonkeyframes)
 +        return;
 +
 +    /* force the input stream PTS */
 +    if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
 +        audio_size += pkt->size;
 +    else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
 +        video_size += pkt->size;
 +        ost->sync_opts++;
 +    }
 +
 +    opkt.stream_index = ost->index;
 +    if (pkt->pts != AV_NOPTS_VALUE)
 +        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
 +    else
 +        opkt.pts = AV_NOPTS_VALUE;
 +
 +    if (pkt->dts == AV_NOPTS_VALUE)
 +        opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
 +    else
 +        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
 +    opkt.dts -= ost_tb_start_time;
 +
 +    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
 +    opkt.flags    = pkt->flags;
 +
 +    //FIXME remove the following 2 lines they shall be replaced by the bitstream filters
 +    if(   ost->st->codec->codec_id != CODEC_ID_H264
 +       && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
 +       && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
 +       ) {
 +        if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
 +            opkt.destruct = av_destruct_packet;
 +    } else {
 +        opkt.data = pkt->data;
 +        opkt.size = pkt->size;
 +    }
 +    if (of->ctx->oformat->flags & AVFMT_RAWPICTURE) {
 +        /* store AVPicture in AVPacket, as expected by the output format */
 +        avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
 +        opkt.data = (uint8_t *)&pict;
 +        opkt.size = sizeof(AVPicture);
 +        opkt.flags |= AV_PKT_FLAG_KEY;
 +    }
 +
 +    write_frame(of->ctx, &opkt, ost->st->codec, ost->bitstream_filters);
 +    ost->st->codec->frame_number++;
 +    ost->frame_number++;
 +    av_free_packet(&opkt);
 +}
 +
 +static void rate_emu_sleep(InputStream *ist)
 +{
 +    if (input_files[ist->file_index].rate_emu) {
 +        int64_t pts = av_rescale(ist->pts, 1000000, AV_TIME_BASE);
 +        int64_t now = av_gettime() - ist->start;
 +        if (pts > now)
 +            usleep(pts - now);
 +    }
 +}
 +
 +static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
 +{
 +    static unsigned int samples_size = 0;
 +    int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
 +    uint8_t *decoded_data_buf  = NULL;
 +    int      decoded_data_size = 0;
 +    int i, ret;
 +
 +    if (pkt && samples_size < FFMAX(pkt->size * bps, AVCODEC_MAX_AUDIO_FRAME_SIZE)) {
 +        av_free(samples);
 +        samples_size = FFMAX(pkt->size * bps, AVCODEC_MAX_AUDIO_FRAME_SIZE);
 +        samples      = av_malloc(samples_size);
 +    }
 +    decoded_data_size = samples_size;
 +
 +    ret = avcodec_decode_audio3(ist->st->codec, samples, &decoded_data_size,
 +                                pkt);
 +    if (ret < 0)
 +        return ret;
-     pkt->data   += ret;
-     pkt->size   -= ret;
 +    *got_output  = decoded_data_size > 0;
 +
 +    /* Some bug in mpeg audio decoder gives */
 +    /* decoded_data_size < 0, it seems they are overflows */
 +    if (!*got_output) {
 +        /* no audio frame */
-         return 0;
++        return ret;
 +    }
 +
 +    decoded_data_buf = (uint8_t *)samples;
 +    ist->next_pts   += ((int64_t)AV_TIME_BASE/bps * decoded_data_size) /
 +                       (ist->st->codec->sample_rate * ist->st->codec->channels);
 +
 +    // preprocess audio (volume)
 +    if (audio_volume != 256) {
 +        switch (ist->st->codec->sample_fmt) {
 +        case AV_SAMPLE_FMT_U8:
 +        {
 +            uint8_t *volp = samples;
 +            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 +                int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
 +                *volp++ = av_clip_uint8(v);
              }
 +            break;
          }
 -#endif
 -
 -        // preprocess audio (volume)
 -        if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
 -            if (audio_volume != 256) {
 -                short *volp;
 -                volp = samples;
 -                for(i=0;i<(decoded_data_size / sizeof(short));i++) {
 -                    int v = ((*volp) * audio_volume + 128) >> 8;
 -                    if (v < -32768) v = -32768;
 -                    if (v >  32767) v = 32767;
 -                    *volp++ = v;
 -                }
 +        case AV_SAMPLE_FMT_S16:
 +        {
 +            int16_t *volp = samples;
 +            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 +                int v = ((*volp) * audio_volume + 128) >> 8;
 +                *volp++ = av_clip_int16(v);
              }
 +            break;
          }
 -
 -        /* frame rate emulation */
 -        if (rate_emu) {
 -            int64_t pts = av_rescale(ist->pts, 1000000, AV_TIME_BASE);
 -            int64_t now = av_gettime() - ist->start;
 -            if (pts > now)
 -                usleep(pts - now);
 +        case AV_SAMPLE_FMT_S32:
 +        {
 +            int32_t *volp = samples;
 +            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 +                int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
 +                *volp++ = av_clipl_int32(v);
 +            }
 +            break;
          }
 -        /* if output time reached then transcode raw format,
 -           encode packets and output them */
 -        if (start_time == 0 || ist->pts >= start_time)
 -            for(i=0;i<nb_ostreams;i++) {
 -                int frame_size;
 -
 -                ost = ost_table[i];
 -                if (ost->source_index == ist_index) {
 -#if CONFIG_AVFILTER
 -                frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
 -                    !ost->output_video_filter || avfilter_poll_frame(ost->output_video_filter->inputs[0]);
 -                while (frame_available) {
 -                    AVRational ist_pts_tb;
 -                    if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter)
 -                        get_filtered_video_frame(ost->output_video_filter, &picture, &ost->picref, &ist_pts_tb);
 -                    if (ost->picref)
 -                        ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
 -#endif
 -                    os = output_files[ost->file_index];
 -
 -                    /* set the input output pts pairs */
 -                    //ost->sync_ipts = (double)(ist->pts + input_files[ist->file_index].ts_offset - start_time)/ AV_TIME_BASE;
 -
 -                    if (ost->encoding_needed) {
 -                        av_assert0(ist->decoding_needed);
 -                        switch(ost->st->codec->codec_type) {
 -                        case AVMEDIA_TYPE_AUDIO:
 -                            do_audio_out(os, ost, ist, decoded_data_buf, decoded_data_size);
 -                            break;
 -                        case AVMEDIA_TYPE_VIDEO:
 +        case AV_SAMPLE_FMT_FLT:
 +        {
 +            float *volp = samples;
 +            float scale = audio_volume / 256.f;
 +            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 +                *volp++ *= scale;
 +            }
 +            break;
 +        }
 +        case AV_SAMPLE_FMT_DBL:
 +        {
 +            double *volp = samples;
 +            double scale = audio_volume / 256.;
 +            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 +                *volp++ *= scale;
 +            }
 +            break;
 +        }
 +        default:
 +            av_log(NULL, AV_LOG_FATAL,
 +                   "Audio volume adjustment on sample format %s is not supported.\n",
 +                   av_get_sample_fmt_name(ist->st->codec->sample_fmt));
 +            exit_program(1);
 +        }
 +    }
 +
 +    rate_emu_sleep(ist);
 +
 +    for (i = 0; i < nb_output_streams; i++) {
 +        OutputStream *ost = &output_streams[i];
 +
 +        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
 +            continue;
 +        do_audio_out(output_files[ost->file_index].ctx, ost, ist,
 +                     decoded_data_buf, decoded_data_size);
 +    }
-     return 0;
++    return ret;
 +}
 +
 +static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *pkt_pts, int64_t *pkt_dts)
 +{
 +    AVFrame *decoded_frame, *filtered_frame = NULL;
 +    void *buffer_to_free = NULL;
 +    int i, ret = 0;
 +    float quality = 0;
  #if CONFIG_AVFILTER
 -                            if (ost->picref->video && !ost->frame_aspect_ratio)
 -                                ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect;
 +    int frame_available = 1;
  #endif
 -                            do_video_out(os, ost, ist, &picture, &frame_size,
 -                                         same_quality ? quality : ost->st->codec->global_quality);
 -                            if (vstats_filename && frame_size)
 -                                do_video_stats(os, ost, frame_size);
 -                            break;
 -                        case AVMEDIA_TYPE_SUBTITLE:
 -                            do_subtitle_out(os, ost, ist, &subtitle,
 -                                            pkt->pts);
 -                            break;
 -                        default:
 -                            abort();
 -                        }
 -                    } else {
 -                        AVFrame avframe; //FIXME/XXX remove this
 -                        AVPacket opkt;
 -                        int64_t ost_tb_start_time= av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
  
 -                        av_init_packet(&opkt);
 +    if (!(decoded_frame = avcodec_alloc_frame()))
 +        return AVERROR(ENOMEM);
 +    pkt->pts  = *pkt_pts;
 +    pkt->dts  = *pkt_dts;
 +    *pkt_pts  = AV_NOPTS_VALUE;
 +
 +    if(*pkt_dts != AV_NOPTS_VALUE && ist->st->codec->time_base.num != 0) {
 +        int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
 +        *pkt_dts += ((int64_t)AV_TIME_BASE *
 +                          ist->st->codec->time_base.num * ticks) /
 +            ist->st->codec->time_base.den;
 +    }else
 +        *pkt_dts = AV_NOPTS_VALUE;
  
 -                        if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) && !copy_initial_nonkeyframes)
 -#if !CONFIG_AVFILTER
 -                            continue;
 -#else
 -                            goto cont;
 -#endif
 +    ret = avcodec_decode_video2(ist->st->codec,
 +                                decoded_frame, got_output, pkt);
 +    if (ret < 0)
 +        goto fail;
  
 -                        /* no reencoding needed : output the packet directly */
 -                        /* force the input stream PTS */
 +    quality = same_quant ? decoded_frame->quality : 0;
 +    if (!*got_output) {
 +        /* no picture yet */
 +        av_freep(&decoded_frame);
-         return 0;
++        return ret;
 +    }
  
 -                        avcodec_get_frame_defaults(&avframe);
 -                        ost->st->codec->coded_frame= &avframe;
 -                        avframe.key_frame = pkt->flags & AV_PKT_FLAG_KEY;
 +    if(decoded_frame->best_effort_timestamp != AV_NOPTS_VALUE)
 +        ist->next_pts = ist->pts = decoded_frame->best_effort_timestamp;
  
 -                        if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
 -                            audio_size += data_size;
 -                        else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
 -                            video_size += data_size;
 -                            ost->sync_opts++;
 -                        }
 +    if (ist->st->codec->time_base.num != 0) {
 +        int ticks      = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
 +                                           ist->st->codec->ticks_per_frame;
 +        ist->next_pts += ((int64_t)AV_TIME_BASE *
 +                          ist->st->codec->time_base.num * ticks) /
 +                          ist->st->codec->time_base.den;
 +    }
 +    pkt->size = 0;
  
 -                        opkt.stream_index= ost->index;
 -                        if(pkt->pts != AV_NOPTS_VALUE)
 -                            opkt.pts= av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
 -                        else
 -                            opkt.pts= AV_NOPTS_VALUE;
 -
 -                        if (pkt->dts == AV_NOPTS_VALUE)
 -                            opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
 -                        else
 -                            opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
 -                        opkt.dts -= ost_tb_start_time;
 -
 -                        opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
 -                        opkt.flags= pkt->flags;
 -
 -                        //FIXME remove the following 2 lines they shall be replaced by the bitstream filters
 -                        if(   ost->st->codec->codec_id != CODEC_ID_H264
 -                           && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
 -                           && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
 -                           ) {
 -                            if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & AV_PKT_FLAG_KEY))
 -                                opkt.destruct= av_destruct_packet;
 -                        } else {
 -                            opkt.data = data_buf;
 -                            opkt.size = data_size;
 -                        }
 +    pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
  
 -                        write_frame(os, &opkt, ost->st->codec, ost->bitstream_filters);
 -                        ost->st->codec->frame_number++;
 -                        ost->frame_number++;
 -                        av_free_packet(&opkt);
 -                    }
  #if CONFIG_AVFILTER
 -                    cont:
 -                    frame_available = (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
 -                                       ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
 -                    if (ost->picref)
 -                        avfilter_unref_buffer(ost->picref);
 -                }
 -#endif
 -                }
 -            }
 -
 -        av_free(buffer_to_free);
 -        /* XXX: allocate the subtitles in the codec ? */
 -        if (subtitle_to_free) {
 -            avsubtitle_free(subtitle_to_free);
 -            subtitle_to_free = NULL;
 +    for(i=0;i<nb_output_streams;i++) {
 +        OutputStream *ost = ost = &output_streams[i];
 +        if(check_output_constraints(ist, ost)){
 +            if (!decoded_frame->sample_aspect_ratio.num)
 +                decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
 +            decoded_frame->pts = ist->pts;
 +
 +            av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, AV_VSRC_BUF_FLAG_OVERWRITE);
          }
      }
 - discard_packet:
 -    if (pkt == NULL) {
 -        /* EOF handling */
 +#endif
  
 -        for(i=0;i<nb_ostreams;i++) {
 -            ost = ost_table[i];
 -            if (ost->source_index == ist_index) {
 -                AVCodecContext *enc= ost->st->codec;
 -                os = output_files[ost->file_index];
 -
 -                if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1)
 -                    continue;
 -                if(ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
 -                    continue;
 -
 -                if (ost->encoding_needed) {
 -                    for(;;) {
 -                        AVPacket pkt;
 -                        int fifo_bytes;
 -                        av_init_packet(&pkt);
 -                        pkt.stream_index= ost->index;
 -
 -                        switch(ost->st->codec->codec_type) {
 -                        case AVMEDIA_TYPE_AUDIO:
 -                            fifo_bytes = av_fifo_size(ost->fifo);
 -                            ret = 0;
 -                            /* encode any samples remaining in fifo */
 -                            if (fifo_bytes > 0) {
 -                                int osize = av_get_bytes_per_sample(enc->sample_fmt);
 -                                int fs_tmp = enc->frame_size;
 -
 -                                av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);
 -                                if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
 -                                    enc->frame_size = fifo_bytes / (osize * enc->channels);
 -                                } else { /* pad */
 -                                    int frame_bytes = enc->frame_size*osize*enc->channels;
 -                                    if (allocated_audio_buf_size < frame_bytes)
 -                                        exit_program(1);
 -                                    generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes);
 -                                }
 -
 -                                ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, (short *)audio_buf);
 -                                pkt.duration = av_rescale((int64_t)enc->frame_size*ost->st->time_base.den,
 -                                                          ost->st->time_base.num, enc->sample_rate);
 -                                enc->frame_size = fs_tmp;
 -                            }
 -                            if(ret <= 0) {
 -                                ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
 -                            }
 -                            if (ret < 0) {
 -                                fprintf(stderr, "Audio encoding failed\n");
 -                                exit_program(1);
 -                            }
 -                            audio_size += ret;
 -                            pkt.flags |= AV_PKT_FLAG_KEY;
 -                            break;
 -                        case AVMEDIA_TYPE_VIDEO:
 -                            ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
 -                            if (ret < 0) {
 -                                fprintf(stderr, "Video encoding failed\n");
 -                                exit_program(1);
 -                            }
 -                            video_size += ret;
 -                            if(enc->coded_frame && enc->coded_frame->key_frame)
 -                                pkt.flags |= AV_PKT_FLAG_KEY;
 -                            if (ost->logfile && enc->stats_out) {
 -                                fprintf(ost->logfile, "%s", enc->stats_out);
 -                            }
 -                            break;
 -                        default:
 -                            ret=-1;
 -                        }
 +    rate_emu_sleep(ist);
  
 -                        if(ret<=0)
 -                            break;
 -                        pkt.data= bit_buffer;
 -                        pkt.size= ret;
 -                        if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
 -                            pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
 -                        write_frame(os, &pkt, ost->st->codec, ost->bitstream_filters);
 -                    }
 +    for (i = 0; i < nb_output_streams; i++) {
 +        OutputStream *ost = &output_streams[i];
 +        int frame_size;
 +
 +        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
 +            continue;
 +
 +#if CONFIG_AVFILTER
 +        if (ost->input_video_filter) {
 +            frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]);
 +        }
 +        while (frame_available) {
 +            if (ost->output_video_filter) {
 +                AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base;
 +                if (av_buffersink_get_buffer_ref(ost->output_video_filter, &ost->picref, 0) < 0)
 +                    goto cont;
 +                if (!filtered_frame && !(filtered_frame = avcodec_alloc_frame())) {
 +                    ret = AVERROR(ENOMEM);
 +                    goto fail;
 +                }
 +                *filtered_frame= *decoded_frame; //for me_threshold
 +                if (ost->picref) {
 +                    avfilter_fill_frame_from_video_buffer_ref(filtered_frame, ost->picref);
 +                    ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
                  }
              }
 +            if (ost->picref->video && !ost->frame_aspect_ratio)
 +                ost->st->codec->sample_aspect_ratio = ost->picref->video->sample_aspect_ratio;
 +#else
 +            filtered_frame = decoded_frame;
 +#endif
 +
 +            do_video_out(output_files[ost->file_index].ctx, ost, ist, filtered_frame, &frame_size,
 +                         same_quant ? quality : ost->st->codec->global_quality);
 +            if (vstats_filename && frame_size)
 +                do_video_stats(output_files[ost->file_index].ctx, ost, frame_size);
 +#if CONFIG_AVFILTER
 +            cont:
 +            frame_available = ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
 +            avfilter_unref_buffer(ost->picref);
          }
 +        av_freep(&filtered_frame);
 +#endif
      }
  
 -    return 0;
 +fail:
 +    av_free(buffer_to_free);
 +    av_freep(&decoded_frame);
 +    return ret;
  }
  
 -static void print_sdp(AVFormatContext **avc, int n)
 +static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
  {
 -    char sdp[2048];
 +    AVSubtitle subtitle;
 +    int i, ret = avcodec_decode_subtitle2(ist->st->codec,
 +                                          &subtitle, got_output, pkt);
 +    if (ret < 0)
 +        return ret;
 +    if (!*got_output)
-         return 0;
-     pkt->size = 0;
++        return ret;
  
 -    av_sdp_create(avc, n, sdp, sizeof(sdp));
 -    printf("SDP:\n%s\n", sdp);
 -    fflush(stdout);
 +    rate_emu_sleep(ist);
 +
 +    for (i = 0; i < nb_output_streams; i++) {
 +        OutputStream *ost = &output_streams[i];
 +
 +        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
 +            continue;
 +
 +        do_subtitle_out(output_files[ost->file_index].ctx, ost, ist, &subtitle, pkt->pts);
 +    }
 +
 +    avsubtitle_free(&subtitle);
-     return 0;
++    return ret;
  }
  
 -static int copy_chapters(int infile, int outfile)
 +/* pkt = NULL means EOF (needed to flush decoder buffers) */
- static int output_packet(InputStream *ist, int ist_index,
++static int output_packet(InputStream *ist,
 +                         OutputStream *ost_table, int nb_ostreams,
 +                         const AVPacket *pkt)
  {
-     OutputStream *ost;
 -    AVFormatContext *is = input_files[infile].ctx;
 -    AVFormatContext *os = output_files[outfile];
 -    int i;
 +    int ret = 0, i;
 +    int got_output;
 +    int64_t pkt_dts = AV_NOPTS_VALUE;
 +    int64_t pkt_pts = AV_NOPTS_VALUE;
  
 -    for (i = 0; i < is->nb_chapters; i++) {
 -        AVChapter *in_ch = is->chapters[i], *out_ch;
 -        int64_t ts_off   = av_rescale_q(start_time - input_files[infile].ts_offset,
 -                                      AV_TIME_BASE_Q, in_ch->time_base);
 -        int64_t rt       = (recording_time == INT64_MAX) ? INT64_MAX :
 -                           av_rescale_q(recording_time, AV_TIME_BASE_Q, in_ch->time_base);
 +    AVPacket avpkt;
  
-     if(ist->next_pts == AV_NOPTS_VALUE)
-         ist->next_pts= ist->pts;
++    if (ist->next_pts == AV_NOPTS_VALUE)
++        ist->next_pts = ist->pts;
  
 -        if (in_ch->end < ts_off)
 -            continue;
 -        if (rt != INT64_MAX && in_ch->start > rt + ts_off)
 +    if (pkt == NULL) {
 +        /* EOF handling */
 +        av_init_packet(&avpkt);
 +        avpkt.data = NULL;
 +        avpkt.size = 0;
 +        goto handle_eof;
 +    } else {
 +        avpkt = *pkt;
 +    }
 +
 +    if(pkt->dts != AV_NOPTS_VALUE){
 +        if(ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
 +            ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
 +        pkt_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
 +    }
 +    if(pkt->pts != AV_NOPTS_VALUE)
 +        pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
 +
 +    //while we have more to decode or while the decoder did output something on EOF
 +    while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
 +    handle_eof:
-         ist->pts= ist->next_pts;
 +
-         if(avpkt.size && avpkt.size != pkt->size)
++        ist->pts = ist->next_pts;
++
++        if (avpkt.size && avpkt.size != pkt->size) {
 +            av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
 +                   "Multiple frames in a packet from stream %d\n", pkt->stream_index);
-             ist->showed_multi_packet_warning=1;
++            ist->showed_multi_packet_warning = 1;
++        }
 +
 +        switch(ist->st->codec->codec_type) {
 +        case AVMEDIA_TYPE_AUDIO:
 +            ret = transcode_audio    (ist, &avpkt, &got_output);
              break;
 +        case AVMEDIA_TYPE_VIDEO:
 +            ret = transcode_video    (ist, &avpkt, &got_output, &pkt_pts, &pkt_dts);
 +            break;
 +        case AVMEDIA_TYPE_SUBTITLE:
 +            ret = transcode_subtitles(ist, &avpkt, &got_output);
 +            break;
 +        default:
 +            return -1;
 +        }
  
 -        out_ch = av_mallocz(sizeof(AVChapter));
 -        if (!out_ch)
 -            return AVERROR(ENOMEM);
 +        if (ret < 0)
 +            return ret;
++        // touch data and size only if not EOF
++        if (pkt) {
++            if(ist->st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
++                ret = avpkt.size;
++            avpkt.data += ret;
++            avpkt.size -= ret;
++        }
 +        if (!got_output) {
-             if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
-                 continue;
-             goto discard_packet;
++            continue;
 +        }
 +    }
-  discard_packet:
  
 -        out_ch->id        = in_ch->id;
 -        out_ch->time_base = in_ch->time_base;
 -        out_ch->start     = FFMAX(0,  in_ch->start - ts_off);
 -        out_ch->end       = FFMIN(rt, in_ch->end   - ts_off);
 +    /* handle stream copy */
 +    if (!ist->decoding_needed) {
 +        rate_emu_sleep(ist);
 +        switch (ist->st->codec->codec_type) {
 +        case AVMEDIA_TYPE_AUDIO:
 +            ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
 +                             ist->st->codec->sample_rate;
 +            break;
 +        case AVMEDIA_TYPE_VIDEO:
 +            if (ist->st->codec->time_base.num != 0) {
 +                int ticks = ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
 +                ist->next_pts += ((int64_t)AV_TIME_BASE *
 +                                  ist->st->codec->time_base.num * ticks) /
 +                                  ist->st->codec->time_base.den;
 +            }
 +            break;
 +        }
 +    }
 +    for (i = 0; pkt && i < nb_ostreams; i++) {
-         ost = &ost_table[i];
++        OutputStream *ost = &ost_table[i];
  
 -        if (metadata_chapters_autocopy)
 -            av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
 +        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
 +            continue;
  
 -        os->nb_chapters++;
 -        os->chapters = av_realloc(os->chapters, sizeof(AVChapter)*os->nb_chapters);
 -        if (!os->chapters)
 -            return AVERROR(ENOMEM);
 -        os->chapters[os->nb_chapters - 1] = out_ch;
 +        do_streamcopy(ist, ost, pkt);
      }
 +
      return 0;
  }
  
@@@ -2752,11 -2637,19 +2754,11 @@@ static int transcode(OutputFile *output
              }
          }
  
 -        /* finish if recording time exhausted */
 -        if (recording_time != INT64_MAX &&
 -            av_compare_ts(pkt.pts, ist->st->time_base, recording_time + start_time, (AVRational){1, 1000000}) >= 0) {
 -            ist->is_past_recording_time = 1;
 -            goto discard_packet;
 -        }
 -
          //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
-         if (output_packet(ist, ist_index, output_streams, nb_output_streams, &pkt) < 0) {
 -        if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) {
++        if (output_packet(ist, output_streams, nb_output_streams, &pkt) < 0) {
  
 -            if (verbose >= 0)
 -                fprintf(stderr, "Error while decoding stream #%d.%d\n",
 -                        ist->file_index, ist->st->index);
 +            av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
 +                   ist->file_index, ist->st->index);
              if (exit_on_error)
                  exit_program(1);
              av_free_packet(&pkt);
      for (i = 0; i < nb_input_streams; i++) {
          ist = &input_streams[i];
          if (ist->decoding_needed) {
-             output_packet(ist, i, output_streams, nb_output_streams, NULL);
 -            output_packet(ist, i, ost_table, nb_ostreams, NULL);
++            output_packet(ist, output_streams, nb_output_streams, NULL);
          }
      }
 +    flush_encoders(output_streams, nb_output_streams);
  
      term_exit();
  
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -51,11 -50,15 +51,16 @@@ static int set_palette(AVFrame * frame
  {
      uint32_t * palette = (uint32_t *)frame->data[1];
      int a;
+     if (buf_size < 256*3)
+         return AVERROR_INVALIDDATA;
      for(a = 0; a < 256; a++){
 -        palette[a] = AV_RB24(&palette_buffer[a * 3]) * 4;
 +        palette[a] = 0xFF << 24 | AV_RB24(&palette_buffer[a * 3]) * 4;
 +        palette[a] |= palette[a] >> 6 & 0x30303;
      }
      frame->palette_has_changed = 1;
+     return 256*3;
  }
  
  static int bethsoftvid_decode_frame(AVCodecContext *avctx,
Simple merge
@@@ -453,11 -447,25 +455,26 @@@ fail
      return ret;
  }
  
- static const AVOption options[]={
- {"dpi", "set the image resolution (in dpi)", offsetof(TiffEncoderContext, dpi), AV_OPT_TYPE_INT, {.dbl = 72}, 1, 0x10000, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_ENCODING_PARAM},
- {NULL}
+ #define OFFSET(x) offsetof(TiffEncoderContext, x)
+ #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
+ static const AVOption options[] = {
++    {"dpi", "set the image resolution (in dpi)", OFFSET(dpi), AV_OPT_TYPE_INT, {.dbl = 72}, 1, 0x10000, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_ENCODING_PARAM},
+     { "compression_algo", NULL, OFFSET(compr), AV_OPT_TYPE_INT, {TIFF_PACKBITS}, TIFF_RAW, TIFF_DEFLATE, VE, "compression_algo" },
+     { "packbits", NULL, 0, AV_OPT_TYPE_CONST, {TIFF_PACKBITS}, 0, 0, VE, "compression_algo" },
+     { "raw",      NULL, 0, AV_OPT_TYPE_CONST, {TIFF_RAW},      0, 0, VE, "compression_algo" },
+     { "lzw",      NULL, 0, AV_OPT_TYPE_CONST, {TIFF_LZW},      0, 0, VE, "compression_algo" },
+ #if CONFIG_ZLIB
+     { "deflate",  NULL, 0, AV_OPT_TYPE_CONST, {TIFF_DEFLATE},  0, 0, VE, "compression_algo" },
+ #endif
+     { NULL },
+ };
+ static const AVClass tiffenc_class = {
+     .class_name = "TIFF encoder",
+     .item_name  = av_default_item_name,
+     .option     = options,
+     .version    = LIBAVUTIL_VERSION_INT,
  };
- static const AVClass class = { "tiff", av_default_item_name, options, LIBAVUTIL_VERSION_INT };
  
  AVCodec ff_tiff_encoder = {
      .name           = "tiff",
                                PIX_FMT_MONOBLACK, PIX_FMT_MONOWHITE,
                                PIX_FMT_YUV420P, PIX_FMT_YUV422P,
                                PIX_FMT_YUV444P, PIX_FMT_YUV410P,
 -                              PIX_FMT_YUV411P,
 +                              PIX_FMT_YUV411P, PIX_FMT_RGB48LE,
                                PIX_FMT_NONE},
      .long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
-     .priv_class= &class,
+     .priv_class     = &tiffenc_class,
  };
index 14a9fdc,0000000..e3c7f8c
mode 100644,000000..100644
--- /dev/null
@@@ -1,418 -1,0 +1,418 @@@
-                              inlink->format, inlink->planar, 16) < 0)
 +/*
 + * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram <smeenaks@ucsd.edu>
 + * Copyright (c) 2011 Stefano Sabatini
 + * Copyright (c) 2011 Mina Nagy Zaki
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/**
 + * @file
 + * sample format and channel layout conversion audio filter
 + * based on code in libavcodec/resample.c by Fabrice Bellard and
 + * libavcodec/audioconvert.c by Michael Niedermayer
 + */
 +
 +#include "libavutil/audioconvert.h"
 +#include "libavutil/avstring.h"
 +#include "libavcodec/audioconvert.h"
 +#include "avfilter.h"
 +#include "internal.h"
 +
 +typedef struct {
 +    enum AVSampleFormat  out_sample_fmt,  in_sample_fmt;   ///< in/out sample formats
 +    int64_t              out_chlayout,    in_chlayout;     ///< in/out channel layout
 +    int                  out_nb_channels, in_nb_channels;  ///< number of in/output channels
 +    enum AVFilterPacking out_packing_fmt, in_packing_fmt;  ///< output packing format
 +
 +    int max_nb_samples;                     ///< maximum number of buffered samples
 +    AVFilterBufferRef *mix_samplesref;      ///< rematrixed buffer
 +    AVFilterBufferRef *out_samplesref;      ///< output buffer after required conversions
 +
 +    uint8_t *in_mix[8], *out_mix[8];        ///< input/output for rematrixing functions
 +    uint8_t *packed_data[8];                ///< pointers for packing conversion
 +    int out_strides[8], in_strides[8];      ///< input/output strides for av_audio_convert
 +    uint8_t **in_conv, **out_conv;          ///< input/output for av_audio_convert
 +
 +    AVAudioConvert *audioconvert_ctx;       ///< context for conversion to output sample format
 +
 +    void (*convert_chlayout)();             ///< function to do the requested rematrixing
 +} AConvertContext;
 +
 +#define REMATRIX_FUNC_SIG(NAME) static void REMATRIX_FUNC_NAME(NAME) \
 +    (FMT_TYPE *outp[], FMT_TYPE *inp[], int nb_samples, AConvertContext *aconvert)
 +
 +#define FMT_TYPE uint8_t
 +#define REMATRIX_FUNC_NAME(NAME) NAME ## _u8
 +#include "af_aconvert_rematrix.c"
 +
 +#define FMT_TYPE int16_t
 +#define REMATRIX_FUNC_NAME(NAME) NAME ## _s16
 +#include "af_aconvert_rematrix.c"
 +
 +#define FMT_TYPE int32_t
 +#define REMATRIX_FUNC_NAME(NAME) NAME ## _s32
 +#include "af_aconvert_rematrix.c"
 +
 +#define FLOATING
 +
 +#define FMT_TYPE float
 +#define REMATRIX_FUNC_NAME(NAME) NAME ## _flt
 +#include "af_aconvert_rematrix.c"
 +
 +#define FMT_TYPE double
 +#define REMATRIX_FUNC_NAME(NAME) NAME ## _dbl
 +#include "af_aconvert_rematrix.c"
 +
 +#define FMT_TYPE uint8_t
 +#define REMATRIX_FUNC_NAME(NAME) NAME
 +REMATRIX_FUNC_SIG(stereo_remix_planar)
 +{
 +    int size = av_get_bytes_per_sample(aconvert->in_sample_fmt) * nb_samples;
 +
 +    memcpy(outp[0], inp[0], size);
 +    memcpy(outp[1], inp[aconvert->in_nb_channels == 1 ? 0 : 1], size);
 +}
 +
 +#define REGISTER_FUNC_PACKING(INCHLAYOUT, OUTCHLAYOUT, FUNC, PACKING)   \
 +    {INCHLAYOUT, OUTCHLAYOUT, PACKING, AV_SAMPLE_FMT_U8,  FUNC##_u8},   \
 +    {INCHLAYOUT, OUTCHLAYOUT, PACKING, AV_SAMPLE_FMT_S16, FUNC##_s16},  \
 +    {INCHLAYOUT, OUTCHLAYOUT, PACKING, AV_SAMPLE_FMT_S32, FUNC##_s32},  \
 +    {INCHLAYOUT, OUTCHLAYOUT, PACKING, AV_SAMPLE_FMT_FLT, FUNC##_flt},  \
 +    {INCHLAYOUT, OUTCHLAYOUT, PACKING, AV_SAMPLE_FMT_DBL, FUNC##_dbl},
 +
 +#define REGISTER_FUNC(INCHLAYOUT, OUTCHLAYOUT, FUNC)                                \
 +    REGISTER_FUNC_PACKING(INCHLAYOUT, OUTCHLAYOUT, FUNC##_packed, AVFILTER_PACKED)  \
 +    REGISTER_FUNC_PACKING(INCHLAYOUT, OUTCHLAYOUT, FUNC##_planar, AVFILTER_PLANAR)
 +
 +static const struct RematrixFunctionInfo {
 +    int64_t in_chlayout, out_chlayout;
 +    int planar, sfmt;
 +    void (*func)();
 +} rematrix_funcs[] = {
 +    REGISTER_FUNC        (AV_CH_LAYOUT_STEREO,  AV_CH_LAYOUT_5POINT1, stereo_to_surround_5p1)
 +    REGISTER_FUNC        (AV_CH_LAYOUT_5POINT1, AV_CH_LAYOUT_STEREO,  surround_5p1_to_stereo)
 +    REGISTER_FUNC_PACKING(AV_CH_LAYOUT_STEREO,  AV_CH_LAYOUT_MONO,    stereo_to_mono_packed, AVFILTER_PACKED)
 +    REGISTER_FUNC_PACKING(AV_CH_LAYOUT_MONO,    AV_CH_LAYOUT_STEREO,  mono_to_stereo_packed, AVFILTER_PACKED)
 +    REGISTER_FUNC        (0,                    AV_CH_LAYOUT_MONO,    mono_downmix)
 +    REGISTER_FUNC_PACKING(0,                    AV_CH_LAYOUT_STEREO,  stereo_downmix_packed, AVFILTER_PACKED)
 +
 +    // This function works for all sample formats
 +    {0, AV_CH_LAYOUT_STEREO, AVFILTER_PLANAR, -1, stereo_remix_planar}
 +};
 +
 +static av_cold int init(AVFilterContext *ctx, const char *args0, void *opaque)
 +{
 +    AConvertContext *aconvert = ctx->priv;
 +    char *arg, *ptr = NULL;
 +    int ret = 0;
 +    char *args = av_strdup(args0);
 +
 +    aconvert->out_sample_fmt  = AV_SAMPLE_FMT_NONE;
 +    aconvert->out_chlayout    = 0;
 +    aconvert->out_packing_fmt = -1;
 +
 +    if ((arg = av_strtok(args, ":", &ptr)) && strcmp(arg, "auto")) {
 +        if ((ret = ff_parse_sample_format(&aconvert->out_sample_fmt, arg, ctx)) < 0)
 +            goto end;
 +    }
 +    if ((arg = av_strtok(NULL, ":", &ptr)) && strcmp(arg, "auto")) {
 +        if ((ret = ff_parse_channel_layout(&aconvert->out_chlayout, arg, ctx)) < 0)
 +            goto end;
 +    }
 +    if ((arg = av_strtok(NULL, ":", &ptr)) && strcmp(arg, "auto")) {
 +        if ((ret = ff_parse_packing_format((int *)&aconvert->out_packing_fmt, arg, ctx)) < 0)
 +            goto end;
 +    }
 +
 +end:
 +    av_freep(&args);
 +    return ret;
 +}
 +
 +static av_cold void uninit(AVFilterContext *ctx)
 +{
 +    AConvertContext *aconvert = ctx->priv;
 +    avfilter_unref_buffer(aconvert->mix_samplesref);
 +    avfilter_unref_buffer(aconvert->out_samplesref);
 +    if (aconvert->audioconvert_ctx)
 +        av_audio_convert_free(aconvert->audioconvert_ctx);
 +}
 +
 +static int query_formats(AVFilterContext *ctx)
 +{
 +    AVFilterFormats *formats = NULL;
 +    AConvertContext *aconvert = ctx->priv;
 +    AVFilterLink *inlink  = ctx->inputs[0];
 +    AVFilterLink *outlink = ctx->outputs[0];
 +
 +    avfilter_formats_ref(avfilter_make_all_formats(AVMEDIA_TYPE_AUDIO),
 +                         &inlink->out_formats);
 +    if (aconvert->out_sample_fmt != AV_SAMPLE_FMT_NONE) {
 +        formats = NULL;
 +        avfilter_add_format(&formats, aconvert->out_sample_fmt);
 +        avfilter_formats_ref(formats, &outlink->in_formats);
 +    } else
 +        avfilter_formats_ref(avfilter_make_all_formats(AVMEDIA_TYPE_AUDIO),
 +                             &outlink->in_formats);
 +
 +    avfilter_formats_ref(avfilter_make_all_channel_layouts(),
 +                         &inlink->out_chlayouts);
 +    if (aconvert->out_chlayout != 0) {
 +        formats = NULL;
 +        avfilter_add_format(&formats, aconvert->out_chlayout);
 +        avfilter_formats_ref(formats, &outlink->in_chlayouts);
 +    } else
 +        avfilter_formats_ref(avfilter_make_all_channel_layouts(),
 +                             &outlink->in_chlayouts);
 +
 +    avfilter_formats_ref(avfilter_make_all_packing_formats(),
 +                         &inlink->out_packing);
 +    if (aconvert->out_packing_fmt != -1) {
 +        formats = NULL;
 +        avfilter_add_format(&formats, aconvert->out_packing_fmt);
 +        avfilter_formats_ref(formats, &outlink->in_packing);
 +    } else
 +        avfilter_formats_ref(avfilter_make_all_packing_formats(),
 +                             &outlink->in_packing);
 +
 +    return 0;
 +}
 +
 +static int config_output(AVFilterLink *outlink)
 +{
 +    AVFilterLink *inlink = outlink->src->inputs[0];
 +    AConvertContext *aconvert = outlink->src->priv;
 +    char buf1[64], buf2[64];
 +
 +    aconvert->in_sample_fmt  = inlink->format;
 +    aconvert->in_packing_fmt = inlink->planar;
 +    if (aconvert->out_packing_fmt == -1)
 +        aconvert->out_packing_fmt = outlink->planar;
 +    aconvert->in_chlayout    = inlink->channel_layout;
 +    aconvert->in_nb_channels =
 +        av_get_channel_layout_nb_channels(inlink->channel_layout);
 +
 +    /* if not specified in args, use the format and layout of the output */
 +    if (aconvert->out_sample_fmt == AV_SAMPLE_FMT_NONE)
 +        aconvert->out_sample_fmt = outlink->format;
 +    if (aconvert->out_chlayout   == 0)
 +        aconvert->out_chlayout   = outlink->channel_layout;
 +    aconvert->out_nb_channels  =
 +        av_get_channel_layout_nb_channels(outlink->channel_layout);
 +
 +    av_get_channel_layout_string(buf1, sizeof(buf1),
 +                                 -1, inlink ->channel_layout);
 +    av_get_channel_layout_string(buf2, sizeof(buf2),
 +                                 -1, outlink->channel_layout);
 +    av_log(outlink->src, AV_LOG_INFO,
 +           "fmt:%s cl:%s planar:%i -> fmt:%s cl:%s planar:%i\n",
 +           av_get_sample_fmt_name(inlink ->format), buf1, inlink ->planar,
 +           av_get_sample_fmt_name(outlink->format), buf2, outlink->planar);
 +
 +    /* compute which channel layout conversion to use */
 +    if (inlink->channel_layout != outlink->channel_layout) {
 +        int i;
 +        for (i = 0; i < sizeof(rematrix_funcs); i++) {
 +            const struct RematrixFunctionInfo *f = &rematrix_funcs[i];
 +            if ((f->in_chlayout  == 0 || f->in_chlayout  == inlink ->channel_layout) &&
 +                (f->out_chlayout == 0 || f->out_chlayout == outlink->channel_layout) &&
 +                (f->planar == -1 || f->planar == inlink->planar) &&
 +                (f->sfmt   == -1 || f->sfmt   == inlink->format)
 +               ) {
 +                aconvert->convert_chlayout = f->func;
 +                break;
 +            }
 +        }
 +        if (!aconvert->convert_chlayout) {
 +            av_log(outlink->src, AV_LOG_ERROR,
 +                   "Unsupported channel layout conversion '%s -> %s' requested!\n",
 +                   buf1, buf2);
 +            return AVERROR(EINVAL);
 +        }
 +    }
 +
 +    return 0;
 +}
 +
 +static int init_buffers(AVFilterLink *inlink, int nb_samples)
 +{
 +    AConvertContext *aconvert = inlink->dst->priv;
 +    AVFilterLink * const outlink = inlink->dst->outputs[0];
 +    int i, packed_stride = 0;
 +    const unsigned
 +        packing_conv = inlink->planar != outlink->planar &&
 +                       aconvert->out_nb_channels != 1,
 +        format_conv  = inlink->format != outlink->format;
 +    int nb_channels  = aconvert->out_nb_channels;
 +
 +    uninit(inlink->dst);
 +    aconvert->max_nb_samples = nb_samples;
 +
 +    if (aconvert->convert_chlayout) {
 +        /* allocate buffer for storing intermediary mixing samplesref */
 +        uint8_t *data[8];
 +        int linesize[8];
 +        int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);
 +
 +        if (av_samples_alloc(data, linesize, nb_channels, nb_samples,
++                             inlink->format, 16) < 0)
 +            goto fail_no_mem;
 +        aconvert->mix_samplesref =
 +            avfilter_get_audio_buffer_ref_from_arrays(data, linesize, AV_PERM_WRITE,
 +                                                      nb_samples, inlink->format,
 +                                                      outlink->channel_layout,
 +                                                      inlink->planar);
 +        if (!aconvert->mix_samplesref)
 +            goto fail_no_mem;
 +    }
 +
 +    // if there's a format/packing conversion we need an audio_convert context
 +    if (format_conv || packing_conv) {
 +        aconvert->out_samplesref =
 +            avfilter_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
 +        if (!aconvert->out_samplesref)
 +            goto fail_no_mem;
 +
 +        aconvert->in_strides [0] = av_get_bytes_per_sample(inlink ->format);
 +        aconvert->out_strides[0] = av_get_bytes_per_sample(outlink->format);
 +
 +        aconvert->out_conv = aconvert->out_samplesref->data;
 +        if (aconvert->mix_samplesref)
 +            aconvert->in_conv = aconvert->mix_samplesref->data;
 +
 +        if (packing_conv) {
 +            // packed -> planar
 +            if (outlink->planar == AVFILTER_PLANAR) {
 +                if (aconvert->mix_samplesref)
 +                    aconvert->packed_data[0] = aconvert->mix_samplesref->data[0];
 +                aconvert->in_conv         = aconvert->packed_data;
 +                packed_stride             = aconvert->in_strides[0];
 +                aconvert->in_strides[0]  *= nb_channels;
 +            // planar -> packed
 +            } else {
 +                aconvert->packed_data[0]  = aconvert->out_samplesref->data[0];
 +                aconvert->out_conv        = aconvert->packed_data;
 +                packed_stride             = aconvert->out_strides[0];
 +                aconvert->out_strides[0] *= nb_channels;
 +            }
 +        } else if (outlink->planar == AVFILTER_PACKED) {
 +            /* If there's no packing conversion, and the stream is packed
 +             * then we treat the entire stream as one big channel
 +             */
 +            nb_channels = 1;
 +        }
 +
 +        for (i = 1; i < nb_channels; i++) {
 +            aconvert->packed_data[i] = aconvert->packed_data[i-1] + packed_stride;
 +            aconvert->in_strides[i]  = aconvert->in_strides[0];
 +            aconvert->out_strides[i] = aconvert->out_strides[0];
 +        }
 +
 +        aconvert->audioconvert_ctx =
 +                av_audio_convert_alloc(outlink->format, nb_channels,
 +                                       inlink->format,  nb_channels, NULL, 0);
 +        if (!aconvert->audioconvert_ctx)
 +            goto fail_no_mem;
 +    }
 +
 +    return 0;
 +
 +fail_no_mem:
 +    av_log(inlink->dst, AV_LOG_ERROR, "Could not allocate memory.\n");
 +    return AVERROR(ENOMEM);
 +}
 +
 +static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
 +{
 +    AConvertContext *aconvert = inlink->dst->priv;
 +    AVFilterBufferRef *curbuf = insamplesref;
 +    AVFilterLink * const outlink = inlink->dst->outputs[0];
 +    int chan_mult;
 +
 +    /* in/reinint the internal buffers if this is the first buffer
 +     * provided or it is needed to use a bigger one */
 +    if (!aconvert->max_nb_samples ||
 +        (curbuf->audio->nb_samples > aconvert->max_nb_samples))
 +        if (init_buffers(inlink, curbuf->audio->nb_samples) < 0) {
 +            av_log(inlink->dst, AV_LOG_ERROR, "Could not initialize buffers.\n");
 +            return;
 +        }
 +
 +    /* if channel mixing is required */
 +    if (aconvert->mix_samplesref) {
 +        memcpy(aconvert->in_mix,  curbuf->data, sizeof(aconvert->in_mix));
 +        memcpy(aconvert->out_mix, aconvert->mix_samplesref->data, sizeof(aconvert->out_mix));
 +        aconvert->convert_chlayout(aconvert->out_mix,
 +                                   aconvert->in_mix,
 +                                   curbuf->audio->nb_samples,
 +                                   aconvert);
 +        curbuf = aconvert->mix_samplesref;
 +    }
 +
 +    if (aconvert->audioconvert_ctx) {
 +        if (!aconvert->mix_samplesref) {
 +            if (aconvert->in_conv == aconvert->packed_data) {
 +                int i, packed_stride = av_get_bytes_per_sample(inlink->format);
 +                aconvert->packed_data[0] = curbuf->data[0];
 +                for (i = 1; i < aconvert->out_nb_channels; i++)
 +                    aconvert->packed_data[i] = aconvert->packed_data[i-1] + packed_stride;
 +            } else {
 +                aconvert->in_conv = curbuf->data;
 +            }
 +        }
 +
 +        chan_mult = inlink->planar == outlink->planar && inlink->planar == 0 ?
 +            aconvert->out_nb_channels : 1;
 +
 +        av_audio_convert(aconvert->audioconvert_ctx,
 +                         (void * const *) aconvert->out_conv,
 +                         aconvert->out_strides,
 +                         (const void * const *) aconvert->in_conv,
 +                         aconvert->in_strides,
 +                         curbuf->audio->nb_samples * chan_mult);
 +
 +        curbuf = aconvert->out_samplesref;
 +    }
 +
 +    avfilter_copy_buffer_ref_props(curbuf, insamplesref);
 +    curbuf->audio->channel_layout = outlink->channel_layout;
 +    curbuf->audio->planar         = outlink->planar;
 +
 +    avfilter_filter_samples(inlink->dst->outputs[0],
 +                            avfilter_ref_buffer(curbuf, ~0));
 +    avfilter_unref_buffer(insamplesref);
 +}
 +
 +AVFilter avfilter_af_aconvert = {
 +    .name          = "aconvert",
 +    .description   = NULL_IF_CONFIG_SMALL("Convert the input audio to sample_fmt:channel_layout:packed_fmt."),
 +    .priv_size     = sizeof(AConvertContext),
 +    .init          = init,
 +    .uninit        = uninit,
 +    .query_formats = query_formats,
 +
 +    .inputs    = (const AVFilterPad[]) {{ .name      = "default",
 +                                    .type            = AVMEDIA_TYPE_AUDIO,
 +                                    .filter_samples  = filter_samples,
 +                                    .min_perms       = AV_PERM_READ, },
 +                                  { .name = NULL}},
 +    .outputs   = (const AVFilterPad[]) {{ .name      = "default",
 +                                    .type            = AVMEDIA_TYPE_AUDIO,
 +                                    .config_props    = config_output, },
 +                                  { .name = NULL}},
 +};
index 0cb9842,0000000..ad71e12
mode 100644,000000..100644
--- /dev/null
@@@ -1,372 -1,0 +1,372 @@@
-                            sample_fmt, planar, 16);
 +/*
 + * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram
 + * Copyright (c) 2011 Mina Nagy Zaki
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/**
 + * @file
 + * memory buffer source for audio
 + */
 +
 +#include "libavutil/audioconvert.h"
 +#include "libavutil/avstring.h"
 +#include "libavutil/fifo.h"
 +#include "asrc_abuffer.h"
 +#include "internal.h"
 +
 +typedef struct {
 +    // Audio format of incoming buffers
 +    int sample_rate;
 +    unsigned int sample_format;
 +    int64_t channel_layout;
 +    int packing_format;
 +
 +    // FIFO buffer of audio buffer ref pointers
 +    AVFifoBuffer *fifo;
 +
 +    // Normalization filters
 +    AVFilterContext *aconvert;
 +    AVFilterContext *aresample;
 +} ABufferSourceContext;
 +
 +#define FIFO_SIZE 8
 +
 +static void buf_free(AVFilterBuffer *ptr)
 +{
 +    av_free(ptr);
 +    return;
 +}
 +
 +static void set_link_source(AVFilterContext *src, AVFilterLink *link)
 +{
 +    link->src       = src;
 +    link->srcpad    = &(src->output_pads[0]);
 +    src->outputs[0] = link;
 +}
 +
 +static int reconfigure_filter(ABufferSourceContext *abuffer, AVFilterContext *filt_ctx)
 +{
 +    int ret;
 +    AVFilterLink * const inlink  = filt_ctx->inputs[0];
 +    AVFilterLink * const outlink = filt_ctx->outputs[0];
 +
 +    inlink->format         = abuffer->sample_format;
 +    inlink->channel_layout = abuffer->channel_layout;
 +    inlink->planar         = abuffer->packing_format;
 +    inlink->sample_rate    = abuffer->sample_rate;
 +
 +    filt_ctx->filter->uninit(filt_ctx);
 +    memset(filt_ctx->priv, 0, filt_ctx->filter->priv_size);
 +    if ((ret = filt_ctx->filter->init(filt_ctx, NULL , NULL)) < 0)
 +        return ret;
 +    if ((ret = inlink->srcpad->config_props(inlink)) < 0)
 +        return ret;
 +    return outlink->srcpad->config_props(outlink);
 +}
 +
 +static int insert_filter(ABufferSourceContext *abuffer,
 +                         AVFilterLink *link, AVFilterContext **filt_ctx,
 +                         const char *filt_name)
 +{
 +    int ret;
 +
 +    if ((ret = avfilter_open(filt_ctx, avfilter_get_by_name(filt_name), NULL)) < 0)
 +        return ret;
 +
 +    link->src->outputs[0] = NULL;
 +    if ((ret = avfilter_link(link->src, 0, *filt_ctx, 0)) < 0) {
 +        link->src->outputs[0] = link;
 +        return ret;
 +    }
 +
 +    set_link_source(*filt_ctx, link);
 +
 +    if ((ret = reconfigure_filter(abuffer, *filt_ctx)) < 0) {
 +        avfilter_free(*filt_ctx);
 +        return ret;
 +    }
 +
 +    return 0;
 +}
 +
 +static void remove_filter(AVFilterContext **filt_ctx)
 +{
 +    AVFilterLink *outlink = (*filt_ctx)->outputs[0];
 +    AVFilterContext *src  = (*filt_ctx)->inputs[0]->src;
 +
 +    (*filt_ctx)->outputs[0] = NULL;
 +    avfilter_free(*filt_ctx);
 +    *filt_ctx = NULL;
 +
 +    set_link_source(src, outlink);
 +}
 +
 +static inline void log_input_change(void *ctx, AVFilterLink *link, AVFilterBufferRef *ref)
 +{
 +    char old_layout_str[16], new_layout_str[16];
 +    av_get_channel_layout_string(old_layout_str, sizeof(old_layout_str),
 +                                 -1, link->channel_layout);
 +    av_get_channel_layout_string(new_layout_str, sizeof(new_layout_str),
 +                                 -1, ref->audio->channel_layout);
 +    av_log(ctx, AV_LOG_INFO,
 +           "Audio input format changed: "
 +           "%s:%s:%d -> %s:%s:%d, normalizing\n",
 +           av_get_sample_fmt_name(link->format),
 +           old_layout_str, (int)link->sample_rate,
 +           av_get_sample_fmt_name(ref->format),
 +           new_layout_str, ref->audio->sample_rate);
 +}
 +
 +int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *ctx,
 +                                        AVFilterBufferRef *samplesref,
 +                                        int av_unused flags)
 +{
 +    ABufferSourceContext *abuffer = ctx->priv;
 +    AVFilterLink *link;
 +    int ret, logged = 0;
 +
 +    if (av_fifo_space(abuffer->fifo) < sizeof(samplesref)) {
 +        av_log(ctx, AV_LOG_ERROR,
 +               "Buffering limit reached. Please consume some available frames "
 +               "before adding new ones.\n");
 +        return AVERROR(EINVAL);
 +    }
 +
 +    // Normalize input
 +
 +    link = ctx->outputs[0];
 +    if (samplesref->audio->sample_rate != link->sample_rate) {
 +
 +        log_input_change(ctx, link, samplesref);
 +        logged = 1;
 +
 +        abuffer->sample_rate = samplesref->audio->sample_rate;
 +
 +        if (!abuffer->aresample) {
 +            ret = insert_filter(abuffer, link, &abuffer->aresample, "aresample");
 +            if (ret < 0) return ret;
 +        } else {
 +            link = abuffer->aresample->outputs[0];
 +            if (samplesref->audio->sample_rate == link->sample_rate)
 +                remove_filter(&abuffer->aresample);
 +            else
 +                if ((ret = reconfigure_filter(abuffer, abuffer->aresample)) < 0)
 +                    return ret;
 +        }
 +    }
 +
 +    link = ctx->outputs[0];
 +    if (samplesref->format                != link->format         ||
 +        samplesref->audio->channel_layout != link->channel_layout ||
 +        samplesref->audio->planar         != link->planar) {
 +
 +        if (!logged) log_input_change(ctx, link, samplesref);
 +
 +        abuffer->sample_format  = samplesref->format;
 +        abuffer->channel_layout = samplesref->audio->channel_layout;
 +        abuffer->packing_format = samplesref->audio->planar;
 +
 +        if (!abuffer->aconvert) {
 +            ret = insert_filter(abuffer, link, &abuffer->aconvert, "aconvert");
 +            if (ret < 0) return ret;
 +        } else {
 +            link = abuffer->aconvert->outputs[0];
 +            if (samplesref->format                == link->format         &&
 +                samplesref->audio->channel_layout == link->channel_layout &&
 +                samplesref->audio->planar         == link->planar
 +               )
 +                remove_filter(&abuffer->aconvert);
 +            else
 +                if ((ret = reconfigure_filter(abuffer, abuffer->aconvert)) < 0)
 +                    return ret;
 +        }
 +    }
 +
 +    if (sizeof(samplesref) != av_fifo_generic_write(abuffer->fifo, &samplesref,
 +                                                    sizeof(samplesref), NULL)) {
 +        av_log(ctx, AV_LOG_ERROR, "Error while writing to FIFO\n");
 +        return AVERROR(EINVAL);
 +    }
 +
 +    return 0;
 +}
 +
 +int av_asrc_buffer_add_samples(AVFilterContext *ctx,
 +                               uint8_t *data[8], int linesize[8],
 +                               int nb_samples, int sample_rate,
 +                               int sample_fmt, int64_t channel_layout, int planar,
 +                               int64_t pts, int av_unused flags)
 +{
 +    AVFilterBufferRef *samplesref;
 +
 +    samplesref = avfilter_get_audio_buffer_ref_from_arrays(
 +                     data, linesize, AV_PERM_WRITE,
 +                     nb_samples,
 +                     sample_fmt, channel_layout, planar);
 +    if (!samplesref)
 +        return AVERROR(ENOMEM);
 +
 +    samplesref->buf->free  = buf_free;
 +    samplesref->pts = pts;
 +    samplesref->audio->sample_rate = sample_rate;
 +
 +    return av_asrc_buffer_add_audio_buffer_ref(ctx, samplesref, 0);
 +}
 +
 +int av_asrc_buffer_add_buffer(AVFilterContext *ctx,
 +                              uint8_t *buf, int buf_size, int sample_rate,
 +                              int sample_fmt, int64_t channel_layout, int planar,
 +                              int64_t pts, int av_unused flags)
 +{
 +    uint8_t *data[8];
 +    int linesize[8];
 +    int nb_channels = av_get_channel_layout_nb_channels(channel_layout),
 +        nb_samples  = buf_size / nb_channels / av_get_bytes_per_sample(sample_fmt);
 +
 +    av_samples_fill_arrays(data, linesize,
 +                           buf, nb_channels, nb_samples,
++                           sample_fmt, 16);
 +
 +    return av_asrc_buffer_add_samples(ctx,
 +                                      data, linesize, nb_samples,
 +                                      sample_rate,
 +                                      sample_fmt, channel_layout, planar,
 +                                      pts, flags);
 +}
 +
 +static av_cold int init(AVFilterContext *ctx, const char *args0, void *opaque)
 +{
 +    ABufferSourceContext *abuffer = ctx->priv;
 +    char *arg = NULL, *ptr, chlayout_str[16];
 +    char *args = av_strdup(args0);
 +    int ret;
 +
 +    arg = av_strtok(args, ":", &ptr);
 +
 +#define ADD_FORMAT(fmt_name)                                            \
 +    if (!arg)                                                           \
 +        goto arg_fail;                                                  \
 +    if ((ret = ff_parse_##fmt_name(&abuffer->fmt_name, arg, ctx)) < 0) { \
 +        av_freep(&args);                                                \
 +        return ret;                                                     \
 +    }                                                                   \
 +    if (*args)                                                          \
 +        arg = av_strtok(NULL, ":", &ptr)
 +
 +    ADD_FORMAT(sample_rate);
 +    ADD_FORMAT(sample_format);
 +    ADD_FORMAT(channel_layout);
 +    ADD_FORMAT(packing_format);
 +
 +    abuffer->fifo = av_fifo_alloc(FIFO_SIZE*sizeof(AVFilterBufferRef*));
 +    if (!abuffer->fifo) {
 +        av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo, filter init failed.\n");
 +        return AVERROR(ENOMEM);
 +    }
 +
 +    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str),
 +                                 -1, abuffer->channel_layout);
 +    av_log(ctx, AV_LOG_INFO, "format:%s layout:%s rate:%d\n",
 +           av_get_sample_fmt_name(abuffer->sample_format), chlayout_str,
 +           abuffer->sample_rate);
 +    av_freep(&args);
 +
 +    return 0;
 +
 +arg_fail:
 +    av_log(ctx, AV_LOG_ERROR, "Invalid arguments, must be of the form "
 +                              "sample_rate:sample_fmt:channel_layout:packing\n");
 +    av_freep(&args);
 +    return AVERROR(EINVAL);
 +}
 +
 +static av_cold void uninit(AVFilterContext *ctx)
 +{
 +    ABufferSourceContext *abuffer = ctx->priv;
 +    av_fifo_free(abuffer->fifo);
 +}
 +
 +static int query_formats(AVFilterContext *ctx)
 +{
 +    ABufferSourceContext *abuffer = ctx->priv;
 +    AVFilterFormats *formats;
 +
 +    formats = NULL;
 +    avfilter_add_format(&formats, abuffer->sample_format);
 +    avfilter_set_common_sample_formats(ctx, formats);
 +
 +    formats = NULL;
 +    avfilter_add_format(&formats, abuffer->channel_layout);
 +    avfilter_set_common_channel_layouts(ctx, formats);
 +
 +    formats = NULL;
 +    avfilter_add_format(&formats, abuffer->packing_format);
 +    avfilter_set_common_packing_formats(ctx, formats);
 +
 +    return 0;
 +}
 +
 +static int config_output(AVFilterLink *outlink)
 +{
 +    ABufferSourceContext *abuffer = outlink->src->priv;
 +    outlink->sample_rate = abuffer->sample_rate;
 +    return 0;
 +}
 +
 +static int request_frame(AVFilterLink *outlink)
 +{
 +    ABufferSourceContext *abuffer = outlink->src->priv;
 +    AVFilterBufferRef *samplesref;
 +
 +    if (!av_fifo_size(abuffer->fifo)) {
 +        av_log(outlink->src, AV_LOG_ERROR,
 +               "request_frame() called with no available frames!\n");
 +        return AVERROR(EINVAL);
 +    }
 +
 +    av_fifo_generic_read(abuffer->fifo, &samplesref, sizeof(samplesref), NULL);
 +    avfilter_filter_samples(outlink, avfilter_ref_buffer(samplesref, ~0));
 +    avfilter_unref_buffer(samplesref);
 +
 +    return 0;
 +}
 +
 +static int poll_frame(AVFilterLink *outlink)
 +{
 +    ABufferSourceContext *abuffer = outlink->src->priv;
 +    return av_fifo_size(abuffer->fifo)/sizeof(AVFilterBufferRef*);
 +}
 +
 +AVFilter avfilter_asrc_abuffer = {
 +    .name        = "abuffer",
 +    .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
 +    .priv_size   = sizeof(ABufferSourceContext),
 +    .query_formats = query_formats,
 +
 +    .init        = init,
 +    .uninit      = uninit,
 +
 +    .inputs      = (const AVFilterPad[]) {{ .name = NULL }},
 +    .outputs     = (const AVFilterPad[]) {{ .name      = "default",
 +                                      .type            = AVMEDIA_TYPE_AUDIO,
 +                                      .request_frame   = request_frame,
 +                                      .poll_frame      = poll_frame,
 +                                      .config_props    = config_output, },
 +                                    { .name = NULL}},
 +};
@@@ -81,29 -56,81 +81,29 @@@ AVFilterBufferRef *avfilter_default_get
  }
  
  AVFilterBufferRef *avfilter_default_get_audio_buffer(AVFilterLink *link, int perms,
 -                                                     enum AVSampleFormat sample_fmt, int size,
 -                                                     int64_t channel_layout, int planar)
 +                                                     int nb_samples)
  {
 -    AVFilterBuffer *samples = av_mallocz(sizeof(AVFilterBuffer));
 -    AVFilterBufferRef *ref = NULL;
 -    int i, sample_size, chans_nb, bufsize, per_channel_size, step_size = 0;
 -    char *buf;
 -
 -    if (!samples || !(ref = av_mallocz(sizeof(AVFilterBufferRef))))
 -        goto fail;
 -
 -    ref->buf                   = samples;
 -    ref->format                = sample_fmt;
 -
 -    ref->audio = av_mallocz(sizeof(AVFilterBufferRefAudioProps));
 -    if (!ref->audio)
 -        goto fail;
 -
 -    ref->audio->channel_layout = channel_layout;
 -    ref->audio->size           = size;
 -    ref->audio->planar         = planar;
 -
 -    /* make sure the buffer gets read permission or it's useless for output */
 -    ref->perms = perms | AV_PERM_READ;
 -
 -    samples->refcount   = 1;
 -    samples->free       = ff_avfilter_default_free_buffer;
 -
 -    sample_size = av_get_bytes_per_sample(sample_fmt);
 -    chans_nb = av_get_channel_layout_nb_channels(channel_layout);
 -
 -    per_channel_size = size/chans_nb;
 -    ref->audio->nb_samples = per_channel_size/sample_size;
 -
 -    /* Set the number of bytes to traverse to reach next sample of a particular channel:
 -     * For planar, this is simply the sample size.
 -     * For packed, this is the number of samples * sample_size.
 -     */
 -    for (i = 0; i < chans_nb; i++)
 -        samples->linesize[i] = planar > 0 ? per_channel_size : sample_size;
 -    memset(&samples->linesize[chans_nb], 0, (8-chans_nb) * sizeof(samples->linesize[0]));
 +    AVFilterBufferRef *samplesref = NULL;
 +    int linesize[8];
 +    uint8_t *data[8];
 +    int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
  
      /* Calculate total buffer size, round to multiple of 16 to be SIMD friendly */
 -    bufsize = (size + 15)&~15;
 -    buf = av_malloc(bufsize);
 -    if (!buf)
 -        goto fail;
 -
 -    /* For planar, set the start point of each channel's data within the buffer
 -     * For packed, set the start point of the entire buffer only
 -     */
 -    samples->data[0] = buf;
 -    if (buf && planar) {
 -        for (i = 1; i < chans_nb; i++) {
 -            step_size += per_channel_size;
 -            samples->data[i] = buf + step_size;
 -        }
 -    } else {
 -        for (i = 1; i < chans_nb; i++)
 -            samples->data[i] = buf;
 -    }
 -
 -    memset(&samples->data[chans_nb], 0, (8-chans_nb) * sizeof(samples->data[0]));
 -
 -    memcpy(ref->data,     samples->data,     sizeof(ref->data));
 -    memcpy(ref->linesize, samples->linesize, sizeof(ref->linesize));
 +    if (av_samples_alloc(data, linesize,
 +                         nb_channels, nb_samples, link->format,
-                          link->planar, 16) < 0)
++                         16) < 0)
 +        return NULL;
  
 -    return ref;
 +    samplesref =
 +        avfilter_get_audio_buffer_ref_from_arrays(data, linesize, perms,
 +                                                  nb_samples, link->format,
 +                                                  link->channel_layout, link->planar);
 +    if (!samplesref) {
 +        av_free(data[0]);
 +        return NULL;
 +    }
  
 -fail:
 -    if (ref)
 -        av_free(ref->audio);
 -    av_free(ref);
 -    av_free(samples);
 -    return NULL;
 +    return samplesref;
  }
  
  void avfilter_default_start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
@@@ -49,14 -51,14 +51,19 @@@ static int read_packet(AVFormatContext 
  {
      int ret;
  
-     ret = av_get_packet(s->pb, pkt, CDG_PACKET_SIZE);
+     while (1) {
+         ret = av_get_packet(s->pb, pkt, CDG_PACKET_SIZE);
+         if (ret < 1 || (pkt->data[0] & CDG_MASK) == CDG_COMMAND)
+             break;
+         av_free_packet(pkt);
+     }
  
      pkt->stream_index = 0;
 +    pkt->dts=pkt->pts= s->streams[0]->cur_dts;
 +
 +    if(ret>5 && (pkt->data[0]&0x3F) == 9 && (pkt->data[1]&0x3F)==1 && !(pkt->data[2+2+1] & 0x0F)){
 +        pkt->flags = AV_PKT_FLAG_KEY;
 +    }
      return ret;
  }
  
Simple merge
Simple merge
Simple merge
   */
  
  #define LIBAVUTIL_VERSION_MAJOR 51
- #define LIBAVUTIL_VERSION_MINOR 26
 -#define LIBAVUTIL_VERSION_MINOR 18
++#define LIBAVUTIL_VERSION_MINOR 27
  #define LIBAVUTIL_VERSION_MICRO  0
  
  #define LIBAVUTIL_VERSION_INT   AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
@@@ -23,8 -23,9 +23,9 @@@
  #include <string.h>
  
  typedef struct SampleFmtInfo {
 -    const char *name;
 +    char name[4];
      int bits;
+     int planar;
  } SampleFmtInfo;
  
  /** this table gives more information about formats */
Simple merge
diff --cc tests/fate.mak
Simple merge
@@@ -1,30 -1,27 +1,27 @@@
  0, 0, 460800, 0x54aedafe
  1, 0, 4096, 0x00000000
  1, 2090, 4096, 0x4dfae7a6
- 0, 3003, 460800, 0x54aedafe
+ 0, 3003, 460800, 0xb7aa8b56
  1, 4180, 4096, 0x3fd9f5c6
- 0, 6006, 460800, 0x54aedafe
+ 0, 6006, 460800, 0x283ea3b5
  1, 6269, 4096, 0x7b86e310
  1, 8359, 4096, 0x611cece5
- 0, 9009, 460800, 0x54aedafe
+ 0, 9009, 460800, 0x283ea3b5
  1, 10449, 4096, 0xb7d8e872
- 0, 12012, 460800, 0xb7aa8b56
+ 0, 12012, 460800, 0x10e577de
  1, 12539, 4096, 0x072ef72b
  1, 14629, 4096, 0xb3560144
- 0, 15015, 460800, 0x283ea3b5
+ 0, 15015, 460800, 0x4e091ee2
  1, 16718, 4096, 0x0a3d119e
- 0, 18018, 460800, 0x283ea3b5
+ 0, 18018, 460800, 0x2ea88828
  1, 18808, 4096, 0xbe391aa4
  1, 20898, 4096, 0x28f7c6e5
- 0, 21021, 460800, 0x10e577de
+ 0, 21021, 460800, 0x4b7f4df0
  1, 22988, 4096, 0xca9d9df2
- 0, 24024, 460800, 0x4e091ee2
 -0, 24024, 460800, 0xb30eb322
++0, 24024, 460800, 0xa57f20d0
  1, 25078, 4096, 0x5c6b95a9
- 0, 27027, 460800, 0x2ea88828
  1, 27167, 4096, 0x0bdfc0bf
  1, 29257, 4096, 0xd95a9277
- 0, 30030, 460800, 0x4b7f4df0
  1, 31347, 4096, 0xae2bef2c
- 0, 33033, 460800, 0xa57f20d0
  1, 33437, 4096, 0xbf031e83
  1, 35527, 4096, 0x4c83e2d1