Merge remote-tracking branch 'qatar/master'
author Michael Niedermayer <michaelni@gmx.at>
Tue, 17 Jan 2012 00:40:45 +0000 (01:40 +0100)
committer Michael Niedermayer <michaelni@gmx.at>
Tue, 17 Jan 2012 01:37:30 +0000 (02:37 +0100)
* qatar/master:
  rv34: add NEON rv34_idct_add
  rv34: 1-pass inter MB reconstruction
  add SMJPEG muxer
  avformat: split out common SMJPEG code
  pictordec: Use bytestream2 functions
  avconv: use avcodec_encode_audio2()
  pcmenc: use AVCodec.encode2()
  avcodec: bump minor version and add APIChanges for the new audio encoding API
  avcodec: Add avcodec_encode_audio2() as replacement for avcodec_encode_audio()
  avcodec: add a public function, avcodec_fill_audio_frame().
  rv34: Intra 16x16 handling
  rv34: Inter/intra MB code split

Conflicts:
Changelog
libavcodec/avcodec.h
libavcodec/pictordec.c
libavcodec/utils.c
libavcodec/version.h
libavcodec/x86/rv34dsp.asm
libavformat/version.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
17 files changed:
Changelog
avconv.c
doc/APIchanges
doc/general.texi
ffmpeg.c
libavcodec/avcodec.h
libavcodec/internal.h
libavcodec/pcm.c
libavcodec/pictordec.c
libavcodec/rv34.c
libavcodec/utils.c
libavcodec/version.h
libavcodec/x86/rv34dsp.asm
libavformat/Makefile
libavformat/allformats.c
libavformat/smjpeg.c
libavformat/version.h
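
The user-facing change pulled in by this merge is the new audio encoding API: avcodec_encode_audio2() and avcodec_fill_audio_frame() replace avcodec_encode_audio(), and ffmpeg.c is switched over to them in the diff below. A rough usage sketch follows (editor's illustration only, not part of the merged diff; the encode_audio_sketch() wrapper, its parameters and the "samples" buffer are hypothetical):

    #include <libavformat/avformat.h>
    #include <libavcodec/avcodec.h>
    #include <libavutil/mathematics.h>

    /* Editor's sketch, not part of the merged diff: one call through the new
     * audio encoding API.  "samples" is an interleaved buffer in st->codec's
     * sample format; the wrapper itself is hypothetical. */
    static int encode_audio_sketch(AVFormatContext *oc, AVStream *st,
                                   const uint8_t *samples, int samples_size)
    {
        AVCodecContext *enc = st->codec;
        AVFrame *frame      = avcodec_alloc_frame();
        AVPacket pkt;
        int got_packet = 0, ret;

        if (!frame)
            return AVERROR(ENOMEM);

        av_init_packet(&pkt);
        pkt.data = NULL;              /* let the encoder allocate the packet */
        pkt.size = 0;

        frame->nb_samples = samples_size /
                            (enc->channels * av_get_bytes_per_sample(enc->sample_fmt));
        ret = avcodec_fill_audio_frame(frame, enc->channels, enc->sample_fmt,
                                       samples, samples_size, 1);
        if (ret < 0)
            goto end;

        /* replacement for avcodec_encode_audio(); passing frame == NULL flushes */
        ret = avcodec_encode_audio2(enc, &pkt, frame, &got_packet);
        if (ret < 0)
            goto end;

        if (got_packet) {
            pkt.stream_index = st->index;
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts = av_rescale_q(pkt.pts, enc->time_base, st->time_base);
            ret = av_interleaved_write_frame(oc, &pkt);
        }
    end:
        if (frame && frame->extended_data != frame->data)
            av_freep(&frame->extended_data);
        av_free(frame);
        return ret;
    }

Unlike the old call, the encoder may buffer input and only signal output through got_packet, which is why flush_encoders() in the ffmpeg.c diff below keeps calling encode_audio_frame() with a NULL frame until no more packets come back.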

diff --cc Changelog
+++ b/Changelog
@@@ -2,27 -2,17 +2,28 @@@ Entries are sorted chronologically fro
  releases are sorted from youngest to oldest.
  
  
 -version <next>:
 -
 +version next:
 +- v410 Quicktime Uncompressed 4:4:4 10-bit encoder and decoder
 +- SBaGen (SBG) binaural beats script demuxer
 +- OpenMG Audio muxer
 +- Timecode extraction in DV and MOV
 +- thumbnail video filter
 +- XML output in ffprobe
 +- asplit audio filter
 +- tinterlace video filter
 +- astreamsync audio filter
 +- amerge audio filter
  - GSM audio parser
 -
 -
 -version 0.8_beta2:
 -
+ - SMJPEG muxer
  - Automatic thread count based on detection number of (available) CPU cores
 -- Deprecate libpostproc. If desired, the switch --enable-postproc will
 -  enable it but it may be removed in a later Libav release.
 +- y41p Brooktree Uncompressed 4:1:1 12-bit encoder and decoder
 +- ffprobe -show_error option
 +- Avid 1:1 10-bit RGB Packer decoder
 +- v308 Quicktime Uncompressed 4:4:4 encoder and decoder
 +- yuv4 libquicktime packed 4:2:0 encoder and decoder
 +- ffprobe -show_frames option
 +- silencedetect audio filter
 +- ffprobe -show_program_version, -show_library_versions, -show_versions options
  - rv34: frame-level multi-threading
  - optimized iMDCT transform on x86 using SSE for mpegaudiodec
  
diff --cc avconv.c
Simple merge
diff --cc doc/APIchanges
Simple merge
diff --cc doc/general.texi
Simple merge
diff --cc ffmpeg.c
+++ b/ffmpeg.c
@@@ -173,13 -221,20 +173,12 @@@ static int input_sync
  
  static float dts_delta_threshold = 10;
  
 -static int64_t timer_start;
 +static int print_stats = 1;
  
  static uint8_t *audio_buf;
--static uint8_t *audio_out;
--static unsigned int allocated_audio_out_size, allocated_audio_buf_size;
++static unsigned int allocated_audio_buf_size;
  
 -static short *samples;
 -
 -static AVBitStreamFilterContext *video_bitstream_filters=NULL;
 -static AVBitStreamFilterContext *audio_bitstream_filters=NULL;
 -static AVBitStreamFilterContext *subtitle_bitstream_filters=NULL;
 +static uint8_t *input_tmp= NULL;
  
  #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
  
@@@ -239,12 -249,11 +238,13 @@@ typedef struct OutputStream 
      int frame_number;
      /* input pts and corresponding output pts
         for A/V sync */
 -    //double sync_ipts;        /* dts from the AVPacket of the demuxer in second units */
 +    // double sync_ipts;        /* dts from the AVPacket of the demuxer in second units */
      struct InputStream *sync_ist; /* input stream to sync against */
 -    int64_t sync_opts;       /* output frame counter, could be changed to some true timestamp */ //FIXME look at frame_number
 +    int64_t sync_opts;       /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
      AVBitStreamFilterContext *bitstream_filters;
      AVCodec *enc;
 +    int64_t max_frames;
++    AVFrame *output_frame;
  
      /* video only */
      int video_resample;
@@@ -815,26 -456,17 +815,33 @@@ void av_noreturn exit_program(int ret
          if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
              avio_close(s->pb);
          avformat_free_context(s);
 -        av_free(output_streams_for_file[i]);
 -        av_dict_free(&output_opts[i]);
 +        av_dict_free(&output_files[i].opts);
      }
 -    for(i=0;i<nb_input_files;i++) {
 -        av_close_input_file(input_files[i].ctx);
 +    for (i = 0; i < nb_output_streams; i++) {
 +        AVBitStreamFilterContext *bsfc = output_streams[i].bitstream_filters;
 +        while (bsfc) {
 +            AVBitStreamFilterContext *next = bsfc->next;
 +            av_bitstream_filter_close(bsfc);
 +            bsfc = next;
 +        }
 +        output_streams[i].bitstream_filters = NULL;
++
++        if (output_streams[i].output_frame) {
++            AVFrame *frame = output_streams[i].output_frame;
++            if (frame->extended_data != frame->data)
++                av_freep(&frame->extended_data);
++            av_freep(&frame);
++        }
      }
 -    for (i = 0; i < nb_input_streams; i++)
 +    for (i = 0; i < nb_input_files; i++) {
 +        avformat_close_input(&input_files[i].ctx);
 +    }
 +    for (i = 0; i < nb_input_streams; i++) {
 +        av_freep(&input_streams[i].decoded_frame);
 +        av_freep(&input_streams[i].filtered_frame);
          av_dict_free(&input_streams[i].opts);
 -
 -    av_free(intra_matrix);
 -    av_free(inter_matrix);
 +        free_buffer_pool(&input_streams[i]);
 +    }
  
      if (vstats_file)
          fclose(vstats_file);
  
      uninit_opts();
      av_free(audio_buf);
--    av_free(audio_out);
-     allocated_audio_buf_size = allocated_audio_out_size = 0;
 -    allocated_audio_buf_size= allocated_audio_out_size= 0;
 -    av_free(samples);
++    allocated_audio_buf_size = 0;
  
  #if CONFIG_AVFILTER
      avfilter_uninit();
@@@ -965,94 -637,159 +971,145 @@@ static void choose_pixel_fmt(AVStream *
      }
  }
  
 -static OutputStream *new_output_stream(AVFormatContext *oc, int file_idx, AVCodec *codec)
 +static double get_sync_ipts(const OutputStream *ost)
  {
 -    OutputStream *ost;
 -    AVStream *st = av_new_stream(oc, oc->nb_streams < nb_streamid_map ? streamid_map[oc->nb_streams] : 0);
 -    int idx      = oc->nb_streams - 1;
 -
 -    if (!st) {
 -        av_log(NULL, AV_LOG_ERROR, "Could not alloc stream.\n");
 -        exit_program(1);
 -    }
 -
 -    output_streams_for_file[file_idx] =
 -        grow_array(output_streams_for_file[file_idx],
 -                   sizeof(*output_streams_for_file[file_idx]),
 -                   &nb_output_streams_for_file[file_idx],
 -                   oc->nb_streams);
 -    ost = output_streams_for_file[file_idx][idx] =
 -        av_mallocz(sizeof(OutputStream));
 -    if (!ost) {
 -        fprintf(stderr, "Could not alloc output stream\n");
 -        exit_program(1);
 -    }
 -    ost->file_index = file_idx;
 -    ost->index = idx;
 -    ost->st    = st;
 -    ost->enc   = codec;
 -    if (codec)
 -        ost->opts  = filter_codec_opts(codec_opts, codec->id, oc, st);
 -
 -    avcodec_get_context_defaults3(st->codec, codec);
 -
 -    ost->sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
 -    return ost;
 +    const InputStream *ist = ost->sync_ist;
 +    OutputFile *of = &output_files[ost->file_index];
 +    return (double)(ist->pts - of->start_time) / AV_TIME_BASE;
  }
  
 -static int read_avserver_streams(AVFormatContext *s, const char *filename)
 +static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
  {
 -    int i, err;
 -    AVFormatContext *ic = NULL;
 -
 -    err = avformat_open_input(&ic, filename, NULL, NULL);
 -    if (err < 0)
 -        return err;
 -    /* copy stream format */
 -    for(i=0;i<ic->nb_streams;i++) {
 -        AVStream *st;
 -        OutputStream *ost;
 -        AVCodec *codec;
 -
 -        codec = avcodec_find_encoder(ic->streams[i]->codec->codec_id);
 -        ost   = new_output_stream(s, nb_output_files, codec);
 -        st    = ost->st;
 -
 -        // FIXME: a more elegant solution is needed
 -        memcpy(st, ic->streams[i], sizeof(AVStream));
 -        st->info = NULL;
 -        avcodec_copy_context(st->codec, ic->streams[i]->codec);
 +    AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
 +    AVCodecContext          *avctx = ost->st->codec;
 +    int ret;
  
 -        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
 -            if (audio_stream_copy) {
 -                st->stream_copy = 1;
 -            } else
 -                choose_sample_fmt(st, codec);
 -        } else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
 -            if (video_stream_copy) {
 -                st->stream_copy = 1;
 -            } else
 -                choose_pixel_fmt(st, codec);
 +    while (bsfc) {
 +        AVPacket new_pkt = *pkt;
 +        int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
 +                                           &new_pkt.data, &new_pkt.size,
 +                                           pkt->data, pkt->size,
 +                                           pkt->flags & AV_PKT_FLAG_KEY);
 +        if (a > 0) {
 +            av_free_packet(pkt);
 +            new_pkt.destruct = av_destruct_packet;
 +        } else if (a < 0) {
 +            av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
 +                   bsfc->filter->name, pkt->stream_index,
 +                   avctx->codec ? avctx->codec->name : "copy");
 +            print_error("", a);
 +            if (exit_on_error)
 +                exit_program(1);
          }
 +        *pkt = new_pkt;
 +
 +        bsfc = bsfc->next;
      }
  
 -    av_close_input_file(ic);
 -    return 0;
 +    ret = av_interleaved_write_frame(s, pkt);
 +    if (ret < 0) {
 +        print_error("av_interleaved_write_frame()", ret);
 +        exit_program(1);
 +    }
 +    ost->frame_number++;
  }
  
 -static double
 -get_sync_ipts(const OutputStream *ost)
 +static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_t size)
  {
 -    const InputStream *ist = ost->sync_ist;
 -    return (double)(ist->pts - start_time)/AV_TIME_BASE;
 +    int fill_char = 0x00;
 +    if (sample_fmt == AV_SAMPLE_FMT_U8)
 +        fill_char = 0x80;
 +    memset(buf, fill_char, size);
  }
  
 -static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx, AVBitStreamFilterContext *bsfc){
 -    int ret;
++static int encode_audio_frame(AVFormatContext *s, OutputStream *ost,
++                              const uint8_t *buf, int buf_size)
++{
++    AVCodecContext *enc = ost->st->codec;
++    AVFrame *frame = NULL;
++    AVPacket pkt;
++    int ret, got_packet;
 -    while(bsfc){
 -        AVPacket new_pkt= *pkt;
 -        int a= av_bitstream_filter_filter(bsfc, avctx, NULL,
 -                                          &new_pkt.data, &new_pkt.size,
 -                                          pkt->data, pkt->size,
 -                                          pkt->flags & AV_PKT_FLAG_KEY);
 -        if(a>0){
 -            av_free_packet(pkt);
 -            new_pkt.destruct= av_destruct_packet;
 -        } else if(a<0){
 -            fprintf(stderr, "%s failed for stream %d, codec %s",
 -                    bsfc->filter->name, pkt->stream_index,
 -                    avctx->codec ? avctx->codec->name : "copy");
 -            print_error("", a);
 -            if (exit_on_error)
++    av_init_packet(&pkt);
++    pkt.data = NULL;
++    pkt.size = 0;
++
++    if (buf) {
++        if (!ost->output_frame) {
++            ost->output_frame = avcodec_alloc_frame();
++            if (!ost->output_frame) {
++                av_log(NULL, AV_LOG_FATAL, "out-of-memory in encode_audio_frame()\n");
+                 exit_program(1);
++            }
++        }
++        frame = ost->output_frame;
++        if (frame->extended_data != frame->data)
++            av_freep(&frame->extended_data);
++        avcodec_get_frame_defaults(frame);
++
++        frame->nb_samples  = buf_size /
++                             (enc->channels * av_get_bytes_per_sample(enc->sample_fmt));
++        if ((ret = avcodec_fill_audio_frame(frame, enc->channels, enc->sample_fmt,
++                                            buf, buf_size, 1)) < 0) {
++            av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
++            exit_program(1);
+         }
 -        *pkt= new_pkt;
 -
 -        bsfc= bsfc->next;
+     }
 -    ret= av_interleaved_write_frame(s, pkt);
 -    if(ret < 0){
 -        print_error("av_interleaved_write_frame()", ret);
++    got_packet = 0;
++    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
++        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
+         exit_program(1);
+     }
 -}
 -#define MAX_AUDIO_PACKET_SIZE (128 * 1024)
++    if (got_packet) {
++        pkt.stream_index = ost->index;
++        if (pkt.pts != AV_NOPTS_VALUE)
++            pkt.pts      = av_rescale_q(pkt.pts,      enc->time_base, ost->st->time_base);
++        if (pkt.duration > 0)
++            pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
++
++        write_frame(s, &pkt, ost);
 -static void do_audio_out(AVFormatContext *s,
 -                         OutputStream *ost,
 -                         InputStream *ist,
 -                         unsigned char *buf, int size)
++        audio_size += pkt.size;
++    }
++
++    if (frame)
++        ost->sync_opts += frame->nb_samples;
++
++    return pkt.size;
++}
++
 +static void do_audio_out(AVFormatContext *s, OutputStream *ost,
 +                         InputStream *ist, AVFrame *decoded_frame)
  {
      uint8_t *buftmp;
-     int64_t audio_out_size, audio_buf_size, size_out;
 -    int64_t audio_out_size, audio_buf_size;
 -    int64_t allocated_for_size= size;
++    int64_t audio_buf_size, size_out;
  
-     int frame_bytes, ret, resample_changed;
 -    int size_out, frame_bytes, ret, resample_changed;
 -    AVCodecContext *enc= ost->st->codec;
 -    AVCodecContext *dec= ist->st->codec;
++    int frame_bytes, resample_changed;
 +    AVCodecContext *enc = ost->st->codec;
 +    AVCodecContext *dec = ist->st->codec;
      int osize = av_get_bytes_per_sample(enc->sample_fmt);
      int isize = av_get_bytes_per_sample(dec->sample_fmt);
--    const int coded_bps = av_get_bits_per_sample(enc->codec->id);
 +    uint8_t *buf = decoded_frame->data[0];
 +    int size     = decoded_frame->nb_samples * dec->channels * isize;
 +    int64_t allocated_for_size = size;
  
  need_realloc:
 -    audio_buf_size= (allocated_for_size + isize*dec->channels - 1) / (isize*dec->channels);
 -    audio_buf_size= (audio_buf_size*enc->sample_rate + dec->sample_rate) / dec->sample_rate;
 -    audio_buf_size= audio_buf_size*2 + 10000; //safety factors for the deprecated resampling API
 -    audio_buf_size= FFMAX(audio_buf_size, enc->frame_size);
 -    audio_buf_size*= osize*enc->channels;
 -
 -    audio_out_size= FFMAX(audio_buf_size, enc->frame_size * osize * enc->channels);
 -    if(coded_bps > 8*osize)
 -        audio_out_size= audio_out_size * coded_bps / (8*osize);
 -    audio_out_size += FF_MIN_BUFFER_SIZE;
 -
 -    if(audio_out_size > INT_MAX || audio_buf_size > INT_MAX){
 -        fprintf(stderr, "Buffer sizes too large\n");
 +    audio_buf_size  = (allocated_for_size + isize * dec->channels - 1) / (isize * dec->channels);
 +    audio_buf_size  = (audio_buf_size * enc->sample_rate + dec->sample_rate) / dec->sample_rate;
 +    audio_buf_size  = audio_buf_size * 2 + 10000; // safety factors for the deprecated resampling API
 +    audio_buf_size  = FFMAX(audio_buf_size, enc->frame_size);
 +    audio_buf_size *= osize * enc->channels;
 +
-     audio_out_size = FFMAX(audio_buf_size, enc->frame_size * osize * enc->channels);
-     if (coded_bps > 8 * osize)
-         audio_out_size = audio_out_size * coded_bps / (8*osize);
-     audio_out_size += FF_MIN_BUFFER_SIZE;
-     if (audio_out_size > INT_MAX || audio_buf_size > INT_MAX) {
++    if (audio_buf_size > INT_MAX) {
 +        av_log(NULL, AV_LOG_FATAL, "Buffer sizes too large\n");
          exit_program(1);
      }
  
      av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size);
--    av_fast_malloc(&audio_out, &allocated_audio_out_size, audio_out_size);
-     if (!audio_buf || !audio_out) {
 -    if (!audio_buf || !audio_out){
 -        fprintf(stderr, "Out of memory in do_audio_out\n");
++    if (!audio_buf) {
 +        av_log(NULL, AV_LOG_FATAL, "Out of memory in do_audio_out\n");
          exit_program(1);
      }
  
          size_out = size;
      }
  
 -    if (!ost->audio_resample && dec->sample_fmt!=enc->sample_fmt) {
 -        const void *ibuf[6]= {buftmp};
 -        void *obuf[6]= {audio_buf};
 -        int istride[6]= {isize};
 -        int ostride[6]= {osize};
 -        int len= size_out/istride[0];
 -        if (av_audio_convert(ost->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
 -            printf("av_audio_convert() failed\n");
 -            if (exit_on_error)
 -                exit_program(1);
 -            return;
 -        }
 -        buftmp = audio_buf;
 -        size_out = len*osize;
 -    }
 +    av_assert0(ost->audio_resample || dec->sample_fmt==enc->sample_fmt);
  
      /* now encode as many frames as possible */
--    if (enc->frame_size > 1) {
++    if (!(enc->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
          /* output resampled raw samples */
          if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) {
 -            fprintf(stderr, "av_fifo_realloc2() failed\n");
 +            av_log(NULL, AV_LOG_FATAL, "av_fifo_realloc2() failed\n");
              exit_program(1);
          }
          av_fifo_generic_write(ost->fifo, buftmp, size_out, NULL);
          frame_bytes = enc->frame_size * osize * enc->channels;
  
          while (av_fifo_size(ost->fifo) >= frame_bytes) {
--            AVPacket pkt;
--            av_init_packet(&pkt);
--
              av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL);
--
-             // FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
 -            //FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
--
--            ret = avcodec_encode_audio(enc, audio_out, audio_out_size,
--                                       (short *)audio_buf);
--            if (ret < 0) {
-                 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
 -                fprintf(stderr, "Audio encoding failed\n");
--                exit_program(1);
--            }
--            audio_size += ret;
-             pkt.stream_index = ost->index;
-             pkt.data = audio_out;
-             pkt.size = ret;
-             if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
-                 pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
 -            pkt.stream_index= ost->index;
 -            pkt.data= audio_out;
 -            pkt.size= ret;
 -            if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
 -                pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
--            pkt.flags |= AV_PKT_FLAG_KEY;
-             write_frame(s, &pkt, ost);
 -            write_frame(s, &pkt, enc, ost->bitstream_filters);
--
--            ost->sync_opts += enc->frame_size;
++            encode_audio_frame(s, ost, audio_buf, frame_bytes);
          }
      } else {
--        AVPacket pkt;
--        av_init_packet(&pkt);
--
--        ost->sync_opts += size_out / (osize * enc->channels);
--
--        /* output a pcm frame */
--        /* determine the size of the coded buffer */
--        size_out /= osize;
--        if (coded_bps)
-             size_out = size_out * coded_bps / 8;
 -            size_out = size_out*coded_bps/8;
--
-         if (size_out > audio_out_size) {
-             av_log(NULL, AV_LOG_FATAL, "Internal error, buffer size too small\n");
 -        if(size_out > audio_out_size){
 -            fprintf(stderr, "Internal error, buffer size too small\n");
--            exit_program(1);
--        }
--
-         // FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
 -        //FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
--        ret = avcodec_encode_audio(enc, audio_out, size_out,
--                                   (short *)buftmp);
--        if (ret < 0) {
-             av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
 -            fprintf(stderr, "Audio encoding failed\n");
--            exit_program(1);
--        }
--        audio_size += ret;
-         pkt.stream_index = ost->index;
-         pkt.data = audio_out;
-         pkt.size = ret;
-         if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
-             pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
 -        pkt.stream_index= ost->index;
 -        pkt.data= audio_out;
 -        pkt.size= ret;
 -        if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
 -            pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
--        pkt.flags |= AV_PKT_FLAG_KEY;
-         write_frame(s, &pkt, ost);
 -        write_frame(s, &pkt, enc, ost->bitstream_filters);
++        encode_audio_frame(s, ost, buftmp, size_out);
      }
  }
  
@@@ -1735,644 -1441,629 +1741,643 @@@ static void print_report(OutputFile *ou
      }
  }
  
 -static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_t size)
 -{
 -    int fill_char = 0x00;
 -    if (sample_fmt == AV_SAMPLE_FMT_U8)
 -        fill_char = 0x80;
 -    memset(buf, fill_char, size);
 -}
 -
 -/* pkt = NULL means EOF (needed to flush decoder buffers) */
 -static int output_packet(InputStream *ist, int ist_index,
 -                         OutputStream **ost_table, int nb_ostreams,
 -                         const AVPacket *pkt)
 +static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
  {
 -    AVFormatContext *os;
 -    OutputStream *ost;
 -    int ret, i;
 -    int got_output;
 -    AVFrame picture;
 -    void *buffer_to_free = NULL;
 -    static unsigned int samples_size= 0;
 -    AVSubtitle subtitle, *subtitle_to_free;
 -    int64_t pkt_pts = AV_NOPTS_VALUE;
 -#if CONFIG_AVFILTER
 -    int frame_available;
 -#endif
 -    float quality;
 -
 -    AVPacket avpkt;
 -    int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
 -
 -    if(ist->next_pts == AV_NOPTS_VALUE)
 -        ist->next_pts= ist->pts;
 +    int i, ret;
  
 -    if (pkt == NULL) {
 -        /* EOF handling */
 -        av_init_packet(&avpkt);
 -        avpkt.data = NULL;
 -        avpkt.size = 0;
 -        goto handle_eof;
 -    } else {
 -        avpkt = *pkt;
 -    }
 +    for (i = 0; i < nb_ostreams; i++) {
 +        OutputStream   *ost = &ost_table[i];
 +        AVCodecContext *enc = ost->st->codec;
 +        AVFormatContext *os = output_files[ost->file_index].ctx;
++        int stop_encoding = 0;
  
 -    if(pkt->dts != AV_NOPTS_VALUE)
 -        ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
 -    if(pkt->pts != AV_NOPTS_VALUE)
 -        pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
 +        if (!ost->encoding_needed)
 +            continue;
  
 -    //while we have more to decode or while the decoder did output something on EOF
 -    while (avpkt.size > 0 || (!pkt && got_output)) {
 -        uint8_t *data_buf, *decoded_data_buf;
 -        int data_size, decoded_data_size;
 -    handle_eof:
 -        ist->pts= ist->next_pts;
 +        if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
 +            continue;
 +        if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
 +            continue;
  
 -        if(avpkt.size && avpkt.size != pkt->size &&
 -           ((!ist->showed_multi_packet_warning && verbose>0) || verbose>1)){
 -            fprintf(stderr, "Multiple frames in a packet from stream %d\n", pkt->stream_index);
 -            ist->showed_multi_packet_warning=1;
 -        }
 +        for (;;) {
 +            AVPacket pkt;
 +            int fifo_bytes;
 +            av_init_packet(&pkt);
-             pkt.stream_index = ost->index;
++            pkt.data = NULL;
++            pkt.size = 0;
  
 -        /* decode the packet if needed */
 -        decoded_data_buf = NULL; /* fail safe */
 -        decoded_data_size= 0;
 -        data_buf  = avpkt.data;
 -        data_size = avpkt.size;
 -        subtitle_to_free = NULL;
 -        if (ist->decoding_needed) {
 -            switch(ist->st->codec->codec_type) {
 -            case AVMEDIA_TYPE_AUDIO:{
 -                if(pkt && samples_size < FFMAX(pkt->size*sizeof(*samples), AVCODEC_MAX_AUDIO_FRAME_SIZE)) {
 -                    samples_size = FFMAX(pkt->size*sizeof(*samples), AVCODEC_MAX_AUDIO_FRAME_SIZE);
 -                    av_free(samples);
 -                    samples= av_malloc(samples_size);
 -                }
 -                decoded_data_size= samples_size;
 -                    /* XXX: could avoid copy if PCM 16 bits with same
 -                       endianness as CPU */
 -                ret = avcodec_decode_audio3(ist->st->codec, samples, &decoded_data_size,
 -                                            &avpkt);
 -                if (ret < 0)
 -                    return ret;
 -                avpkt.data += ret;
 -                avpkt.size -= ret;
 -                data_size   = ret;
 -                got_output  = decoded_data_size > 0;
 -                /* Some bug in mpeg audio decoder gives */
 -                /* decoded_data_size < 0, it seems they are overflows */
 -                if (!got_output) {
 -                    /* no audio frame */
 -                    continue;
 -                }
 -                decoded_data_buf = (uint8_t *)samples;
 -                ist->next_pts += ((int64_t)AV_TIME_BASE/bps * decoded_data_size) /
 -                    (ist->st->codec->sample_rate * ist->st->codec->channels);
 -                break;}
 -            case AVMEDIA_TYPE_VIDEO:
 -                    decoded_data_size = (ist->st->codec->width * ist->st->codec->height * 3) / 2;
 -                    /* XXX: allocate picture correctly */
 -                    avcodec_get_frame_defaults(&picture);
 -                    avpkt.pts = pkt_pts;
 -                    avpkt.dts = ist->pts;
 -                    pkt_pts = AV_NOPTS_VALUE;
 -
 -                    ret = avcodec_decode_video2(ist->st->codec,
 -                                                &picture, &got_output, &avpkt);
 -                    quality = same_quality ? picture.quality : 0;
 -                    if (ret < 0)
 -                        return ret;
 -                    if (!got_output) {
 -                        /* no picture yet */
 -                        goto discard_packet;
 +            switch (ost->st->codec->codec_type) {
 +            case AVMEDIA_TYPE_AUDIO:
 +                fifo_bytes = av_fifo_size(ost->fifo);
-                 ret = 0;
-                 /* encode any samples remaining in fifo */
 +                if (fifo_bytes > 0) {
-                     int osize = av_get_bytes_per_sample(enc->sample_fmt);
-                     int fs_tmp = enc->frame_size;
++                    /* encode any samples remaining in fifo */
++                    int frame_bytes = fifo_bytes;
 +
 +                    av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);
-                     if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
-                         enc->frame_size = fifo_bytes / (osize * enc->channels);
-                     } else { /* pad */
-                         int frame_bytes = enc->frame_size*osize*enc->channels;
++
++                    /* pad last frame with silence if needed */
++                    if (!(enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME)) {
++                        frame_bytes = enc->frame_size * enc->channels *
++                                      av_get_bytes_per_sample(enc->sample_fmt);
 +                        if (allocated_audio_buf_size < frame_bytes)
 +                            exit_program(1);
 +                        generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes);
                      }
-                     ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, (short *)audio_buf);
-                     pkt.duration = av_rescale((int64_t)enc->frame_size*ost->st->time_base.den,
-                                               ost->st->time_base.num, enc->sample_rate);
-                     enc->frame_size = fs_tmp;
-                 }
-                 if (ret <= 0) {
-                     ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
-                 }
-                 if (ret < 0) {
-                     av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
-                     exit_program(1);
 -                    ist->next_pts = ist->pts = guess_correct_pts(&ist->pts_ctx, picture.pkt_pts, picture.pkt_dts);
 -                    if (ist->st->codec->time_base.num != 0) {
 -                        int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
 -                        ist->next_pts += ((int64_t)AV_TIME_BASE *
 -                                          ist->st->codec->time_base.num * ticks) /
 -                            ist->st->codec->time_base.den;
++                    encode_audio_frame(os, ost, audio_buf, frame_bytes);
++                } else {
++                    /* flush encoder with NULL frames until it is done
++                       returning packets */
++                    if (encode_audio_frame(os, ost, NULL, 0) == 0) {
++                        stop_encoding = 1;
++                        break;
+                     }
 -                    avpkt.size = 0;
 -                    buffer_to_free = NULL;
 -                    pre_process_video_frame(ist, (AVPicture *)&picture, &buffer_to_free);
 -                    break;
 -            case AVMEDIA_TYPE_SUBTITLE:
 -                ret = avcodec_decode_subtitle2(ist->st->codec,
 -                                               &subtitle, &got_output, &avpkt);
 -                if (ret < 0)
 -                    return ret;
 -                if (!got_output) {
 -                    goto discard_packet;
                  }
-                 audio_size += ret;
-                 pkt.flags  |= AV_PKT_FLAG_KEY;
 -                subtitle_to_free = &subtitle;
 -                avpkt.size = 0;
 -                break;
 -            default:
 -                return -1;
 -            }
 -        } else {
 -            switch(ist->st->codec->codec_type) {
 -            case AVMEDIA_TYPE_AUDIO:
 -                ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
 -                    ist->st->codec->sample_rate;
                  break;
              case AVMEDIA_TYPE_VIDEO:
 -                if (ist->st->codec->time_base.num != 0) {
 -                    int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
 -                    ist->next_pts += ((int64_t)AV_TIME_BASE *
 -                                      ist->st->codec->time_base.num * ticks) /
 -                        ist->st->codec->time_base.den;
 +                ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
 +                if (ret < 0) {
 +                    av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
 +                    exit_program(1);
                  }
 -                break;
 -            }
 -            ret = avpkt.size;
 -            avpkt.size = 0;
 -        }
 -
 -#if CONFIG_AVFILTER
 -        if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
 -            for (i = 0; i < nb_ostreams; i++) {
 -                ost = ost_table[i];
 -                if (ost->input_video_filter && ost->source_index == ist_index) {
 -                    AVRational sar;
 -                    if (ist->st->sample_aspect_ratio.num)
 -                        sar = ist->st->sample_aspect_ratio;
 -                    else
 -                        sar = ist->st->codec->sample_aspect_ratio;
 -                    // add it to be filtered
 -                    av_vsrc_buffer_add_frame(ost->input_video_filter, &picture,
 -                                             ist->pts,
 -                                             sar);
 +                video_size += ret;
 +                if (enc->coded_frame && enc->coded_frame->key_frame)
 +                    pkt.flags |= AV_PKT_FLAG_KEY;
 +                if (ost->logfile && enc->stats_out) {
 +                    fprintf(ost->logfile, "%s", enc->stats_out);
                  }
 -            }
 -        }
 -#endif
 -
 -        // preprocess audio (volume)
 -        if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
 -            if (audio_volume != 256) {
 -                short *volp;
 -                volp = samples;
 -                for(i=0;i<(decoded_data_size / sizeof(short));i++) {
 -                    int v = ((*volp) * audio_volume + 128) >> 8;
 -                    if (v < -32768) v = -32768;
 -                    if (v >  32767) v = 32767;
 -                    *volp++ = v;
++                if (ret <= 0) {
++                    stop_encoding = 1;
++                    break;
+                 }
++                pkt.stream_index = ost->index;
++                pkt.data = bit_buffer;
++                pkt.size = ret;
++                if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
++                    pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
++                write_frame(os, &pkt, ost);
 +                break;
 +            default:
-                 ret = -1;
++                stop_encoding = 1;
              }
-             if (ret <= 0)
++            if (stop_encoding)
 +                break;
-             pkt.data = bit_buffer;
-             pkt.size = ret;
-             if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
-                 pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
-             write_frame(os, &pkt, ost);
          }
 +    }
 +}
  
 -        /* frame rate emulation */
 -        if (rate_emu) {
 -            int64_t pts = av_rescale(ist->pts, 1000000, AV_TIME_BASE);
 -            int64_t now = av_gettime() - ist->start;
 -            if (pts > now)
 -                usleep(pts - now);
 -        }
 -        /* if output time reached then transcode raw format,
 -           encode packets and output them */
 -        if (start_time == 0 || ist->pts >= start_time)
 -            for(i=0;i<nb_ostreams;i++) {
 -                int frame_size;
 -
 -                ost = ost_table[i];
 -                if (ost->source_index == ist_index) {
 -#if CONFIG_AVFILTER
 -                frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
 -                    !ost->output_video_filter || avfilter_poll_frame(ost->output_video_filter->inputs[0]);
 -                while (frame_available) {
 -                    AVRational ist_pts_tb;
 -                    if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter)
 -                        get_filtered_video_frame(ost->output_video_filter, &picture, &ost->picref, &ist_pts_tb);
 -                    if (ost->picref)
 -                        ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
 -#endif
 -                    os = output_files[ost->file_index];
 -
 -                    /* set the input output pts pairs */
 -                    //ost->sync_ipts = (double)(ist->pts + input_files[ist->file_index].ts_offset - start_time)/ AV_TIME_BASE;
 -
 -                    if (ost->encoding_needed) {
 -                        av_assert0(ist->decoding_needed);
 -                        switch(ost->st->codec->codec_type) {
 -                        case AVMEDIA_TYPE_AUDIO:
 -                            do_audio_out(os, ost, ist, decoded_data_buf, decoded_data_size);
 -                            break;
 -                        case AVMEDIA_TYPE_VIDEO:
 -#if CONFIG_AVFILTER
 -                            if (ost->picref->video && !ost->frame_aspect_ratio)
 -                                ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect;
 -#endif
 -                            do_video_out(os, ost, ist, &picture, &frame_size,
 -                                         same_quality ? quality : ost->st->codec->global_quality);
 -                            if (vstats_filename && frame_size)
 -                                do_video_stats(os, ost, frame_size);
 -                            break;
 -                        case AVMEDIA_TYPE_SUBTITLE:
 -                            do_subtitle_out(os, ost, ist, &subtitle,
 -                                            pkt->pts);
 -                            break;
 -                        default:
 -                            abort();
 -                        }
 -                    } else {
 -                        AVFrame avframe; //FIXME/XXX remove this
 -                        AVPacket opkt;
 -                        int64_t ost_tb_start_time= av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
 +/*
 + * Check whether a packet from ist should be written into ost at this time
 + */
 +static int check_output_constraints(InputStream *ist, OutputStream *ost)
 +{
 +    OutputFile *of = &output_files[ost->file_index];
 +    int ist_index  = ist - input_streams;
  
 -                        av_init_packet(&opkt);
 +    if (ost->source_index != ist_index)
 +        return 0;
  
 -                        if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) && !copy_initial_nonkeyframes)
 -#if !CONFIG_AVFILTER
 -                            continue;
 -#else
 -                            goto cont;
 -#endif
 +    if (of->start_time && ist->pts < of->start_time)
 +        return 0;
  
 -                        /* no reencoding needed : output the packet directly */
 -                        /* force the input stream PTS */
 +    if (of->recording_time != INT64_MAX &&
 +        av_compare_ts(ist->pts, AV_TIME_BASE_Q, of->recording_time + of->start_time,
 +                      (AVRational){ 1, 1000000 }) >= 0) {
 +        ost->is_past_recording_time = 1;
 +        return 0;
 +    }
  
 -                        avcodec_get_frame_defaults(&avframe);
 -                        ost->st->codec->coded_frame= &avframe;
 -                        avframe.key_frame = pkt->flags & AV_PKT_FLAG_KEY;
 +    return 1;
 +}
  
 -                        if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
 -                            audio_size += data_size;
 -                        else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
 -                            video_size += data_size;
 -                            ost->sync_opts++;
 -                        }
 +static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
 +{
 +    OutputFile *of = &output_files[ost->file_index];
 +    int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
 +    AVPicture pict;
 +    AVPacket opkt;
  
 -                        opkt.stream_index= ost->index;
 -                        if(pkt->pts != AV_NOPTS_VALUE)
 -                            opkt.pts= av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
 -                        else
 -                            opkt.pts= AV_NOPTS_VALUE;
 -
 -                        if (pkt->dts == AV_NOPTS_VALUE)
 -                            opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
 -                        else
 -                            opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
 -                        opkt.dts -= ost_tb_start_time;
 -
 -                        opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
 -                        opkt.flags= pkt->flags;
 -
 -                        //FIXME remove the following 2 lines they shall be replaced by the bitstream filters
 -                        if(   ost->st->codec->codec_id != CODEC_ID_H264
 -                           && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
 -                           && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
 -                           ) {
 -                            if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & AV_PKT_FLAG_KEY))
 -                                opkt.destruct= av_destruct_packet;
 -                        } else {
 -                            opkt.data = data_buf;
 -                            opkt.size = data_size;
 -                        }
 +    av_init_packet(&opkt);
  
 -                        write_frame(os, &opkt, ost->st->codec, ost->bitstream_filters);
 -                        ost->st->codec->frame_number++;
 -                        ost->frame_number++;
 -                        av_free_packet(&opkt);
 -                    }
 -#if CONFIG_AVFILTER
 -                    cont:
 -                    frame_available = (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
 -                                       ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
 -                    if (ost->picref)
 -                        avfilter_unref_buffer(ost->picref);
 -                }
 -#endif
 -                }
 -            }
 +    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
 +        !ost->copy_initial_nonkeyframes)
 +        return;
  
 -        av_free(buffer_to_free);
 -        /* XXX: allocate the subtitles in the codec ? */
 -        if (subtitle_to_free) {
 -            avsubtitle_free(subtitle_to_free);
 -            subtitle_to_free = NULL;
 -        }
 +    /* force the input stream PTS */
 +    if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
 +        audio_size += pkt->size;
 +    else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
 +        video_size += pkt->size;
 +        ost->sync_opts++;
      }
 - discard_packet:
 -    if (pkt == NULL) {
 -        /* EOF handling */
  
 -        for(i=0;i<nb_ostreams;i++) {
 -            ost = ost_table[i];
 -            if (ost->source_index == ist_index) {
 -                AVCodecContext *enc= ost->st->codec;
 -                os = output_files[ost->file_index];
 -
 -                if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1)
 -                    continue;
 -                if(ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
 -                    continue;
 -
 -                if (ost->encoding_needed) {
 -                    for(;;) {
 -                        AVPacket pkt;
 -                        int fifo_bytes;
 -                        av_init_packet(&pkt);
 -                        pkt.stream_index= ost->index;
 -
 -                        switch(ost->st->codec->codec_type) {
 -                        case AVMEDIA_TYPE_AUDIO:
 -                            fifo_bytes = av_fifo_size(ost->fifo);
 -                            ret = 0;
 -                            /* encode any samples remaining in fifo */
 -                            if (fifo_bytes > 0) {
 -                                int osize = av_get_bytes_per_sample(enc->sample_fmt);
 -                                int fs_tmp = enc->frame_size;
 -
 -                                av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);
 -                                if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
 -                                    enc->frame_size = fifo_bytes / (osize * enc->channels);
 -                                } else { /* pad */
 -                                    int frame_bytes = enc->frame_size*osize*enc->channels;
 -                                    if (allocated_audio_buf_size < frame_bytes)
 -                                        exit_program(1);
 -                                    generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes);
 -                                }
 -
 -                                ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, (short *)audio_buf);
 -                                pkt.duration = av_rescale((int64_t)enc->frame_size*ost->st->time_base.den,
 -                                                          ost->st->time_base.num, enc->sample_rate);
 -                                enc->frame_size = fs_tmp;
 -                            }
 -                            if(ret <= 0) {
 -                                ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
 -                            }
 -                            if (ret < 0) {
 -                                fprintf(stderr, "Audio encoding failed\n");
 -                                exit_program(1);
 -                            }
 -                            audio_size += ret;
 -                            pkt.flags |= AV_PKT_FLAG_KEY;
 -                            break;
 -                        case AVMEDIA_TYPE_VIDEO:
 -                            ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
 -                            if (ret < 0) {
 -                                fprintf(stderr, "Video encoding failed\n");
 -                                exit_program(1);
 -                            }
 -                            video_size += ret;
 -                            if(enc->coded_frame && enc->coded_frame->key_frame)
 -                                pkt.flags |= AV_PKT_FLAG_KEY;
 -                            if (ost->logfile && enc->stats_out) {
 -                                fprintf(ost->logfile, "%s", enc->stats_out);
 -                            }
 -                            break;
 -                        default:
 -                            ret=-1;
 -                        }
 +    opkt.stream_index = ost->index;
 +    if (pkt->pts != AV_NOPTS_VALUE)
 +        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
 +    else
 +        opkt.pts = AV_NOPTS_VALUE;
  
 -                        if(ret<=0)
 -                            break;
 -                        pkt.data= bit_buffer;
 -                        pkt.size= ret;
 -                        if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
 -                            pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
 -                        write_frame(os, &pkt, ost->st->codec, ost->bitstream_filters);
 -                    }
 -                }
 -            }
 -        }
 +    if (pkt->dts == AV_NOPTS_VALUE)
 +        opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
 +    else
 +        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
 +    opkt.dts -= ost_tb_start_time;
 +
 +    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
 +    opkt.flags    = pkt->flags;
 +
 +    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
 +    if (  ost->st->codec->codec_id != CODEC_ID_H264
 +       && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
 +       && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
 +       ) {
 +        if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
 +            opkt.destruct = av_destruct_packet;
 +    } else {
 +        opkt.data = pkt->data;
 +        opkt.size = pkt->size;
 +    }
 +    if (of->ctx->oformat->flags & AVFMT_RAWPICTURE) {
 +        /* store AVPicture in AVPacket, as expected by the output format */
 +        avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
 +        opkt.data = (uint8_t *)&pict;
 +        opkt.size = sizeof(AVPicture);
 +        opkt.flags |= AV_PKT_FLAG_KEY;
      }
  
 -    return 0;
 +    write_frame(of->ctx, &opkt, ost);
 +    ost->st->codec->frame_number++;
 +    av_free_packet(&opkt);
  }
  
 -static void print_sdp(AVFormatContext **avc, int n)
 +static void rate_emu_sleep(InputStream *ist)
  {
 -    char sdp[2048];
 -
 -    av_sdp_create(avc, n, sdp, sizeof(sdp));
 -    printf("SDP:\n%s\n", sdp);
 -    fflush(stdout);
 +    if (input_files[ist->file_index].rate_emu) {
 +        int64_t pts = av_rescale(ist->pts, 1000000, AV_TIME_BASE);
 +        int64_t now = av_gettime() - ist->start;
 +        if (pts > now)
 +            usleep(pts - now);
 +    }
  }
  
 -static int copy_chapters(int infile, int outfile)
 +static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
  {
 -    AVFormatContext *is = input_files[infile].ctx;
 -    AVFormatContext *os = output_files[outfile];
 -    int i;
 +    AVFrame *decoded_frame;
 +    AVCodecContext *avctx = ist->st->codec;
 +    int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
 +    int i, ret;
  
 -    for (i = 0; i < is->nb_chapters; i++) {
 -        AVChapter *in_ch = is->chapters[i], *out_ch;
 -        int64_t ts_off   = av_rescale_q(start_time - input_files[infile].ts_offset,
 -                                      AV_TIME_BASE_Q, in_ch->time_base);
 -        int64_t rt       = (recording_time == INT64_MAX) ? INT64_MAX :
 -                           av_rescale_q(recording_time, AV_TIME_BASE_Q, in_ch->time_base);
 +    if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
 +        return AVERROR(ENOMEM);
 +    else
 +        avcodec_get_frame_defaults(ist->decoded_frame);
 +    decoded_frame = ist->decoded_frame;
  
 +    ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
 +    if (ret < 0) {
 +        return ret;
 +    }
  
 -        if (in_ch->end < ts_off)
 -            continue;
 -        if (rt != INT64_MAX && in_ch->start > rt + ts_off)
 +    if (!*got_output) {
 +        /* no audio frame */
 +        return ret;
 +    }
 +
 +    /* if the decoder provides a pts, use it instead of the last packet pts.
 +       the decoder could be delaying output by a packet or more. */
 +    if (decoded_frame->pts != AV_NOPTS_VALUE)
 +        ist->next_pts = decoded_frame->pts;
 +
 +    /* increment next_pts to use for the case where the input stream does not
 +       have timestamps or there are multiple frames in the packet */
 +    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
 +                     avctx->sample_rate;
 +
 +    // preprocess audio (volume)
 +    if (audio_volume != 256) {
 +        int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
 +        void *samples = decoded_frame->data[0];
 +        switch (avctx->sample_fmt) {
 +        case AV_SAMPLE_FMT_U8:
 +        {
 +            uint8_t *volp = samples;
 +            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 +                int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
 +                *volp++ = av_clip_uint8(v);
 +            }
 +            break;
 +        }
 +        case AV_SAMPLE_FMT_S16:
 +        {
 +            int16_t *volp = samples;
 +            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 +                int v = ((*volp) * audio_volume + 128) >> 8;
 +                *volp++ = av_clip_int16(v);
 +            }
 +            break;
 +        }
 +        case AV_SAMPLE_FMT_S32:
 +        {
 +            int32_t *volp = samples;
 +            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 +                int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
 +                *volp++ = av_clipl_int32(v);
 +            }
 +            break;
 +        }
 +        case AV_SAMPLE_FMT_FLT:
 +        {
 +            float *volp = samples;
 +            float scale = audio_volume / 256.f;
 +            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 +                *volp++ *= scale;
 +            }
 +            break;
 +        }
 +        case AV_SAMPLE_FMT_DBL:
 +        {
 +            double *volp = samples;
 +            double scale = audio_volume / 256.;
 +            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 +                *volp++ *= scale;
 +            }
              break;
 +        }
 +        default:
 +            av_log(NULL, AV_LOG_FATAL,
 +                   "Audio volume adjustment on sample format %s is not supported.\n",
 +                   av_get_sample_fmt_name(ist->st->codec->sample_fmt));
 +            exit_program(1);
 +        }
 +    }
  
 -        out_ch = av_mallocz(sizeof(AVChapter));
 -        if (!out_ch)
 -            return AVERROR(ENOMEM);
 +    rate_emu_sleep(ist);
  
 -        out_ch->id        = in_ch->id;
 -        out_ch->time_base = in_ch->time_base;
 -        out_ch->start     = FFMAX(0,  in_ch->start - ts_off);
 -        out_ch->end       = FFMIN(rt, in_ch->end   - ts_off);
 +    for (i = 0; i < nb_output_streams; i++) {
 +        OutputStream *ost = &output_streams[i];
  
 -        if (metadata_chapters_autocopy)
 -            av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
 +        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
 +            continue;
 +        do_audio_out(output_files[ost->file_index].ctx, ost, ist, decoded_frame);
 +    }
  
 -        os->nb_chapters++;
 -        os->chapters = av_realloc(os->chapters, sizeof(AVChapter)*os->nb_chapters);
 -        if (!os->chapters)
 -            return AVERROR(ENOMEM);
 -        os->chapters[os->nb_chapters - 1] = out_ch;
 -    }
 -    return 0;
 +    return ret;
  }
  
 -static void parse_forced_key_frames(char *kf, OutputStream *ost,
 -                                    AVCodecContext *avctx)
 +static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *pkt_pts, int64_t *pkt_dts)
  {
 -    char *p;
 -    int n = 1, i;
 -    int64_t t;
 +    AVFrame *decoded_frame, *filtered_frame = NULL;
 +    void *buffer_to_free = NULL;
 +    int i, ret = 0;
 +    float quality = 0;
 +#if CONFIG_AVFILTER
 +    int frame_available = 1;
 +#endif
 +    int duration=0;
 +    int64_t *best_effort_timestamp;
 +    AVRational *frame_sample_aspect;
  
 -    for (p = kf; *p; p++)
 -        if (*p == ',')
 -            n++;
 -    ost->forced_kf_count = n;
 -    ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
 -    if (!ost->forced_kf_pts) {
 -        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
 -        exit_program(1);
 +    if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
 +        return AVERROR(ENOMEM);
 +    else
 +        avcodec_get_frame_defaults(ist->decoded_frame);
 +    decoded_frame = ist->decoded_frame;
 +    pkt->pts  = *pkt_pts;
 +    pkt->dts  = *pkt_dts;
 +    *pkt_pts  = AV_NOPTS_VALUE;
 +
 +    if (pkt->duration) {
 +        duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
 +    } else if(ist->st->codec->time_base.num != 0) {
 +        int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
 +        duration = ((int64_t)AV_TIME_BASE *
 +                          ist->st->codec->time_base.num * ticks) /
 +                          ist->st->codec->time_base.den;
      }
 -    for (i = 0; i < n; i++) {
 -        p = i ? strchr(p, ',') + 1 : kf;
 -        t = parse_time_or_die("force_key_frames", p, 1);
 -        ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
 +
 +    if(*pkt_dts != AV_NOPTS_VALUE && duration) {
 +        *pkt_dts += duration;
 +    }else
 +        *pkt_dts = AV_NOPTS_VALUE;
 +
 +    ret = avcodec_decode_video2(ist->st->codec,
 +                                decoded_frame, got_output, pkt);
 +    if (ret < 0)
 +        return ret;
 +
 +    quality = same_quant ? decoded_frame->quality : 0;
 +    if (!*got_output) {
 +        /* no picture yet */
 +        return ret;
 +    }
 +
 +    best_effort_timestamp= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "best_effort_timestamp");
 +    if(*best_effort_timestamp != AV_NOPTS_VALUE)
 +        ist->next_pts = ist->pts = *best_effort_timestamp;
 +
 +    ist->next_pts += duration;
 +    pkt->size = 0;
 +
 +    pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
 +
 +#if CONFIG_AVFILTER
 +    frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
 +    for(i=0;i<nb_output_streams;i++) {
  +        OutputStream *ost = &output_streams[i];
 +        if(check_output_constraints(ist, ost) && ost->encoding_needed){
 +            if (!frame_sample_aspect->num)
 +                *frame_sample_aspect = ist->st->sample_aspect_ratio;
 +            decoded_frame->pts = ist->pts;
 +            if (ist->dr1 && decoded_frame->type==FF_BUFFER_TYPE_USER) {
 +                FrameBuffer      *buf = decoded_frame->opaque;
 +                AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
 +                                            decoded_frame->data, decoded_frame->linesize,
 +                                            AV_PERM_READ | AV_PERM_PRESERVE,
 +                                            ist->st->codec->width, ist->st->codec->height,
 +                                            ist->st->codec->pix_fmt);
 +
 +                avfilter_copy_frame_props(fb, decoded_frame);
 +                fb->pts                 = ist->pts;
 +                fb->buf->priv           = buf;
 +                fb->buf->free           = filter_release_buffer;
 +
 +                buf->refcount++;
 +                av_buffersrc_buffer(ost->input_video_filter, fb);
 +            } else
 +            if((av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, AV_VSRC_BUF_FLAG_OVERWRITE)) < 0){
 +                av_log(0, AV_LOG_FATAL, "Failed to inject frame into filter network\n");
 +                exit_program(1);
 +            }
 +        }
 +    }
 +#endif
 +
 +    rate_emu_sleep(ist);
 +
 +    for (i = 0; i < nb_output_streams; i++) {
 +        OutputStream *ost = &output_streams[i];
 +        int frame_size;
 +
 +        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
 +            continue;
 +
 +#if CONFIG_AVFILTER
 +        if (ost->input_video_filter) {
 +            frame_available = av_buffersink_poll_frame(ost->output_video_filter);
 +        }
 +        while (frame_available) {
 +            if (ost->output_video_filter) {
 +                AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base;
 +                if (av_buffersink_get_buffer_ref(ost->output_video_filter, &ost->picref, 0) < 0){
 +                    av_log(0, AV_LOG_WARNING, "AV Filter told us it has a frame available but failed to output one\n");
 +                    goto cont;
 +                }
 +                if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) {
 +                    av_free(buffer_to_free);
 +                    return AVERROR(ENOMEM);
 +                } else
 +                    avcodec_get_frame_defaults(ist->filtered_frame);
 +                filtered_frame = ist->filtered_frame;
 +                *filtered_frame= *decoded_frame; //for me_threshold
 +                if (ost->picref) {
 +                    avfilter_fill_frame_from_video_buffer_ref(filtered_frame, ost->picref);
 +                    ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
 +                }
 +            }
 +            if (ost->picref->video && !ost->frame_aspect_ratio)
 +                ost->st->codec->sample_aspect_ratio = ost->picref->video->sample_aspect_ratio;
 +#else
 +            filtered_frame = decoded_frame;
 +#endif
 +
 +            do_video_out(output_files[ost->file_index].ctx, ost, ist, filtered_frame, &frame_size,
 +                         same_quant ? quality : ost->st->codec->global_quality);
 +            if (vstats_filename && frame_size)
 +                do_video_stats(output_files[ost->file_index].ctx, ost, frame_size);
 +#if CONFIG_AVFILTER
 +            cont:
 +            frame_available = ost->output_video_filter && av_buffersink_poll_frame(ost->output_video_filter);
 +            avfilter_unref_buffer(ost->picref);
 +        }
 +#endif
      }
 +
 +    av_free(buffer_to_free);
 +    return ret;
  }
  
 -/*
 - * The following code is the main loop of the file converter
 - */
 -static int transcode(AVFormatContext **output_files,
 -                     int nb_output_files,
 -                     InputFile *input_files,
 -                     int nb_input_files,
 -                     StreamMap *stream_maps, int nb_stream_maps)
 +static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
  {
 -    int ret = 0, i, j, k, n, nb_ostreams = 0;
 -    AVFormatContext *is, *os;
 -    AVCodecContext *codec, *icodec;
 -    OutputStream *ost, **ost_table = NULL;
 -    InputStream *ist;
 -    char error[1024];
 -    int want_sdp = 1;
 -    uint8_t no_packet[MAX_FILES]={0};
 -    int no_packet_count=0;
 +    AVSubtitle subtitle;
 +    int i, ret = avcodec_decode_subtitle2(ist->st->codec,
 +                                          &subtitle, got_output, pkt);
 +    if (ret < 0)
 +        return ret;
 +    if (!*got_output)
 +        return ret;
  
 -    if (rate_emu)
 -        for (i = 0; i < nb_input_streams; i++)
 -            input_streams[i].start = av_gettime();
 +    rate_emu_sleep(ist);
  
 -    /* output stream init */
 -    nb_ostreams = 0;
 -    for(i=0;i<nb_output_files;i++) {
 -        os = output_files[i];
 -        if (!os->nb_streams && !(os->oformat->flags & AVFMT_NOSTREAMS)) {
 -            av_dump_format(output_files[i], i, output_files[i]->filename, 1);
 -            fprintf(stderr, "Output file #%d does not contain any stream\n", i);
 -            ret = AVERROR(EINVAL);
 -            goto fail;
 +    for (i = 0; i < nb_output_streams; i++) {
 +        OutputStream *ost = &output_streams[i];
 +
 +        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
 +            continue;
 +
 +        do_subtitle_out(output_files[ost->file_index].ctx, ost, ist, &subtitle, pkt->pts);
 +    }
 +
 +    avsubtitle_free(&subtitle);
 +    return ret;
 +}
 +
 +/* pkt = NULL means EOF (needed to flush decoder buffers) */
 +static int output_packet(InputStream *ist,
 +                         OutputStream *ost_table, int nb_ostreams,
 +                         const AVPacket *pkt)
 +{
 +    int ret = 0, i;
 +    int got_output;
 +    int64_t pkt_dts = AV_NOPTS_VALUE;
 +    int64_t pkt_pts = AV_NOPTS_VALUE;
 +
 +    AVPacket avpkt;
 +
 +    if (ist->next_pts == AV_NOPTS_VALUE)
 +        ist->next_pts = ist->pts;
 +
 +    if (pkt == NULL) {
 +        /* EOF handling */
 +        av_init_packet(&avpkt);
 +        avpkt.data = NULL;
 +        avpkt.size = 0;
 +        goto handle_eof;
 +    } else {
 +        avpkt = *pkt;
 +    }
 +
 +    if (pkt->dts != AV_NOPTS_VALUE) {
 +        if (ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
 +            ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
 +        pkt_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
 +    }
 +    if(pkt->pts != AV_NOPTS_VALUE)
 +        pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
 +
 +    // while we have more to decode or while the decoder did output something on EOF
 +    while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
 +    handle_eof:
 +
 +        ist->pts = ist->next_pts;
 +
 +        if (avpkt.size && avpkt.size != pkt->size) {
 +            av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
 +                   "Multiple frames in a packet from stream %d\n", pkt->stream_index);
 +            ist->showed_multi_packet_warning = 1;
 +        }
 +
 +        switch (ist->st->codec->codec_type) {
 +        case AVMEDIA_TYPE_AUDIO:
 +            ret = transcode_audio    (ist, &avpkt, &got_output);
 +            break;
 +        case AVMEDIA_TYPE_VIDEO:
 +            ret = transcode_video    (ist, &avpkt, &got_output, &pkt_pts, &pkt_dts);
 +            break;
 +        case AVMEDIA_TYPE_SUBTITLE:
 +            ret = transcode_subtitles(ist, &avpkt, &got_output);
 +            break;
 +        default:
 +            return -1;
 +        }
 +
 +        if (ret < 0)
 +            return ret;
 +
 +        avpkt.dts=
 +        avpkt.pts= AV_NOPTS_VALUE;
 +
 +        // touch data and size only if not EOF
 +        if (pkt) {
 +            if(ist->st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
 +                ret = avpkt.size;
 +            avpkt.data += ret;
 +            avpkt.size -= ret;
 +        }
 +        if (!got_output) {
 +            continue;
          }
 -        nb_ostreams += os->nb_streams;
      }
 -    if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) {
 -        fprintf(stderr, "Number of stream maps must match number of output streams\n");
 -        ret = AVERROR(EINVAL);
 -        goto fail;
 +
 +    /* handle stream copy */
 +    if (!ist->decoding_needed) {
 +        rate_emu_sleep(ist);
 +        ist->pts = ist->next_pts;
 +        switch (ist->st->codec->codec_type) {
 +        case AVMEDIA_TYPE_AUDIO:
 +            ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
 +                             ist->st->codec->sample_rate;
 +            break;
 +        case AVMEDIA_TYPE_VIDEO:
 +            if (pkt->duration) {
 +                ist->next_pts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
 +            } else if(ist->st->codec->time_base.num != 0) {
 +                int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
 +                ist->next_pts += ((int64_t)AV_TIME_BASE *
 +                                  ist->st->codec->time_base.num * ticks) /
 +                                  ist->st->codec->time_base.den;
 +            }
 +            break;
 +        }
      }
 +    for (i = 0; pkt && i < nb_ostreams; i++) {
 +        OutputStream *ost = &ost_table[i];
  
 -    /* Sanity check the mapping args -- do the input files & streams exist? */
 -    for(i=0;i<nb_stream_maps;i++) {
 -        int fi = stream_maps[i].file_index;
 -        int si = stream_maps[i].stream_index;
 +        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
 +            continue;
  
 -        if (fi < 0 || fi > nb_input_files - 1 ||
 -            si < 0 || si > input_files[fi].nb_streams - 1) {
 -            fprintf(stderr,"Could not find input stream #%d.%d\n", fi, si);
 -            ret = AVERROR(EINVAL);
 -            goto fail;
 +        do_streamcopy(ist, ost, pkt);
 +    }
 +
 +    return 0;
 +}
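
The "pkt == NULL means EOF" convention documented above is what lets output_packet() drain frames the decoder is still holding. As a rough sketch (not part of this commit), a caller could flush a video decoder the same way, assuming dec_ctx is an opened decoder context and frame an allocated AVFrame (both hypothetical names):

    AVPacket flush_pkt;
    int got_frame;

    av_init_packet(&flush_pkt);
    flush_pkt.data = NULL;  /* empty packet signals EOF to the decoder */
    flush_pkt.size = 0;

    do {
        if (avcodec_decode_video2(dec_ctx, frame, &got_frame, &flush_pkt) < 0)
            break;
        if (got_frame) {
            /* hand the delayed frame to the encoder / filter chain */
        }
    } while (got_frame);
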
 +
 +static void print_sdp(OutputFile *output_files, int n)
 +{
 +    char sdp[2048];
 +    int i;
 +    AVFormatContext **avc = av_malloc(sizeof(*avc) * n);
 +
 +    if (!avc)
 +        exit_program(1);
 +    for (i = 0; i < n; i++)
 +        avc[i] = output_files[i].ctx;
 +
 +    av_sdp_create(avc, n, sdp, sizeof(sdp));
 +    printf("SDP:\n%s\n", sdp);
 +    fflush(stdout);
 +    av_freep(&avc);
 +}
 +
 +static int init_input_stream(int ist_index, OutputStream *output_streams, int nb_output_streams,
 +                             char *error, int error_len)
 +{
 +    InputStream *ist = &input_streams[ist_index];
 +    if (ist->decoding_needed) {
 +        AVCodec *codec = ist->dec;
 +        if (!codec) {
 +            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
 +                    avcodec_get_name(ist->st->codec->codec_id), ist->file_index, ist->st->index);
 +            return AVERROR(EINVAL);
          }
 -        fi = stream_maps[i].sync_file_index;
 -        si = stream_maps[i].sync_stream_index;
 -        if (fi < 0 || fi > nb_input_files - 1 ||
 -            si < 0 || si > input_files[fi].nb_streams - 1) {
 -            fprintf(stderr,"Could not find sync stream #%d.%d\n", fi, si);
 -            ret = AVERROR(EINVAL);
 -            goto fail;
 +
 +        ist->dr1 = codec->capabilities & CODEC_CAP_DR1;
 +        if (codec->type == AVMEDIA_TYPE_VIDEO && ist->dr1) {
 +            ist->st->codec->get_buffer     = codec_get_buffer;
 +            ist->st->codec->release_buffer = codec_release_buffer;
 +            ist->st->codec->opaque         = ist;
          }
 +
 +        if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
 +            snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
 +                    ist->file_index, ist->st->index);
 +            return AVERROR(EINVAL);
 +        }
 +        assert_codec_experimental(ist->st->codec, 0);
 +        assert_avoptions(ist->opts);
      }
  
 -    ost_table = av_mallocz(sizeof(OutputStream *) * nb_ostreams);
 -    if (!ost_table)
 -        goto fail;
 -    n = 0;
 -    for(k=0;k<nb_output_files;k++) {
 -        os = output_files[k];
 -        for(i=0;i<os->nb_streams;i++,n++) {
 -            int found;
 -            ost = ost_table[n] = output_streams_for_file[k][i];
 -            if (nb_stream_maps > 0) {
 -                ost->source_index = input_files[stream_maps[n].file_index].ist_index +
 -                    stream_maps[n].stream_index;
 -
 -                /* Sanity check that the stream types match */
 -                if (input_streams[ost->source_index].st->codec->codec_type != ost->st->codec->codec_type) {
 -                    int i= ost->file_index;
 -                    av_dump_format(output_files[i], i, output_files[i]->filename, 1);
 -                    fprintf(stderr, "Codec type mismatch for mapping #%d.%d -> #%d.%d\n",
 -                        stream_maps[n].file_index, stream_maps[n].stream_index,
 -                        ost->file_index, ost->index);
 -                    exit_program(1);
 -                }
 +    ist->pts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
 +    ist->next_pts = AV_NOPTS_VALUE;
 +    ist->is_start = 1;
  
 -            } else {
 -                int best_nb_frames=-1;
 -                /* get corresponding input stream index : we select the first one with the right type */
 -                found = 0;
 -                for (j = 0; j < nb_input_streams; j++) {
 -                    int skip=0;
 -                    ist = &input_streams[j];
 -                    if(opt_programid){
 -                        int pi,si;
 -                        AVFormatContext *f = input_files[ist->file_index].ctx;
 -                        skip=1;
 -                        for(pi=0; pi<f->nb_programs; pi++){
 -                            AVProgram *p= f->programs[pi];
 -                            if(p->id == opt_programid)
 -                                for(si=0; si<p->nb_stream_indexes; si++){
 -                                    if(f->streams[ p->stream_index[si] ] == ist->st)
 -                                        skip=0;
 -                                }
 -                        }
 -                    }
 -                    if (ist->discard && ist->st->discard != AVDISCARD_ALL && !skip &&
 -                        ist->st->codec->codec_type == ost->st->codec->codec_type) {
 -                        if(best_nb_frames < ist->st->codec_info_nb_frames){
 -                            best_nb_frames= ist->st->codec_info_nb_frames;
 -                            ost->source_index = j;
 -                            found = 1;
 -                        }
 -                    }
 -                }
 +    return 0;
 +}
  
 -                if (!found) {
 -                    if(! opt_programid) {
 -                        /* try again and reuse existing stream */
 -                        for (j = 0; j < nb_input_streams; j++) {
 -                            ist = &input_streams[j];
 -                            if (   ist->st->codec->codec_type == ost->st->codec->codec_type
 -                                && ist->st->discard != AVDISCARD_ALL) {
 -                                ost->source_index = j;
 -                                found = 1;
 -                            }
 -                        }
 -                    }
 -                    if (!found) {
 -                        int i= ost->file_index;
 -                        av_dump_format(output_files[i], i, output_files[i]->filename, 1);
 -                        fprintf(stderr, "Could not find input stream matching output stream #%d.%d\n",
 -                                ost->file_index, ost->index);
 -                        exit_program(1);
 -                    }
 -                }
 -            }
 -            ist = &input_streams[ost->source_index];
 -            ist->discard = 0;
 -            ost->sync_ist = (nb_stream_maps > 0) ?
 -                &input_streams[input_files[stream_maps[n].sync_file_index].ist_index +
 -                         stream_maps[n].sync_stream_index] : ist;
 +static int transcode_init(OutputFile *output_files, int nb_output_files,
 +                          InputFile  *input_files,  int nb_input_files)
 +{
 +    int ret = 0, i, j, k;
 +    AVFormatContext *oc;
 +    AVCodecContext *codec, *icodec;
 +    OutputStream *ost;
 +    InputStream *ist;
 +    char error[1024];
 +    int want_sdp = 1;
 +
 +    /* init framerate emulation */
 +    for (i = 0; i < nb_input_files; i++) {
 +        InputFile *ifile = &input_files[i];
 +        if (ifile->rate_emu)
 +            for (j = 0; j < ifile->nb_streams; j++)
 +                input_streams[j + ifile->ist_index].start = av_gettime();
 +    }
 +
 +    /* output stream init */
 +    for (i = 0; i < nb_output_files; i++) {
 +        oc = output_files[i].ctx;
 +        if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
 +            av_dump_format(oc, i, oc->filename, 1);
 +            av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
 +            return AVERROR(EINVAL);
          }
      }
  
@@@ -816,9 -799,9 +821,13 @@@ typedef struct RcOverride
   */
  #define CODEC_CAP_AUTO_THREADS     0x8000
  /**
+  * Audio encoder supports receiving a different number of samples in each call.
+  */
+ #define CODEC_CAP_VARIABLE_FRAME_SIZE 0x10000
++/**
 + * Codec is lossless.
 + */
 +#define CODEC_CAP_LOSSLESS         0x80000000
  
  //The following defines may change, don't expect compatibility if you use them.
  #define MB_TYPE_INTRA4x4   0x0001
Simple merge
Simple merge
@@@ -107,34 -101,35 +109,35 @@@ static int decode_frame(AVCodecContext 
                          AVPacket *avpkt)
  {
      PicContext *s = avctx->priv_data;
-     int buf_size = avpkt->size;
-     const uint8_t *buf = avpkt->data;
-     const uint8_t *buf_end = avpkt->data + buf_size;
      uint32_t *palette;
-     int bits_per_plane, bpp, etype, esize, npal;
-     int i, x, y, plane;
+     int bits_per_plane, bpp, etype, esize, npal, pos_after_pal;
+     int i, x, y, plane, tmp;
  
-     if (buf_size < 11)
+     bytestream2_init(&s->g, avpkt->data, avpkt->size);
+     if (bytestream2_get_bytes_left(&s->g) < 11)
          return AVERROR_INVALIDDATA;
  
-     if (bytestream_get_le16(&buf) != 0x1234)
+     if (bytestream2_get_le16u(&s->g) != 0x1234)
          return AVERROR_INVALIDDATA;
-     s->width  = bytestream_get_le16(&buf);
-     s->height = bytestream_get_le16(&buf);
-     buf += 4;
-     bits_per_plane    = *buf & 0xF;
-     s->nb_planes      = (*buf++ >> 4) + 1;
-     bpp               = s->nb_planes ? bits_per_plane*s->nb_planes : bits_per_plane;
+     s->width       = bytestream2_get_le16u(&s->g);
+     s->height      = bytestream2_get_le16u(&s->g);
+     bytestream2_skip(&s->g, 4);
+     tmp            = bytestream2_get_byteu(&s->g);
+     bits_per_plane = tmp & 0xF;
+     s->nb_planes   = (tmp >> 4) + 1;
+     bpp            = bits_per_plane * s->nb_planes;
      if (bits_per_plane > 8 || bpp < 1 || bpp > 32) {
 -        av_log_ask_for_sample(s, "unsupported bit depth\n");
 +        av_log_ask_for_sample(avctx, "unsupported bit depth\n");
          return AVERROR_INVALIDDATA;
      }
  
-     if (*buf == 0xFF || bpp == 8) {
-         buf += 2;
-         etype  = bytestream_get_le16(&buf);
-         esize  = bytestream_get_le16(&buf);
-         if (buf_end - buf < esize)
 -    if (bytestream2_peek_byte(&s->g) == 0xFF) {
++    if (bytestream2_peek_byte(&s->g) == 0xFF || bpp == 8) {
+         bytestream2_skip(&s->g, 2);
+         etype = bytestream2_get_le16(&s->g);
+         esize = bytestream2_get_le16(&s->g);
+         if (bytestream2_get_bytes_left(&s->g) < esize)
              return AVERROR_INVALIDDATA;
      } else {
          etype = -1;
              palette[i] = ff_cga_palette[ cga_mode45_index[idx][i] ];
      } else if (etype == 2) {
          npal = FFMIN(esize, 16);
-         for (i = 0; i < npal; i++)
-             palette[i] = ff_cga_palette[ FFMIN(buf[i], 16)];
+         for (i = 0; i < npal; i++) {
+             int pal_idx = bytestream2_get_byte(&s->g);
+             palette[i]  = ff_cga_palette[FFMIN(pal_idx, 16)];
+         }
      } else if (etype == 3) {
          npal = FFMIN(esize, 16);
-         for (i = 0; i < npal; i++)
-             palette[i] = ff_ega_palette[ FFMIN(buf[i], 63)];
+         for (i = 0; i < npal; i++) {
+             int pal_idx = bytestream2_get_byte(&s->g);
+             palette[i]  = ff_ega_palette[FFMIN(pal_idx, 63)];
+         }
      } else if (etype == 4 || etype == 5) {
          npal = FFMIN(esize / 3, 256);
 -        for (i = 0; i < npal; i++)
 +        for (i = 0; i < npal; i++) {
-             palette[i] = AV_RB24(buf + i*3) << 2;
-             palette[i] |= 0xFF << 24 | palette[i] >> 6 & 0x30303;
+             palette[i] = bytestream2_get_be24(&s->g) << 2;
++            palette[i] |= 0xFFU << 24 | palette[i] >> 6 & 0x30303;
 +        }
      } else {
          if (bpp == 1) {
              npal = 2;
      }
      // fill remaining palette entries
      memset(palette + npal, 0, AVPALETTE_SIZE - npal * 4);
-     buf += esize;
+     // skip remaining palette bytes
+     bytestream2_seek(&s->g, pos_after_pal, SEEK_SET);
  
 -    x = 0;
      y = s->height - 1;
-     if (bytestream_get_le16(&buf)) {
 -    plane = 0;
+     if (bytestream2_get_le16(&s->g)) {
 -        while (bytestream2_get_bytes_left(&s->g) >= 6) {
 +        x = 0;
 +        plane = 0;
-         while (y >= 0 && buf_end - buf >= 6) {
-             const uint8_t *buf_pend = buf + FFMIN(AV_RL16(buf), buf_end - buf);
-             //ignore uncompressed block size reported at buf[2]
-             int marker = buf[4];
-             buf += 5;
++        while (y >= 0 && bytestream2_get_bytes_left(&s->g) >= 6) {
+             int stop_size, marker, t1, t2;
+             t1        = bytestream2_get_bytes_left(&s->g);
+             t2        = bytestream2_get_le16(&s->g);
+             stop_size = t1 - FFMIN(t1, t2);
+             // ignore uncompressed block size
+             bytestream2_skip(&s->g, 2);
+             marker    = bytestream2_get_byte(&s->g);
  
-             while (plane < s->nb_planes && y >= 0 && buf_pend - buf >= 1) {
 -            while (plane < s->nb_planes &&
++            while (plane < s->nb_planes && y >= 0 &&
+                    bytestream2_get_bytes_left(&s->g) > stop_size) {
                  int run = 1;
-                 int val = *buf++;
+                 int val = bytestream2_get_byte(&s->g);
                  if (val == marker) {
-                     run = *buf++;
+                     run = bytestream2_get_byte(&s->g);
                      if (run == 0)
-                         run = bytestream_get_le16(&buf);
-                     val = *buf++;
+                         run = bytestream2_get_le16(&s->g);
+                     val = bytestream2_get_byte(&s->g);
                  }
-                 if (buf > buf_end)
+                 if (!bytestream2_get_bytes_left(&s->g))
                      break;
  
                  if (bits_per_plane == 8) {
              }
          }
      } else {
-         while (y >= 0 && buf < buf_end) {
-             memcpy(s->frame.data[0] + y * s->frame.linesize[0], buf, FFMIN(avctx->width, buf_end - buf));
-             buf += avctx->width;
 -        av_log_ask_for_sample(s, "uncompressed image\n");
 -        return avpkt->size;
++        while (y >= 0 && bytestream2_get_bytes_left(&s->g) > 0) {
++            memcpy(s->frame.data[0] + y * s->frame.linesize[0], s->g.buffer, FFMIN(avctx->width, bytestream2_get_bytes_left(&s->g)));
++            bytestream2_skip(&s->g, avctx->width);
 +            y--;
 +        }
      }
  
      *data_size = sizeof(AVFrame);
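
The pictordec.c hunk above replaces raw buf/buf_end pointer arithmetic with the bounds-checked bytestream2 reader. A minimal sketch of the same pattern, with hypothetical names (data, size, parse_header) that are not taken from this commit:

    #include "bytestream.h"

    /* Sketch: read a small header the same way the converted decoder does,
     * letting GetByteContext track how many input bytes remain. */
    static int parse_header(const uint8_t *data, int size, int *width, int *height)
    {
        GetByteContext gb;

        bytestream2_init(&gb, data, size);
        if (bytestream2_get_bytes_left(&gb) < 11)
            return AVERROR_INVALIDDATA;
        if (bytestream2_get_le16u(&gb) != 0x1234)       /* magic word */
            return AVERROR_INVALIDDATA;
        *width  = bytestream2_get_le16u(&gb);
        *height = bytestream2_get_le16u(&gb);
        bytestream2_skip(&gb, 4);                       /* reserved bytes */
        return 0;
    }
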
Simple merge
@@@ -244,22 -250,42 +255,58 @@@ void avcodec_align_dimensions(AVCodecCo
      *width=FFALIGN(*width, align);
  }
  
 +void ff_init_buffer_info(AVCodecContext *s, AVFrame *pic)
 +{
 +    if (s->pkt) {
 +        pic->pkt_pts = s->pkt->pts;
 +        pic->pkt_pos = s->pkt->pos;
 +    } else {
 +        pic->pkt_pts = AV_NOPTS_VALUE;
 +        pic->pkt_pos = -1;
 +    }
 +    pic->reordered_opaque= s->reordered_opaque;
 +    pic->sample_aspect_ratio = s->sample_aspect_ratio;
 +    pic->width               = s->width;
 +    pic->height              = s->height;
 +    pic->format              = s->pix_fmt;
 +}
 +
+ int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
+                              enum AVSampleFormat sample_fmt, const uint8_t *buf,
+                              int buf_size, int align)
+ {
+     int ch, planar, needed_size, ret = 0;
+     needed_size = av_samples_get_buffer_size(NULL, nb_channels,
+                                              frame->nb_samples, sample_fmt,
+                                              align);
+     if (buf_size < needed_size)
+         return AVERROR(EINVAL);
+     planar = av_sample_fmt_is_planar(sample_fmt);
+     if (planar && nb_channels > AV_NUM_DATA_POINTERS) {
+         if (!(frame->extended_data = av_mallocz(nb_channels *
+                                                 sizeof(*frame->extended_data))))
+             return AVERROR(ENOMEM);
+     } else {
+         frame->extended_data = frame->data;
+     }
+     if ((ret = av_samples_fill_arrays(frame->extended_data, &frame->linesize[0],
+                                       buf, nb_channels, frame->nb_samples,
+                                       sample_fmt, align)) < 0) {
+         if (frame->extended_data != frame->data)
+             av_free(frame->extended_data);
+         return ret;
+     }
+     if (frame->extended_data != frame->data) {
+         for (ch = 0; ch < AV_NUM_DATA_POINTERS; ch++)
+             frame->data[ch] = frame->extended_data[ch];
+     }
+     return ret;
+ }
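
avcodec_fill_audio_frame() lets a caller point an AVFrame at an existing sample buffer instead of copying it. A rough usage sketch, assuming enc is an opened audio encoder context and pcm/pcm_size describe an interleaved sample buffer (all hypothetical names):

    AVFrame frame;
    int ret;

    avcodec_get_frame_defaults(&frame);
    frame.nb_samples = enc->frame_size;
    ret = avcodec_fill_audio_frame(&frame, enc->channels, enc->sample_fmt,
                                   pcm, pcm_size, 1);
    if (ret < 0)
        return ret;   /* pcm_size too small for frame_size samples */
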
  static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
  {
      AVCodecInternal *avci = avctx->internal;
      /* if there is no previous buffer or the previous buffer cannot be used
         as-is, allocate a new buffer and/or rearrange the channel pointers */
      if (!buf->extended_data) {
-         /* if the channel pointers will fit, just set extended_data to data,
-            otherwise allocate the extended_data channel pointers */
-         if (needs_extended_data) {
-             buf->extended_data = av_mallocz(avctx->channels *
-                                             sizeof(*buf->extended_data));
-             if (!buf->extended_data)
+         if (!buf->data[0]) {
+             if (!(buf->data[0] = av_mallocz(buf_size)))
                  return AVERROR(ENOMEM);
-         } else {
-             buf->extended_data = buf->data;
-         }
-         /* if there is a previous buffer and it is large enough, reuse it and
-            just fill-in new channel pointers and linesize, otherwise allocate
-            a new buffer */
-         if (buf->extended_data[0]) {
-             ret = av_samples_fill_arrays(buf->extended_data, &buf->linesize[0],
-                                          buf->extended_data[0], avctx->channels,
-                                          frame->nb_samples, avctx->sample_fmt,
-                                          32);
-         } else {
-             ret = av_samples_alloc(buf->extended_data, &buf->linesize[0],
-                                    avctx->channels, frame->nb_samples,
-                                    avctx->sample_fmt, 32);
+             buf->audio_data_size = buf_size;
          }
-         if (ret)
+         if ((ret = avcodec_fill_audio_frame(frame, avctx->channels,
+                                             avctx->sample_fmt, buf->data[0],
+                                             buf->audio_data_size, 32)))
              return ret;
  
-         /* if data was not used for extended_data, we need to copy as many of
-            the extended_data channel pointers as will fit */
-         if (needs_extended_data) {
-             for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
-                 buf->data[i] = buf->extended_data[i];
-         }
-         buf->audio_data_size = buf_size;
-         buf->nb_channels     = avctx->channels;
+         if (frame->extended_data == frame->data)
+             buf->extended_data = buf->data;
+         else
+             buf->extended_data = frame->extended_data;
+         memcpy(buf->data, frame->data, sizeof(frame->data));
+         buf->linesize[0] = frame->linesize[0];
+         buf->nb_channels = avctx->channels;
+     } else {
+         /* copy InternalBuffer info to the AVFrame */
+         frame->extended_data = buf->extended_data;
+         frame->linesize[0]   = buf->linesize[0];
+         memcpy(frame->data, buf->data, sizeof(frame->data));
      }
  
-     /* copy InternalBuffer info to the AVFrame */
      frame->type          = FF_BUFFER_TYPE_INTERNAL;
-     frame->extended_data = buf->extended_data;
-     frame->linesize[0]   = buf->linesize[0];
-     memcpy(frame->data, buf->data, sizeof(frame->data));
  
 -    if (avctx->pkt) frame->pkt_pts = avctx->pkt->pts;
 -    else            frame->pkt_pts = AV_NOPTS_VALUE;
 +    if (avctx->pkt) {
 +        frame->pkt_pts = avctx->pkt->pts;
 +        frame->pkt_pos = avctx->pkt->pos;
 +    } else {
 +        frame->pkt_pts = AV_NOPTS_VALUE;
 +        frame->pkt_pos = -1;
 +    }
 +
      frame->reordered_opaque = avctx->reordered_opaque;
  
      if (avctx->debug & FF_DEBUG_BUFFERS)
@@@ -870,21 -823,222 +897,225 @@@ free_and_end
      goto end;
  }
  
- int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx, uint8_t *buf, int buf_size,
-                          const short *samples)
+ int ff_alloc_packet(AVPacket *avpkt, int size)
  {
-     if(buf_size < FF_MIN_BUFFER_SIZE && 0){
-         av_log(avctx, AV_LOG_ERROR, "buffer smaller than minimum size\n");
-         return -1;
+     if (size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE)
+         return AVERROR(EINVAL);
+     if (avpkt->data) {
+         uint8_t *pkt_data;
+         int pkt_size;
+         if (avpkt->size < size)
+             return AVERROR(EINVAL);
+         pkt_data = avpkt->data;
+         pkt_size = avpkt->size;
+         av_init_packet(avpkt);
+         avpkt->data = pkt_data;
+         avpkt->size = pkt_size;
+         return 0;
+     } else {
+         return av_new_packet(avpkt, size);
      }
-     if((avctx->codec->capabilities & CODEC_CAP_DELAY) || samples){
-         int ret = avctx->codec->encode(avctx, buf, buf_size, samples);
-         avctx->frame_number++;
-         return ret;
-     }else
+ }
+ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
+                                               AVPacket *avpkt,
+                                               const AVFrame *frame,
+                                               int *got_packet_ptr)
+ {
+     int ret;
+     int user_packet = !!avpkt->data;
+     int nb_samples;
+     if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) {
+         av_init_packet(avpkt);
+         avpkt->size = 0;
          return 0;
+     }
+     /* check for valid frame size */
+     if (frame) {
+         nb_samples = frame->nb_samples;
+         if (avctx->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
+             if (nb_samples > avctx->frame_size)
+                 return AVERROR(EINVAL);
+         } else if (!(avctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
+             if (nb_samples != avctx->frame_size)
+                 return AVERROR(EINVAL);
+         }
+     } else {
+         nb_samples = avctx->frame_size;
+     }
+     if (avctx->codec->encode2) {
+         *got_packet_ptr = 0;
+         ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
+         if (!ret && *got_packet_ptr &&
+             !(avctx->codec->capabilities & CODEC_CAP_DELAY)) {
+             avpkt->pts = frame->pts;
+             avpkt->duration = av_rescale_q(frame->nb_samples,
+                                            (AVRational){ 1, avctx->sample_rate },
+                                            avctx->time_base);
+         }
+     } else {
+         /* for compatibility with encoders not supporting encode2(), we need to
+            allocate a packet buffer if the user has not provided one or check
+            the size otherwise */
+         int fs_tmp   = 0;
+         int buf_size = avpkt->size;
+         if (!user_packet) {
+             if (avctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) {
+                 av_assert0(av_get_bits_per_sample(avctx->codec_id) != 0);
+                 buf_size = nb_samples * avctx->channels *
+                            av_get_bits_per_sample(avctx->codec_id) / 8;
+             } else {
+                 /* this is a guess as to the required size.
+                    if an encoder needs more than this, it should probably
+                    implement encode2() */
+                 buf_size = 2 * avctx->frame_size * avctx->channels *
+                            av_get_bytes_per_sample(avctx->sample_fmt);
+                 buf_size += FF_MIN_BUFFER_SIZE;
+             }
+         }
+         if ((ret = ff_alloc_packet(avpkt, buf_size)))
+             return ret;
+         /* Encoders using AVCodec.encode() that support
+            CODEC_CAP_SMALL_LAST_FRAME require avctx->frame_size to be set to
+            the smaller size when encoding the last frame.
+            This code can be removed once all encoders supporting
+            CODEC_CAP_SMALL_LAST_FRAME use encode2() */
+         if ((avctx->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) &&
+             nb_samples < avctx->frame_size) {
+             fs_tmp = avctx->frame_size;
+             avctx->frame_size = nb_samples;
+         }
+         /* encode the frame */
+         ret = avctx->codec->encode(avctx, avpkt->data, avpkt->size,
+                                    frame ? frame->data[0] : NULL);
+         if (ret >= 0) {
+             if (!ret) {
+                 /* no output. if the packet data was allocated by libavcodec,
+                    free it */
+                 if (!user_packet)
+                     av_freep(&avpkt->data);
+             } else {
+                 if (avctx->coded_frame)
+                     avpkt->pts = avctx->coded_frame->pts;
+                 /* Set duration for final small packet. This can be removed
+                    once all encoders supporting CODEC_CAP_SMALL_LAST_FRAME use
+                    encode2() */
+                 if (fs_tmp) {
+                     avpkt->duration = av_rescale_q(avctx->frame_size,
+                                                    (AVRational){ 1, avctx->sample_rate },
+                                                    avctx->time_base);
+                 }
+             }
+             avpkt->size = ret;
+             *got_packet_ptr = (ret > 0);
+             ret = 0;
+         }
+         if (fs_tmp)
+             avctx->frame_size = fs_tmp;
+     }
+     if (!ret)
+         avctx->frame_number++;
+     /* NOTE: if we add any audio encoders which output non-keyframe packets,
+              this needs to be moved to the encoders, but for now we can do it
+              here to simplify things */
+     avpkt->flags |= AV_PKT_FLAG_KEY;
+     return ret;
  }
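
Seen from the caller's side, the new entry point takes an AVFrame in and returns an AVPacket out. A minimal sketch, assuming enc is an opened audio encoder context and frame points to an AVFrame already filled with samples (hypothetical names); the packet is left unallocated so avcodec_encode_audio2() sizes it itself:

    AVPacket pkt;
    int got_packet = 0;
    int ret;

    av_init_packet(&pkt);
    pkt.data = NULL;   /* let avcodec_encode_audio2() allocate the buffer */
    pkt.size = 0;

    ret = avcodec_encode_audio2(enc, &pkt, frame, &got_packet);
    if (ret < 0)
        return ret;
    if (got_packet) {
        /* write pkt.data / pkt.size to the muxer, then release the packet */
        av_free_packet(&pkt);
    }
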
  
 -        frame->pts = av_rescale_q(avctx->internal->sample_count,
+ #if FF_API_OLD_DECODE_AUDIO
+ int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx,
+                                              uint8_t *buf, int buf_size,
+                                              const short *samples)
+ {
+     AVPacket pkt;
+     AVFrame frame0;
+     AVFrame *frame;
+     int ret, samples_size, got_packet;
+     av_init_packet(&pkt);
+     pkt.data = buf;
+     pkt.size = buf_size;
+     if (samples) {
+         frame = &frame0;
+         avcodec_get_frame_defaults(frame);
+         if (avctx->frame_size) {
+             frame->nb_samples = avctx->frame_size;
+         } else {
+             /* if frame_size is not set, the number of samples must be
+                calculated from the buffer size */
+             int64_t nb_samples;
+             if (!av_get_bits_per_sample(avctx->codec_id)) {
+                 av_log(avctx, AV_LOG_ERROR, "avcodec_encode_audio() does not "
+                        "support this codec\n");
+                 return AVERROR(EINVAL);
+             }
+             nb_samples = (int64_t)buf_size * 8 /
+                          (av_get_bits_per_sample(avctx->codec_id) *
+                          avctx->channels);
+             if (nb_samples >= INT_MAX)
+                 return AVERROR(EINVAL);
+             frame->nb_samples = nb_samples;
+         }
+         /* it is assumed that the samples buffer is large enough based on the
+            relevant parameters */
+         samples_size = av_samples_get_buffer_size(NULL, avctx->channels,
+                                                   frame->nb_samples,
+                                                   avctx->sample_fmt, 1);
+         if ((ret = avcodec_fill_audio_frame(frame, avctx->channels,
+                                             avctx->sample_fmt,
+                                             samples, samples_size, 1)))
+             return ret;
+         /* fabricate frame pts from sample count.
+            this is needed because the avcodec_encode_audio() API does not have
+            a way for the user to provide pts */
++        if(avctx->sample_rate && avctx->time_base.num)
++            frame->pts = av_rescale_q(avctx->internal->sample_count,
+                                   (AVRational){ 1, avctx->sample_rate },
+                                   avctx->time_base);
++        else
++            frame->pts = AV_NOPTS_VALUE;
+         avctx->internal->sample_count += frame->nb_samples;
+     } else {
+         frame = NULL;
+     }
+     got_packet = 0;
+     ret = avcodec_encode_audio2(avctx, &pkt, frame, &got_packet);
+     if (!ret && got_packet && avctx->coded_frame) {
+         avctx->coded_frame->pts       = pkt.pts;
+         avctx->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY);
+     }
+     /* free any side data since we cannot return it */
+     if (pkt.side_data_elems > 0) {
+         int i;
+         for (i = 0; i < pkt.side_data_elems; i++)
+             av_free(pkt.side_data[i].data);
+         av_freep(&pkt.side_data);
+         pkt.side_data_elems = 0;
+     }
+     if (frame && frame->extended_data != frame->data)
+         av_free(frame->extended_data);
+     return ret ? ret : pkt.size;
+ }
+ #endif
  int attribute_align_arg avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,
                           const AVFrame *pict)
  {
@@@ -1214,9 -1306,8 +1445,9 @@@ AVCodec *avcodec_find_encoder(enum Code
  {
      AVCodec *p, *experimental=NULL;
      p = first_avcodec;
 +    id= remap_deprecated_codec_id(id);
      while (p) {
-         if (p->encode != NULL && p->id == id) {
+         if (codec_is_encoder(p) && p->id == id) {
              if (p->capabilities & CODEC_CAP_EXPERIMENTAL && !experimental) {
                  experimental = p;
              } else
@@@ -1243,19 -1334,14 +1474,19 @@@ AVCodec *avcodec_find_encoder_by_name(c
  
  AVCodec *avcodec_find_decoder(enum CodecID id)
  {
 -    AVCodec *p;
 +    AVCodec *p, *experimental=NULL;
      p = first_avcodec;
 +    id= remap_deprecated_codec_id(id);
      while (p) {
-         if (p->decode != NULL && p->id == id) {
 -        if (codec_is_decoder(p) && p->id == id)
 -            return p;
++        if (codec_is_decoder(p) && p->id == id) {
 +            if (p->capabilities & CODEC_CAP_EXPERIMENTAL && !experimental) {
 +                experimental = p;
 +            } else
 +                return p;
 +        }
          p = p->next;
      }
 -    return NULL;
 +    return experimental;
  }
  
  AVCodec *avcodec_find_decoder_by_name(const char *name)
@@@ -21,8 -21,8 +21,8 @@@
  #define AVCODEC_VERSION_H
  
  #define LIBAVCODEC_VERSION_MAJOR 53
- #define LIBAVCODEC_VERSION_MINOR 55
 -#define LIBAVCODEC_VERSION_MINOR 34
 -#define LIBAVCODEC_VERSION_MICRO  0
++#define LIBAVCODEC_VERSION_MINOR 56
 +#define LIBAVCODEC_VERSION_MICRO 105
  
  #define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
                                                 LIBAVCODEC_VERSION_MINOR, \
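
Applications that must build against both sides of this minor bump can gate on the version macros; a hypothetical guard (the exact minor to test should be checked against doc/APIchanges):

    #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(53, 56, 0)
        ret = avcodec_encode_audio2(enc, &pkt, frame, &got_packet);
    #else
        ret = avcodec_encode_audio(enc, buf, buf_size, samples);
    #endif
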
@@@ -35,16 -35,16 +35,16 @@@ SECTION .tex
      sar    %1, 10
  %endmacro
  
- %macro rv34_idct_dequant4x4_dc 1
- cglobal rv34_idct_dequant4x4_%1_mmx2, 1, 2, 0
+ %macro rv34_idct 1
+ cglobal rv34_idct_%1_mmx2, 1, 2, 0
      movsx   r1, word [r0]
      IDCT_DC r1
-     movd    mm0, r1d
-     pshufw  mm0, mm0, 0
-     movq    [r0+ 0], mm0
-     movq    [r0+16], mm0
-     movq    [r0+32], mm0
-     movq    [r0+48], mm0
 -    movd    m0, r1
++    movd    m0, r1d
+     pshufw  m0, m0, 0
+     movq    [r0+ 0], m0
+     movq    [r0+ 8], m0
+     movq    [r0+16], m0
+     movq    [r0+24], m0
      REP_RET
  %endmacro
  
Simple merge
Simple merge
@@@ -1,10 -1,10 +1,10 @@@
  /*
-  * SMJPEG demuxer
-  * Copyright (c) 2011 Paul B Mahol
+  * SMJPEG common code
+  * Copyright (c) 2011-2012 Paul B Mahol
   *
 - * This file is part of Libav.
 + * This file is part of FFmpeg.
   *
 - * Libav is free software; you can redistribute it and/or
 + * FFmpeg is free software; you can redistribute it and/or
   * modify it under the terms of the GNU Lesser General Public
   * License as published by the Free Software Foundation; either
   * version 2.1 of the License, or (at your option) any later version.
@@@ -30,8 -30,8 +30,8 @@@
  #include "libavutil/avutil.h"
  
  #define LIBAVFORMAT_VERSION_MAJOR 53
- #define LIBAVFORMAT_VERSION_MINOR 29
 -#define LIBAVFORMAT_VERSION_MINOR 20
 -#define LIBAVFORMAT_VERSION_MICRO  0
++#define LIBAVFORMAT_VERSION_MINOR 30
 +#define LIBAVFORMAT_VERSION_MICRO 100
  
  #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
                                                 LIBAVFORMAT_VERSION_MINOR, \