Merge commit 'f154ef1ae5b03f288dd8c025dab1884b4cb20c1a'
author Michael Niedermayer <michaelni@gmx.at>
Thu, 9 Aug 2012 13:14:57 +0000 (15:14 +0200)
committer Michael Niedermayer <michaelni@gmx.at>
Thu, 9 Aug 2012 13:23:00 +0000 (15:23 +0200)
* commit 'f154ef1ae5b03f288dd8c025dab1884b4cb20c1a':
  avconv: send EOF to lavfi even if flushing the decoder fails
  avconv: get rid of pointless temporary variable.
  avconv: simplify transcode().
  avconv: cosmetics
  avconv: replace no_packet array in transcode() with a var in InputStream
  avconv: remove unused variable from InputFile.
  avconv: remove commented out cruft.
  avconv: maintain sync on lavfi outputs.

Conflicts:
ffmpeg.c
ffmpeg.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
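
The headline change in this merge is the lavfi EOF handling ("send EOF to lavfi even if flushing the decoder fails"). As a reading aid only, here is a minimal sketch of that pattern, built from calls that appear verbatim in the diff below (av_buffersrc_add_ref() with a NULL ref, AV_BUFFERSRC_FLAG_NO_COPY, InputStream from ffmpeg.h); the helper name flush_decoder_to_lavfi is hypothetical and is not part of the merged code.

    #include "ffmpeg.h"                     /* InputStream, as in the merged tree */
    #include "libavfilter/buffersrc.h"      /* av_buffersrc_add_ref() */

    /* Hypothetical helper: when the decoder is being flushed (pkt->size == 0),
     * push a NULL buffer ref into every buffer source feeding this input
     * stream so the filter graphs see EOF, even if the decode call itself
     * returned an error. */
    static int flush_decoder_to_lavfi(InputStream *ist, const AVPacket *pkt,
                                      int dec_ret, int got_output)
    {
        int i;

        if (!got_output || dec_ret < 0) {
            if (!pkt->size)
                for (i = 0; i < ist->nb_filters; i++)
                    av_buffersrc_add_ref(ist->filters[i]->filter, NULL,
                                         AV_BUFFERSRC_FLAG_NO_COPY);
            return dec_ret;   /* the decode error is still propagated */
        }
        return 0;
    }
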
ffmpeg.c
ffmpeg.h

diff --combined ffmpeg.c
+++ b/ffmpeg.c
@@@ -1,42 -1,36 +1,42 @@@
  /*
 - * avconv main
 - * Copyright (c) 2000-2011 The libav developers.
 + * Copyright (c) 2000-2003 Fabrice Bellard
   *
 - * This file is part of Libav.
 + * This file is part of FFmpeg.
   *
 - * Libav is free software; you can redistribute it and/or
 + * FFmpeg is free software; you can redistribute it and/or
   * modify it under the terms of the GNU Lesser General Public
   * License as published by the Free Software Foundation; either
   * version 2.1 of the License, or (at your option) any later version.
   *
 - * Libav is distributed in the hope that it will be useful,
 + * FFmpeg is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   * Lesser General Public License for more details.
   *
   * You should have received a copy of the GNU Lesser General Public
 - * License along with Libav; if not, write to the Free Software
 + * License along with FFmpeg; if not, write to the Free Software
   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
   */
  
 +/**
 + * @file
 + * multimedia converter based on the FFmpeg libraries
 + */
 +
  #include "config.h"
  #include <ctype.h>
  #include <string.h>
  #include <math.h>
  #include <stdlib.h>
  #include <errno.h>
 -#include <signal.h>
  #include <limits.h>
 +#if HAVE_ISATTY
 +#include <unistd.h>
 +#endif
  #include "libavformat/avformat.h"
  #include "libavdevice/avdevice.h"
  #include "libswscale/swscale.h"
 -#include "libavresample/avresample.h"
 +#include "libswresample/swresample.h"
  #include "libavutil/opt.h"
  #include "libavutil/audioconvert.h"
  #include "libavutil/parseutils.h"
  #include "libavutil/avstring.h"
  #include "libavutil/libm.h"
  #include "libavutil/imgutils.h"
 +#include "libavutil/timestamp.h"
 +#include "libavutil/bprint.h"
  #include "libavutil/time.h"
  #include "libavformat/os_support.h"
  
 +#include "libavformat/ffm.h" // not public API
 +
 +# include "libavfilter/avcodec.h"
  # include "libavfilter/avfilter.h"
  # include "libavfilter/avfiltergraph.h"
  # include "libavfilter/buffersrc.h"
  #include <sys/select.h>
  #endif
  
 +#if HAVE_TERMIOS_H
 +#include <fcntl.h>
 +#include <sys/ioctl.h>
 +#include <sys/time.h>
 +#include <termios.h>
 +#elif HAVE_KBHIT
 +#include <conio.h>
 +#endif
 +
  #if HAVE_PTHREADS
  #include <pthread.h>
  #endif
  
  #include <time.h>
  
 -#include "avconv.h"
 +#include "ffmpeg.h"
  #include "cmdutils.h"
  
  #include "libavutil/avassert.h"
  
 -const char program_name[] = "avconv";
 +const char program_name[] = "ffmpeg";
  const int program_birth_year = 2000;
  
  static FILE *vstats_file;
  
 +static void do_video_stats(AVFormatContext *os, OutputStream *ost, int frame_size);
 +static int64_t getutime(void);
 +
 +static int run_as_daemon  = 0;
  static int64_t video_size = 0;
  static int64_t audio_size = 0;
 +static int64_t subtitle_size = 0;
  static int64_t extra_size = 0;
  static int nb_frames_dup = 0;
  static int nb_frames_drop = 0;
  
 +static int current_time;
 +AVIOContext *progress_avio = NULL;
  
 +static uint8_t *subtitle_out;
  
  #if HAVE_PTHREADS
  /* signal to input threads that they should exit; set by the main thread */
  static int transcoding_finished;
  #endif
  
 -#define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
 +#define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
  
  InputStream **input_streams = NULL;
  int        nb_input_streams = 0;
@@@ -139,125 -111,9 +139,125 @@@ int         nb_output_files   = 0
  FilterGraph **filtergraphs;
  int        nb_filtergraphs;
  
 -static void term_exit(void)
 +#if HAVE_TERMIOS_H
 +
 +/* init terminal so that we can grab keys */
 +static struct termios oldtty;
 +static int restore_tty;
 +#endif
 +
 +
 +/* sub2video hack:
 +   Convert subtitles to video with alpha to insert them in filter graphs.
 +   This is a temporary solution until libavfilter gets real subtitles support.
 + */
 +
 +
 +
 +static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
 +                                AVSubtitleRect *r)
 +{
 +    uint32_t *pal, *dst2;
 +    uint8_t *src, *src2;
 +    int x, y;
 +
 +    if (r->type != SUBTITLE_BITMAP) {
 +        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
 +        return;
 +    }
 +    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
 +        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle overflowing\n");
 +        return;
 +    }
 +
 +    dst += r->y * dst_linesize + r->x * 4;
 +    src = r->pict.data[0];
 +    pal = (uint32_t *)r->pict.data[1];
 +    for (y = 0; y < r->h; y++) {
 +        dst2 = (uint32_t *)dst;
 +        src2 = src;
 +        for (x = 0; x < r->w; x++)
 +            *(dst2++) = pal[*(src2++)];
 +        dst += dst_linesize;
 +        src += r->pict.linesize[0];
 +    }
 +}
 +
 +static void sub2video_push_ref(InputStream *ist, int64_t pts)
 +{
 +    AVFilterBufferRef *ref = ist->sub2video.ref;
 +    int i;
 +
 +    ist->sub2video.last_pts = ref->pts = pts;
 +    for (i = 0; i < ist->nb_filters; i++)
 +        av_buffersrc_add_ref(ist->filters[i]->filter,
 +                             avfilter_ref_buffer(ref, ~0),
 +                             AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |
 +                             AV_BUFFERSRC_FLAG_NO_COPY);
 +}
 +
 +static void sub2video_update(InputStream *ist, AVSubtitle *sub, int64_t pts)
 +{
 +    int w = ist->sub2video.w, h = ist->sub2video.h;
 +    AVFilterBufferRef *ref = ist->sub2video.ref;
 +    uint8_t *dst;
 +    int     dst_linesize;
 +    int i;
 +
 +    if (!ref)
 +        return;
 +    dst          = ref->data    [0];
 +    dst_linesize = ref->linesize[0];
 +    memset(dst, 0, h * dst_linesize);
 +    for (i = 0; i < sub->num_rects; i++)
 +        sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);
 +    sub2video_push_ref(ist, pts);
 +}
 +
 +static void sub2video_heartbeat(InputStream *ist, int64_t pts)
 +{
 +    InputFile *infile = input_files[ist->file_index];
 +    int i, j, nb_reqs;
 +    int64_t pts2;
 +
 +    /* When a frame is read from a file, examine all sub2video streams in
 +       the same file and send the sub2video frame again. Otherwise, decoded
 +       video frames could be accumulating in the filter graph while a filter
 +       (possibly overlay) is desperately waiting for a subtitle frame. */
 +    for (i = 0; i < infile->nb_streams; i++) {
 +        InputStream *ist2 = input_streams[infile->ist_index + i];
 +        if (!ist2->sub2video.ref)
 +            continue;
 +        /* subtitles seem to be usually muxed ahead of other streams;
 +           if not, subtracting a larger time here is necessary */
 +        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
 +        /* do not send the heartbeat frame if the subtitle is already ahead */
 +        if (pts2 <= ist2->sub2video.last_pts)
 +            continue;
 +        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
 +            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
 +        if (nb_reqs)
 +            sub2video_push_ref(ist2, pts2);
 +    }
 +}
 +
 +static void sub2video_flush(InputStream *ist)
 +{
 +    int i;
 +
 +    for (i = 0; i < ist->nb_filters; i++)
 +        av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
 +}
 +
 +/* end of sub2video hack */
 +
 +void term_exit(void)
  {
 -    av_log(NULL, AV_LOG_QUIET, "");
 +    av_log(NULL, AV_LOG_QUIET, "%s", "");
 +#if HAVE_TERMIOS_H
 +    if(restore_tty)
 +        tcsetattr (0, TCSANOW, &oldtty);
 +#endif
  }
  
  static volatile int received_sigterm = 0;
@@@ -269,40 -125,10 +269,40 @@@ sigterm_handler(int sig
      received_sigterm = sig;
      received_nb_signals++;
      term_exit();
 +    if(received_nb_signals > 3)
 +        exit(123);
  }
  
 -static void term_init(void)
 +void term_init(void)
  {
 +#if HAVE_TERMIOS_H
 +    if(!run_as_daemon){
 +        struct termios tty;
 +        int istty = 1;
 +#if HAVE_ISATTY
 +        istty = isatty(0) && isatty(2);
 +#endif
 +        if (istty && tcgetattr (0, &tty) == 0) {
 +            oldtty = tty;
 +            restore_tty = 1;
 +            atexit(term_exit);
 +
 +            tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
 +                             |INLCR|IGNCR|ICRNL|IXON);
 +            tty.c_oflag |= OPOST;
 +            tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
 +            tty.c_cflag &= ~(CSIZE|PARENB);
 +            tty.c_cflag |= CS8;
 +            tty.c_cc[VMIN] = 1;
 +            tty.c_cc[VTIME] = 0;
 +
 +            tcsetattr (0, TCSANOW, &tty);
 +        }
 +        signal(SIGQUIT, sigterm_handler); /* Quit (POSIX).  */
 +    }
 +#endif
 +    avformat_network_deinit();
 +
      signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
      signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
  #ifdef SIGXCPU
  #endif
  }
  
 +/* read a key without blocking */
 +static int read_key(void)
 +{
 +    unsigned char ch;
 +#if HAVE_TERMIOS_H
 +    int n = 1;
 +    struct timeval tv;
 +    fd_set rfds;
 +
 +    FD_ZERO(&rfds);
 +    FD_SET(0, &rfds);
 +    tv.tv_sec = 0;
 +    tv.tv_usec = 0;
 +    n = select(1, &rfds, NULL, NULL, &tv);
 +    if (n > 0) {
 +        n = read(0, &ch, 1);
 +        if (n == 1)
 +            return ch;
 +
 +        return n;
 +    }
 +#elif HAVE_KBHIT
 +#    if HAVE_PEEKNAMEDPIPE
 +    static int is_pipe;
 +    static HANDLE input_handle;
 +    DWORD dw, nchars;
 +    if(!input_handle){
 +        input_handle = GetStdHandle(STD_INPUT_HANDLE);
 +        is_pipe = !GetConsoleMode(input_handle, &dw);
 +    }
 +
 +    if (stdin->_cnt > 0) {
 +        read(0, &ch, 1);
 +        return ch;
 +    }
 +    if (is_pipe) {
 +        /* When running under a GUI, you will end here. */
 +        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL))
 +            return -1;
 +        //Read it
 +        if(nchars != 0) {
 +            read(0, &ch, 1);
 +            return ch;
 +        }else{
 +            return -1;
 +        }
 +    }
 +#    endif
 +    if(kbhit())
 +        return(getch());
 +#endif
 +    return -1;
 +}
 +
  static int decode_interrupt_cb(void *ctx)
  {
      return received_nb_signals > 1;
  
  const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
  
 -void exit_program(int ret)
 +void av_noreturn exit_program(int ret)
  {
      int i, j;
  
      }
      av_freep(&filtergraphs);
  
 +    av_freep(&subtitle_out);
 +
      /* close files */
      for (i = 0; i < nb_output_files; i++) {
          AVFormatContext *s = output_files[i]->ctx;
          av_freep(&input_streams[i]->decoded_frame);
          av_dict_free(&input_streams[i]->opts);
          free_buffer_pool(&input_streams[i]->buffer_pool);
 +        avfilter_unref_bufferp(&input_streams[i]->sub2video.ref);
          av_freep(&input_streams[i]->filters);
          av_freep(&input_streams[i]);
      }
@@@ -478,20 -247,43 +478,20 @@@ static void assert_codec_experimental(A
      }
  }
  
 -/**
 - * Update the requested input sample format based on the output sample format.
 - * This is currently only used to request float output from decoders which
 - * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
 - * Ideally this will be removed in the future when decoders do not do format
 - * conversion and only output in their native format.
 - */
 -static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
 -                              AVCodecContext *enc)
 +static void update_benchmark(const char *fmt, ...)
  {
 -    /* if sample formats match or a decoder sample format has already been
 -       requested, just return */
 -    if (enc->sample_fmt == dec->sample_fmt ||
 -        dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
 -        return;
 -
 -    /* if decoder supports more than one output format */
 -    if (dec_codec && dec_codec->sample_fmts &&
 -        dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
 -        dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
 -        const enum AVSampleFormat *p;
 -        int min_dec = -1, min_inc = -1;
 -
 -        /* find a matching sample format in the encoder */
 -        for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
 -            if (*p == enc->sample_fmt) {
 -                dec->request_sample_fmt = *p;
 -                return;
 -            } else if (*p > enc->sample_fmt) {
 -                min_inc = FFMIN(min_inc, *p - enc->sample_fmt);
 -            } else
 -                min_dec = FFMIN(min_dec, enc->sample_fmt - *p);
 +    if (do_benchmark_all) {
 +        int64_t t = getutime();
 +        va_list va;
 +        char buf[1024];
 +
 +        if (fmt) {
 +            va_start(va, fmt);
 +            vsnprintf(buf, sizeof(buf), fmt, va);
 +            va_end(va);
 +            printf("bench: %8"PRIu64" %s \n", t - current_time, buf);
          }
 -
 -        /* if none match, provide the one that matches quality closest */
 -        dec->request_sample_fmt = min_inc > 0 ? enc->sample_fmt + min_inc :
 -                                  enc->sample_fmt - min_dec;
 +        current_time = t;
      }
  }
  
@@@ -501,18 -293,6 +501,18 @@@ static void write_frame(AVFormatContex
      AVCodecContext          *avctx = ost->st->codec;
      int ret;
  
 +    if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
 +        (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
 +        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
 +
 +    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
 +        int64_t max = ost->st->cur_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
 +        if (ost->st->cur_dts && ost->st->cur_dts != AV_NOPTS_VALUE &&  max > pkt->dts) {
 +            av_log(s, max - pkt->dts > 2 ? AV_LOG_WARNING : AV_LOG_DEBUG, "Audio timestamp %"PRId64" < %"PRId64" invalid, clipping\n", pkt->dts, max);
 +            pkt->pts = pkt->dts = max;
 +        }
 +    }
 +
      /*
       * Audio encoders may split the packets --  #frames in != #packets out.
       * But there is no reordering, so we can limit the number of output packets
              av_free_packet(pkt);
              new_pkt.destruct = av_destruct_packet;
          } else if (a < 0) {
 -            av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
 +            av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
                     bsfc->filter->name, pkt->stream_index,
                     avctx->codec ? avctx->codec->name : "copy");
              print_error("", a);
@@@ -589,13 -369,10 +589,13 @@@ static void do_audio_out(AVFormatContex
          frame->pts = ost->sync_opts;
      ost->sync_opts = frame->pts + frame->nb_samples;
  
 +    av_assert0(pkt.size || !pkt.data);
 +    update_benchmark(NULL);
      if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
 -        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
 +        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
          exit_program(1);
      }
 +    update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
  
      if (got_packet) {
          if (pkt.pts != AV_NOPTS_VALUE)
          if (pkt.duration > 0)
              pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
  
 +        if (debug_ts) {
 +            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
 +                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
 +                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
 +                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
 +        }
 +
          write_frame(s, &pkt, ost);
  
          audio_size += pkt.size;
 +        av_free_packet(&pkt);
      }
  }
  
@@@ -664,6 -433,7 +664,6 @@@ static void do_subtitle_out(AVFormatCon
                              AVSubtitle *sub,
                              int64_t pts)
  {
 -    static uint8_t *subtitle_out = NULL;
      int subtitle_out_max_size = 1024 * 1024;
      int subtitle_out_size, nb, i;
      AVCodecContext *enc;
      else
          nb = 1;
  
 +    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
 +    pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q)
 +        - output_files[ost->file_index]->start_time;
      for (i = 0; i < nb; i++) {
 -        ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
 +        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
          if (!check_recording_time(ost))
              return;
  
 -        sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
 +        sub->pts = pts;
          // start_display_time is required to be 0
          sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
          sub->end_display_time  -= sub->start_display_time;
          sub->start_display_time = 0;
 +        if (i == 1)
 +            sub->num_rects = 0;
          subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                      subtitle_out_max_size, sub);
          if (subtitle_out_size < 0) {
          pkt.data = subtitle_out;
          pkt.size = subtitle_out_size;
          pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
 +        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
          if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
              /* XXX: the pts correction is handled here. Maybe handling
                 it in the codec would be better */
                  pkt.pts += 90 * sub->end_display_time;
          }
          write_frame(s, &pkt, ost);
 +        subtitle_size += pkt.size;
      }
  }
  
  static void do_video_out(AVFormatContext *s,
                           OutputStream *ost,
                           AVFrame *in_picture,
 -                         int *frame_size, float quality)
 +                         float quality)
  {
      int ret, format_video_sync;
      AVPacket pkt;
      AVCodecContext *enc = ost->st->codec;
 +    int nb_frames, i;
 +    double sync_ipts, delta;
 +    double duration = 0;
 +    int frame_size = 0;
 +    InputStream *ist = NULL;
 +
 +    if (ost->source_index >= 0)
 +        ist = input_streams[ost->source_index];
  
 -    *frame_size = 0;
 +    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
 +        duration = 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base));
 +
 +    sync_ipts = in_picture->pts;
 +    delta = sync_ipts - ost->sync_opts + duration;
 +
 +    /* by default, we output a single frame */
 +    nb_frames = 1;
  
      format_video_sync = video_sync_method;
      if (format_video_sync == VSYNC_AUTO)
 -        format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
 -                            (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
 -    if (format_video_sync != VSYNC_PASSTHROUGH &&
 -        ost->frame_number &&
 -        in_picture->pts != AV_NOPTS_VALUE &&
 -        in_picture->pts < ost->sync_opts) {
 +        format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : 1;
 +
 +    switch (format_video_sync) {
 +    case VSYNC_CFR:
 +        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
 +        if (delta < -1.1)
 +            nb_frames = 0;
 +        else if (delta > 1.1)
 +            nb_frames = lrintf(delta);
 +        break;
 +    case VSYNC_VFR:
 +        if (delta <= -0.6)
 +            nb_frames = 0;
 +        else if (delta > 0.6)
 +            ost->sync_opts = lrint(sync_ipts);
 +        break;
 +    case VSYNC_DROP:
 +    case VSYNC_PASSTHROUGH:
 +        ost->sync_opts = lrint(sync_ipts);
 +        break;
 +    default:
 +        av_assert0(0);
 +    }
 +
 +    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
 +    if (nb_frames == 0) {
          nb_frames_drop++;
          av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
          return;
 +    } else if (nb_frames > 1) {
 +        if (nb_frames > dts_error_threshold * 30) {
 +            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
 +            nb_frames_drop++;
 +            return;
 +        }
 +        nb_frames_dup += nb_frames - 1;
 +        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
      }
  
 -    if (in_picture->pts == AV_NOPTS_VALUE)
 -        in_picture->pts = ost->sync_opts;
 -    ost->sync_opts = in_picture->pts;
 -
 -
 -    if (!ost->frame_number)
 -        ost->first_pts = in_picture->pts;
 -
 +  /* duplicates frame if needed */
 +  for (i = 0; i < nb_frames; i++) {
      av_init_packet(&pkt);
      pkt.data = NULL;
      pkt.size = 0;
  
 -    if (!check_recording_time(ost) ||
 -        ost->frame_number >= ost->max_frames)
 +    in_picture->pts = ost->sync_opts;
 +
 +    if (!check_recording_time(ost))
          return;
  
      if (s->oformat->flags & AVFMT_RAWPICTURE &&
          pkt.flags |= AV_PKT_FLAG_KEY;
  
          write_frame(s, &pkt, ost);
 +        video_size += pkt.size;
      } else {
          int got_packet;
          AVFrame big_picture;
              big_picture.pict_type = AV_PICTURE_TYPE_I;
              ost->forced_kf_index++;
          }
 +        update_benchmark(NULL);
          ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
 +        update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
          if (ret < 0) {
              av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
              exit_program(1);
          }
  
          if (got_packet) {
 +            if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
 +                pkt.pts = ost->sync_opts;
 +
              if (pkt.pts != AV_NOPTS_VALUE)
                  pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
              if (pkt.dts != AV_NOPTS_VALUE)
                  pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
  
 +            if (debug_ts) {
 +                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
 +                    "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
 +                    av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
 +                    av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
 +            }
 +
              write_frame(s, &pkt, ost);
 -            *frame_size = pkt.size;
 +            frame_size = pkt.size;
              video_size += pkt.size;
 +            av_free_packet(&pkt);
  
              /* if two pass, output log */
              if (ost->logfile && enc->stats_out) {
       * flush, we need to limit them here, before they go into encoder.
       */
      ost->frame_number++;
 +  }
 +
 +    if (vstats_filename && frame_size)
 +        do_video_stats(output_files[ost->file_index]->ctx, ost, frame_size);
  }
  
  static double psnr(double d)
@@@ -937,127 -644,141 +937,127 @@@ static void do_video_stats(AVFormatCont
      }
  }
  
 -/**
 - * Read one frame for lavfi output for ost and encode it.
 - */
 -static int poll_filter(OutputStream *ost)
 +/* check for new output on any of the filtergraphs */
 +static int poll_filters(void)
  {
 -    OutputFile    *of = output_files[ost->file_index];
      AVFilterBufferRef *picref;
      AVFrame *filtered_frame = NULL;
 -    int frame_size, ret;
 +    int i, ret, ret_all;
 +    unsigned nb_success = 1, av_uninit(nb_eof);
 +    int64_t frame_pts;
  
 -    if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
 -        return AVERROR(ENOMEM);
 -    } else
 -        avcodec_get_frame_defaults(ost->filtered_frame);
 -    filtered_frame = ost->filtered_frame;
 -
 -    if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
 -        !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
 -        ret = av_buffersink_read_samples(ost->filter->filter, &picref,
 -                                         ost->st->codec->frame_size);
 -    else
 -        ret = av_buffersink_read(ost->filter->filter, &picref);
 -
 -    if (ret < 0)
 -        return ret;
 -
 -    avfilter_copy_buf_props(filtered_frame, picref);
 -    if (picref->pts != AV_NOPTS_VALUE) {
 -        filtered_frame->pts = av_rescale_q(picref->pts,
 -                                           ost->filter->filter->inputs[0]->time_base,
 -                                           ost->st->codec->time_base) -
 -                              av_rescale_q(of->start_time,
 -                                           AV_TIME_BASE_Q,
 -                                           ost->st->codec->time_base);
 -
 -        if (of->start_time && filtered_frame->pts < 0) {
 -            avfilter_unref_buffer(picref);
 -            return 0;
 -        }
 -    }
 -
 -    switch (ost->filter->filter->inputs[0]->type) {
 -    case AVMEDIA_TYPE_VIDEO:
 -        if (!ost->frame_aspect_ratio)
 -            ost->st->codec->sample_aspect_ratio = picref->video->pixel_aspect;
 -
 -        do_video_out(of->ctx, ost, filtered_frame, &frame_size,
 -                     same_quant ? ost->last_quality :
 -                                  ost->st->codec->global_quality);
 -        if (vstats_filename && frame_size)
 -            do_video_stats(of->ctx, ost, frame_size);
 -        break;
 -    case AVMEDIA_TYPE_AUDIO:
 -        do_audio_out(of->ctx, ost, filtered_frame);
 -        break;
 -    default:
 -        // TODO support subtitle filters
 -        av_assert0(0);
 -    }
 +    while (1) {
 +        /* Reap all buffers present in the buffer sinks */
 +        for (i = 0; i < nb_output_streams; i++) {
 +            OutputStream *ost = output_streams[i];
 +            OutputFile    *of = output_files[ost->file_index];
 +            int ret = 0;
  
 -    avfilter_unref_buffer(picref);
 +            if (!ost->filter)
 +                continue;
  
 -    return 0;
 -}
 +            if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
 +                return AVERROR(ENOMEM);
 +            } else
 +                avcodec_get_frame_defaults(ost->filtered_frame);
 +            filtered_frame = ost->filtered_frame;
  
 -/**
 - * Read as many frames from possible from lavfi and encode them.
 - *
 - * Always read from the active stream with the lowest timestamp. If no frames
 - * are available for it then return EAGAIN and wait for more input. This way we
 - * can use lavfi sources that generate unlimited amount of frames without memory
 - * usage exploding.
 - */
 -static int poll_filters(void)
 -{
 -    int i, ret = 0;
 +            while (1) {
 +                ret = av_buffersink_get_buffer_ref(ost->filter->filter, &picref,
 +                                                   AV_BUFFERSINK_FLAG_NO_REQUEST);
 +                if (ret < 0) {
 +                    if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
 +                        char buf[256];
 +                        av_strerror(ret, buf, sizeof(buf));
 +                        av_log(NULL, AV_LOG_WARNING,
 +                               "Error in av_buffersink_get_buffer_ref(): %s\n", buf);
 +                    }
 +                    break;
 +                }
 +                frame_pts = AV_NOPTS_VALUE;
 +                if (picref->pts != AV_NOPTS_VALUE) {
 +                    filtered_frame->pts = frame_pts = av_rescale_q(picref->pts,
 +                                                    ost->filter->filter->inputs[0]->time_base,
 +                                                    ost->st->codec->time_base) -
 +                                        av_rescale_q(of->start_time,
 +                                                    AV_TIME_BASE_Q,
 +                                                    ost->st->codec->time_base);
 +
 +                    if (of->start_time && filtered_frame->pts < 0) {
 +                        avfilter_unref_buffer(picref);
 +                        continue;
 +                    }
 +                }
 +                //if (ost->source_index >= 0)
 +                //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
  
 -    while (ret >= 0 && !received_sigterm) {
 -        OutputStream *ost = NULL;
 -        int64_t min_pts = INT64_MAX;
  
 -        /* choose output stream with the lowest timestamp */
 -        for (i = 0; i < nb_output_streams; i++) {
 -            int64_t pts = output_streams[i]->sync_opts;
 +                switch (ost->filter->filter->inputs[0]->type) {
 +                case AVMEDIA_TYPE_VIDEO:
 +                    avfilter_copy_buf_props(filtered_frame, picref);
 +                    filtered_frame->pts = frame_pts;
 +                    if (!ost->frame_aspect_ratio)
 +                        ost->st->codec->sample_aspect_ratio = picref->video->sample_aspect_ratio;
  
 -            if (!output_streams[i]->filter ||
 -                output_streams[i]->is_past_recording_time)
 -                continue;
 +                    do_video_out(of->ctx, ost, filtered_frame,
 +                                 same_quant ? ost->last_quality :
 +                                              ost->st->codec->global_quality);
 +                    break;
 +                case AVMEDIA_TYPE_AUDIO:
 +                    avfilter_copy_buf_props(filtered_frame, picref);
 +                    filtered_frame->pts = frame_pts;
 +                    do_audio_out(of->ctx, ost, filtered_frame);
 +                    break;
 +                default:
 +                    // TODO support subtitle filters
 +                    av_assert0(0);
 +                }
  
 -            pts = av_rescale_q(pts, output_streams[i]->st->codec->time_base,
 -                               AV_TIME_BASE_Q);
 -            if (pts < min_pts) {
 -                min_pts = pts;
 -                ost = output_streams[i];
 +                avfilter_unref_buffer(picref);
              }
          }
 -
 -        if (!ost)
 +        if (!nb_success) /* from last round */
              break;
 -
 -        ret = poll_filter(ost);
 -
 -        if (ret == AVERROR_EOF) {
 -            ost->is_past_recording_time = 1;
 -
 -            if (opt_shortest)
 -                return ret;
 -
 -            ret = 0;
 -        } else if (ret == AVERROR(EAGAIN))
 -            return 0;
 +        /* Request frames through all the graphs */
 +        ret_all = nb_success = nb_eof = 0;
 +        for (i = 0; i < nb_filtergraphs; i++) {
 +            ret = avfilter_graph_request_oldest(filtergraphs[i]->graph);
 +            if (!ret) {
 +                nb_success++;
 +            } else if (ret == AVERROR_EOF) {
 +                nb_eof++;
 +            } else if (ret != AVERROR(EAGAIN)) {
 +                char buf[256];
 +                av_strerror(ret, buf, sizeof(buf));
 +                av_log(NULL, AV_LOG_WARNING,
 +                       "Error in request_frame(): %s\n", buf);
 +                ret_all = ret;
 +            }
 +        }
 +        /* Try again if anything succeeded */
      }
 -
 -    return ret;
 +    return nb_eof == nb_filtergraphs ? AVERROR_EOF : ret_all;
  }
  
 -static void print_report(int is_last_report, int64_t timer_start)
 +static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
  {
      char buf[1024];
 +    AVBPrint buf_script;
      OutputStream *ost;
      AVFormatContext *oc;
      int64_t total_size;
      AVCodecContext *enc;
      int frame_number, vid, i;
 -    double bitrate, ti1, pts;
 +    double bitrate;
 +    int64_t pts = INT64_MAX;
      static int64_t last_time = -1;
      static int qp_histogram[52];
 +    int hours, mins, secs, us;
  
 -    if (!print_stats && !is_last_report)
 +    if (!print_stats && !is_last_report && !progress_avio)
          return;
  
      if (!is_last_report) {
 -        int64_t cur_time;
 -        /* display the report every 0.5 seconds */
 -        cur_time = av_gettime();
          if (last_time == -1) {
              last_time = cur_time;
              return;
      oc = output_files[0]->ctx;
  
      total_size = avio_size(oc->pb);
 -    if (total_size < 0) // FIXME improve avio_size() so it works with non seekable output too
 +    if (total_size < 0) { // FIXME improve avio_size() so it works with non seekable output too
          total_size = avio_tell(oc->pb);
 +        if (total_size < 0)
 +            total_size = 0;
 +    }
  
      buf[0] = '\0';
 -    ti1 = 1e10;
      vid = 0;
 +    av_bprint_init(&buf_script, 0, 1);
      for (i = 0; i < nb_output_streams; i++) {
          float q = -1;
          ost = output_streams[i];
              q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
          if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
              snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
 +            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
 +                       ost->file_index, ost->index, q);
          }
          if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
 -            float t = (av_gettime() - timer_start) / 1000000.0;
 +            float fps, t = (cur_time-timer_start) / 1000000.0;
  
              frame_number = ost->frame_number;
 -            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
 -                     frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
 +            fps = t > 1 ? frame_number / t : 0;
 +            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
 +                     frame_number, fps < 9.95, fps, q);
 +            av_bprintf(&buf_script, "frame=%d\n", frame_number);
 +            av_bprintf(&buf_script, "fps=%.1f\n", fps);
 +            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
 +                       ost->file_index, ost->index, q);
              if (is_last_report)
                  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
              if (qp_hist) {
                  int j;
                  double error, error_sum = 0;
                  double scale, scale_sum = 0;
 +                double p;
                  char type[3] = { 'Y','U','V' };
                  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
                  for (j = 0; j < 3; j++) {
                          scale /= 4;
                      error_sum += error;
                      scale_sum += scale;
 -                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
 +                    p = psnr(error / scale);
 +                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
 +                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
 +                               ost->file_index, ost->index, type[j] | 32, p);
                  }
 +                p = psnr(error_sum / scale_sum);
                  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
 +                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
 +                           ost->file_index, ost->index, p);
              }
              vid = 1;
          }
          /* compute min output value */
 -        pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
 -        if ((pts < ti1) && (pts > 0))
 -            ti1 = pts;
 +        pts = FFMIN(pts, av_rescale_q(ost->st->pts.val,
 +                                      ost->st->time_base, AV_TIME_BASE_Q));
      }
 -    if (ti1 < 0.01)
 -        ti1 = 0.01;
  
 -    bitrate = (double)(total_size * 8) / ti1 / 1000.0;
 +    secs = pts / AV_TIME_BASE;
 +    us = pts % AV_TIME_BASE;
 +    mins = secs / 60;
 +    secs %= 60;
 +    hours = mins / 60;
 +    mins %= 60;
 +
 +    bitrate = pts ? total_size * 8 / (pts / 1000.0) : 0;
  
      snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
 -            "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
 -            (double)total_size / 1024, ti1, bitrate);
 +             "size=%8.0fkB time=", total_size / 1024.0);
 +    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
 +             "%02d:%02d:%02d.%02d ", hours, mins, secs,
 +             (100 * us) / AV_TIME_BASE);
 +    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
 +             "bitrate=%6.1fkbits/s", bitrate);
 +    av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
 +    av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
 +    av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
 +               hours, mins, secs, us);
  
      if (nb_frames_dup || nb_frames_drop)
          snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
                  nb_frames_dup, nb_frames_drop);
 +    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
 +    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
  
 +    if (print_stats || is_last_report) {
      av_log(NULL, AV_LOG_INFO, "%s    \r", buf);
  
      fflush(stderr);
 +    }
 +
 +    if (progress_avio) {
 +        av_bprintf(&buf_script, "progress=%s\n",
 +                   is_last_report ? "end" : "continue");
 +        avio_write(progress_avio, buf_script.str,
 +                   FFMIN(buf_script.len, buf_script.size - 1));
 +        avio_flush(progress_avio);
 +        av_bprint_finalize(&buf_script, NULL);
 +        if (is_last_report) {
 +            avio_close(progress_avio);
 +            progress_avio = NULL;
 +        }
 +    }
  
      if (is_last_report) {
 -        int64_t raw= audio_size + video_size + extra_size;
 +        int64_t raw= audio_size + video_size + subtitle_size + extra_size;
          av_log(NULL, AV_LOG_INFO, "\n");
 -        av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
 +        av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
                 video_size / 1024.0,
                 audio_size / 1024.0,
 +               subtitle_size / 1024.0,
                 extra_size / 1024.0,
                 100.0 * (total_size - raw) / raw
          );
 +        if(video_size + audio_size + subtitle_size + extra_size == 0){
 +            av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used)\n");
 +        }
      }
  }
  
@@@ -1255,14 -926,12 +1255,14 @@@ static void flush_encoders(void
                  pkt.data = NULL;
                  pkt.size = 0;
  
 +                update_benchmark(NULL);
                  ret = encode(enc, &pkt, NULL, &got_packet);
 +                update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
                  if (ret < 0) {
                      av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
                      exit_program(1);
                  }
 -                *size += ret;
 +                *size += pkt.size;
                  if (ost->logfile && enc->stats_out) {
                      fprintf(ost->logfile, "%s", enc->stats_out);
                  }
@@@ -1294,7 -963,7 +1294,7 @@@ static int check_output_constraints(Inp
      if (ost->source_index != ist_index)
          return 0;
  
 -    if (of->start_time && ist->last_dts < of->start_time)
 +    if (of->start_time && ist->pts < of->start_time)
          return 0;
  
      return 1;
@@@ -1304,7 -973,6 +1304,7 @@@ static void do_streamcopy(InputStream *
  {
      OutputFile *of = output_files[ost->file_index];
      int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
 +    AVPicture pict;
      AVPacket opkt;
  
      av_init_packet(&opkt);
          return;
  
      if (of->recording_time != INT64_MAX &&
 -        ist->last_dts >= of->recording_time + of->start_time) {
 +        ist->pts >= of->recording_time + of->start_time) {
          ost->is_past_recording_time = 1;
          return;
      }
      else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
          video_size += pkt->size;
          ost->sync_opts++;
 +    } else if (ost->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
 +        subtitle_size += pkt->size;
      }
  
      if (pkt->pts != AV_NOPTS_VALUE)
          opkt.pts = AV_NOPTS_VALUE;
  
      if (pkt->dts == AV_NOPTS_VALUE)
 -        opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
 +        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
      else
          opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
      opkt.dts -= ost_tb_start_time;
          opkt.size = pkt->size;
      }
  
 +    if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
 +        /* store AVPicture in AVPacket, as expected by the output format */
 +        avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
 +        opkt.data = (uint8_t *)&pict;
 +        opkt.size = sizeof(AVPicture);
 +        opkt.flags |= AV_PKT_FLAG_KEY;
 +    }
 +
      write_frame(of->ctx, &opkt, ost);
      ost->st->codec->frame_number++;
      av_free_packet(&opkt);
  static void rate_emu_sleep(InputStream *ist)
  {
      if (input_files[ist->file_index]->rate_emu) {
 -        int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
 +        int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
          int64_t now = av_gettime() - ist->start;
          if (pts > now)
              av_usleep(pts - now);
@@@ -1401,8 -1059,8 +1401,8 @@@ static int decode_audio(InputStream *is
  {
      AVFrame *decoded_frame;
      AVCodecContext *avctx = ist->st->codec;
 -    int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
      int i, ret, resample_changed;
 +    AVRational decoded_frame_tb;
  
      if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
          return AVERROR(ENOMEM);
          avcodec_get_frame_defaults(ist->decoded_frame);
      decoded_frame = ist->decoded_frame;
  
 +    update_benchmark(NULL);
      ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
-     if (ret < 0) {
-         return ret;
-     }
-     if (avctx->sample_rate <= 0) {
 +    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
-     if (!*got_output) {
-         /* no audio frame */
-         if (!pkt->size)
++
++    if (ret >= 0 && avctx->sample_rate <= 0) {
 +        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
 +        return AVERROR_INVALIDDATA;
 +    }
 +
+     if (!*got_output || ret < 0) {
+         if (!pkt->size) {
              for (i = 0; i < ist->nb_filters; i++)
 -                av_buffersrc_buffer(ist->filters[i]->filter, NULL);
 +                av_buffersrc_add_ref(ist->filters[i]->filter, NULL,
 +                                     AV_BUFFERSRC_FLAG_NO_COPY);
+         }
          return ret;
      }
  
 -    /* if the decoder provides a pts, use it instead of the last packet pts.
 -       the decoder could be delaying output by a packet or more. */
 -    if (decoded_frame->pts != AV_NOPTS_VALUE)
 -        ist->next_dts = decoded_frame->pts;
 -    else if (pkt->pts != AV_NOPTS_VALUE) {
 -        decoded_frame->pts = pkt->pts;
 -        pkt->pts           = AV_NOPTS_VALUE;
 -    }
 -
 -    // preprocess audio (volume)
 -    if (audio_volume != 256) {
 -        int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
 -        void *samples = decoded_frame->data[0];
 -        switch (avctx->sample_fmt) {
 -        case AV_SAMPLE_FMT_U8:
 -        {
 -            uint8_t *volp = samples;
 -            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 -                int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
 -                *volp++ = av_clip_uint8(v);
 -            }
 -            break;
 -        }
 -        case AV_SAMPLE_FMT_S16:
 -        {
 -            int16_t *volp = samples;
 -            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 -                int v = ((*volp) * audio_volume + 128) >> 8;
 -                *volp++ = av_clip_int16(v);
 -            }
 -            break;
 -        }
 -        case AV_SAMPLE_FMT_S32:
 -        {
 -            int32_t *volp = samples;
 -            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 -                int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
 -                *volp++ = av_clipl_int32(v);
 -            }
 -            break;
 -        }
 -        case AV_SAMPLE_FMT_FLT:
 -        {
 -            float *volp = samples;
 -            float scale = audio_volume / 256.f;
 -            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 -                *volp++ *= scale;
 -            }
 -            break;
 -        }
 -        case AV_SAMPLE_FMT_DBL:
 -        {
 -            double *volp = samples;
 -            double scale = audio_volume / 256.;
 -            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 -                *volp++ *= scale;
 -            }
 -            break;
 -        }
 -        default:
 -            av_log(NULL, AV_LOG_FATAL,
 -                   "Audio volume adjustment on sample format %s is not supported.\n",
 -                   av_get_sample_fmt_name(ist->st->codec->sample_fmt));
 -            exit_program(1);
 -        }
 -    }
 +#if 1
 +    /* increment next_dts to use for the case where the input stream does not
 +       have timestamps or there are multiple frames in the packet */
 +    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
 +                     avctx->sample_rate;
 +    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
 +                     avctx->sample_rate;
 +#endif
  
      rate_emu_sleep(ist);
  
          ist->resample_channels       = avctx->channels;
  
          for (i = 0; i < nb_filtergraphs; i++)
 -            if (ist_in_filtergraph(filtergraphs[i], ist) &&
 -                configure_filtergraph(filtergraphs[i]) < 0) {
 -                av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
 -                exit_program(1);
 +            if (ist_in_filtergraph(filtergraphs[i], ist)) {
 +                FilterGraph *fg = filtergraphs[i];
 +                int j;
 +                if (configure_filtergraph(fg) < 0) {
 +                    av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
 +                    exit_program(1);
 +                }
 +                for (j = 0; j < fg->nb_outputs; j++) {
 +                    OutputStream *ost = fg->outputs[j]->ost;
 +                    if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
 +                        !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
 +                        av_buffersink_set_frame_size(ost->filter->filter,
 +                                                     ost->st->codec->frame_size);
 +                }
              }
      }
  
 +    /* if the decoder provides a pts, use it instead of the last packet pts.
 +       the decoder could be delaying output by a packet or more. */
 +    if (decoded_frame->pts != AV_NOPTS_VALUE) {
 +        ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
 +        decoded_frame_tb   = avctx->time_base;
 +    } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
 +        decoded_frame->pts = decoded_frame->pkt_pts;
 +        pkt->pts           = AV_NOPTS_VALUE;
 +        decoded_frame_tb   = ist->st->time_base;
 +    } else if (pkt->pts != AV_NOPTS_VALUE) {
 +        decoded_frame->pts = pkt->pts;
 +        pkt->pts           = AV_NOPTS_VALUE;
 +        decoded_frame_tb   = ist->st->time_base;
 +    }else {
 +        decoded_frame->pts = ist->dts;
 +        decoded_frame_tb   = AV_TIME_BASE_Q;
 +    }
      if (decoded_frame->pts != AV_NOPTS_VALUE)
          decoded_frame->pts = av_rescale_q(decoded_frame->pts,
 -                                          ist->st->time_base,
 +                                          decoded_frame_tb,
                                            (AVRational){1, ist->st->codec->sample_rate});
      for (i = 0; i < ist->nb_filters; i++)
 -        av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
 +        av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, 0);
 +
 +    decoded_frame->pts = AV_NOPTS_VALUE;
  
      return ret;
  }
@@@ -1526,8 -1202,6 +1524,8 @@@ static int decode_video(InputStream *is
      AVFrame *decoded_frame;
      void *buffer_to_free = NULL;
      int i, ret = 0, resample_changed;
 +    int64_t best_effort_timestamp;
 +    AVRational *frame_sample_aspect;
      float quality;
  
      if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
      else
          avcodec_get_frame_defaults(ist->decoded_frame);
      decoded_frame = ist->decoded_frame;
 +    pkt->dts  = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
  
 +    update_benchmark(NULL);
      ret = avcodec_decode_video2(ist->st->codec,
                                  decoded_frame, got_output, pkt);
-     if (ret < 0)
-         return ret;
-     quality = same_quant ? decoded_frame->quality : 0;
-     if (!*got_output) {
-         /* no picture yet */
-         if (!pkt->size)
 +    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
+     if (!*got_output || ret < 0) {
+         if (!pkt->size) {
              for (i = 0; i < ist->nb_filters; i++)
 -                av_buffersrc_buffer(ist->filters[i]->filter, NULL);
 +                av_buffersrc_add_ref(ist->filters[i]->filter, NULL, AV_BUFFERSRC_FLAG_NO_COPY);
+         }
          return ret;
      }
  
 -    decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
 -                                           decoded_frame->pkt_dts);
+     quality = same_quant ? decoded_frame->quality : 0;
++
 +    if(ist->top_field_first>=0)
 +        decoded_frame->top_field_first = ist->top_field_first;
 +
 +    best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
 +    if(best_effort_timestamp != AV_NOPTS_VALUE)
 +        ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
 +
 +    if (debug_ts) {
 +        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
 +                "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d \n",
 +                ist->st->index, av_ts2str(decoded_frame->pts),
 +                av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
 +                best_effort_timestamp,
 +                av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
 +                decoded_frame->key_frame, decoded_frame->pict_type);
 +    }
 +
      pkt->size = 0;
      pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
  
              }
      }
  
 +    frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
      for (i = 0; i < ist->nb_filters; i++) {
 +        int changed =      ist->st->codec->width   != ist->filters[i]->filter->outputs[0]->w
 +                        || ist->st->codec->height  != ist->filters[i]->filter->outputs[0]->h
 +                        || ist->st->codec->pix_fmt != ist->filters[i]->filter->outputs[0]->format;
          // XXX what an ugly hack
          if (ist->filters[i]->graph->nb_outputs == 1)
              ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;
  
 -        if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
 +        if (!frame_sample_aspect->num)
 +            *frame_sample_aspect = ist->st->sample_aspect_ratio;
 +        if (ist->dr1 && decoded_frame->type==FF_BUFFER_TYPE_USER && !changed) {
              FrameBuffer      *buf = decoded_frame->opaque;
              AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
                                          decoded_frame->data, decoded_frame->linesize,
              fb->buf->priv           = buf;
              fb->buf->free           = filter_release_buffer;
  
 +            av_assert0(buf->refcount>0);
              buf->refcount++;
 -            av_buffersrc_buffer(ist->filters[i]->filter, fb);
 +            av_buffersrc_add_ref(ist->filters[i]->filter, fb,
 +                                 AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |
 +                                 AV_BUFFERSRC_FLAG_NO_COPY);
          } else
 -            av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
 +        if(av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, 0)<0) {
 +            av_log(NULL, AV_LOG_FATAL, "Failed to inject frame into filter network\n");
 +            exit_program(1);
 +        }
 +
      }
  
      av_free(buffer_to_free);
@@@ -1645,16 -1285,13 +1641,16 @@@ static int transcode_subtitles(InputStr
      AVSubtitle subtitle;
      int i, ret = avcodec_decode_subtitle2(ist->st->codec,
                                            &subtitle, got_output, pkt);
 -    if (ret < 0)
 -        return ret;
 -    if (!*got_output)
 +    if (ret < 0 || !*got_output) {
 +        if (!pkt->size)
 +            sub2video_flush(ist);
          return ret;
 +    }
  
      rate_emu_sleep(ist);
  
 +    sub2video_update(ist, &subtitle, pkt->pts);
 +
      for (i = 0; i < nb_output_streams; i++) {
          OutputStream *ost = output_streams[i];
  
  /* pkt = NULL means EOF (needed to flush decoder buffers) */
  static int output_packet(InputStream *ist, const AVPacket *pkt)
  {
 -    int i;
 +    int ret = 0, i;
      int got_output;
 +
      AVPacket avpkt;
 +    if (!ist->saw_first_ts) {
 +        ist->dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
 +        ist->pts = 0;
 +        if (pkt != NULL && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
 +            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
 +            ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
 +        }
 +        ist->saw_first_ts = 1;
 +    }
  
      if (ist->next_dts == AV_NOPTS_VALUE)
 -        ist->next_dts = ist->last_dts;
 +        ist->next_dts = ist->dts;
 +    if (ist->next_pts == AV_NOPTS_VALUE)
 +        ist->next_pts = ist->pts;
  
      if (pkt == NULL) {
          /* EOF handling */
          avpkt = *pkt;
      }
  
 -    if (pkt->dts != AV_NOPTS_VALUE)
 -        ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
 +    if (pkt->dts != AV_NOPTS_VALUE) {
 +        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
 +        if (ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
 +            ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
 +    }
  
      // while we have more to decode or while the decoder did output something on EOF
      while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
 -        int ret = 0;
 +        int duration;
      handle_eof:
  
 -        ist->last_dts = ist->next_dts;
 +        ist->pts = ist->next_pts;
 +        ist->dts = ist->next_dts;
  
          if (avpkt.size && avpkt.size != pkt->size) {
              av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
              break;
          case AVMEDIA_TYPE_VIDEO:
              ret = decode_video    (ist, &avpkt, &got_output);
 -            if (avpkt.duration)
 -                ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
 -            else if (ist->st->avg_frame_rate.num)
 -                ist->next_dts += av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate),
 -                                              AV_TIME_BASE_Q);
 -            else if (ist->st->codec->time_base.num != 0) {
 -                int ticks      = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
 -                                                   ist->st->codec->ticks_per_frame;
 -                ist->next_dts += av_rescale_q(ticks, ist->st->codec->time_base, AV_TIME_BASE_Q);
 -            }
 +            if (avpkt.duration) {
 +                duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
 +            } else if(ist->st->codec->time_base.num != 0 && ist->st->codec->time_base.den != 0) {
 +                int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
 +                duration = ((int64_t)AV_TIME_BASE *
 +                                ist->st->codec->time_base.num * ticks) /
 +                                ist->st->codec->time_base.den;
 +            } else
 +                duration = 0;
 +
 +            if(ist->dts != AV_NOPTS_VALUE && duration) {
 +                ist->next_dts += duration;
 +            }else
 +                ist->next_dts = AV_NOPTS_VALUE;
 +
 +            if (got_output)
 +                ist->next_pts += duration; //FIXME the duration is not correct in some cases
              break;
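
The video branch above estimates the per-frame duration: the packet duration wins when present, otherwise the code falls back to the codec time base scaled by the tick count (parser repeat_pict + 1, or ticks_per_frame). A hedged sketch of that fallback, with an illustrative example:

    /* Frame duration in AV_TIME_BASE units from the codec time base,
     * mirroring the fallback above. */
    static int64_t ticks_to_duration_us(AVRational codec_tb, int ticks)
    {
        return ((int64_t)AV_TIME_BASE * codec_tb.num * ticks) / codec_tb.den;
    }
    /* e.g. a 25 fps stream with codec_tb = {1, 50} and ticks = 2 gives
     * 1000000 * 1 * 2 / 50 = 40000 us per frame. */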
          case AVMEDIA_TYPE_SUBTITLE:
              ret = transcode_subtitles(ist, &avpkt, &got_output);
  
          if (ret < 0)
              return ret;
 +
 +        avpkt.dts=
 +        avpkt.pts= AV_NOPTS_VALUE;
 +
          // touch data and size only if not EOF
          if (pkt) {
 +            if(ist->st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
 +                ret = avpkt.size;
              avpkt.data += ret;
              avpkt.size -= ret;
          }
      /* handle stream copy */
      if (!ist->decoding_needed) {
          rate_emu_sleep(ist);
 -        ist->last_dts = ist->next_dts;
 +        ist->dts = ist->next_dts;
          switch (ist->st->codec->codec_type) {
          case AVMEDIA_TYPE_AUDIO:
              ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
                               ist->st->codec->sample_rate;
              break;
          case AVMEDIA_TYPE_VIDEO:
 -            if (ist->st->codec->time_base.num != 0) {
 -                int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
 +            if (pkt->duration) {
 +                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
 +            } else if(ist->st->codec->time_base.num != 0) {
 +                int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
                  ist->next_dts += ((int64_t)AV_TIME_BASE *
                                    ist->st->codec->time_base.num * ticks) /
                                    ist->st->codec->time_base.den;
              }
              break;
          }
 +        ist->pts = ist->dts;
 +        ist->next_pts = ist->next_dts;
      }
      for (i = 0; pkt && i < nb_output_streams; i++) {
          OutputStream *ost = output_streams[i];
@@@ -1823,18 -1427,27 +1819,18 @@@ static void print_sdp(void
  
  static int init_input_stream(int ist_index, char *error, int error_len)
  {
 -    int i;
      InputStream *ist = input_streams[ist_index];
 +
      if (ist->decoding_needed) {
          AVCodec *codec = ist->dec;
          if (!codec) {
 -            snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
 -                    ist->st->codec->codec_id, ist->file_index, ist->st->index);
 +            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
 +                    avcodec_get_name(ist->st->codec->codec_id), ist->file_index, ist->st->index);
              return AVERROR(EINVAL);
          }
  
 -        /* update requested sample format for the decoder based on the
 -           corresponding encoder sample format */
 -        for (i = 0; i < nb_output_streams; i++) {
 -            OutputStream *ost = output_streams[i];
 -            if (ost->source_index == ist_index) {
 -                update_sample_fmt(ist->st->codec, codec, ost->st->codec);
 -                break;
 -            }
 -        }
 -
 -        if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
 +        ist->dr1 = (codec->capabilities & CODEC_CAP_DR1) && !do_deinterlace;
 +        if (codec->type == AVMEDIA_TYPE_VIDEO && ist->dr1) {
              ist->st->codec->get_buffer     = codec_get_buffer;
              ist->st->codec->release_buffer = codec_release_buffer;
              ist->st->codec->opaque         = &ist->buffer_pool;
          assert_avoptions(ist->opts);
      }
  
 -    ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
 +    ist->next_pts = AV_NOPTS_VALUE;
      ist->next_dts = AV_NOPTS_VALUE;
 -    init_pts_correction(&ist->pts_ctx);
      ist->is_start = 1;
  
      return 0;
@@@ -1862,6 -1476,16 +1858,6 @@@ static InputStream *get_input_stream(Ou
  {
      if (ost->source_index >= 0)
          return input_streams[ost->source_index];
 -
 -    if (ost->filter) {
 -        FilterGraph *fg = ost->filter->graph;
 -        int i;
 -
 -        for (i = 0; i < fg->nb_inputs; i++)
 -            if (fg->inputs[i]->ist->st->codec->codec_type == ost->st->codec->codec_type)
 -                return fg->inputs[i]->ist;
 -    }
 -
      return NULL;
  }
  
@@@ -1896,26 -1520,11 +1892,26 @@@ static void parse_forced_key_frames(cha
      }
  }
  
 +static void report_new_stream(int input_index, AVPacket *pkt)
 +{
 +    InputFile *file = input_files[input_index];
 +    AVStream *st = file->ctx->streams[pkt->stream_index];
 +
 +    if (pkt->stream_index < file->nb_streams_warn)
 +        return;
 +    av_log(file->ctx, AV_LOG_WARNING,
 +           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
 +           av_get_media_type_string(st->codec->codec_type),
 +           input_index, pkt->stream_index,
 +           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
 +    file->nb_streams_warn = pkt->stream_index + 1;
 +}
 +
  static int transcode_init(void)
  {
      int ret = 0, i, j, k;
      AVFormatContext *oc;
 -    AVCodecContext *codec, *icodec;
 +    AVCodecContext *codec, *icodec = NULL;
      OutputStream *ost;
      InputStream *ist;
      char error[1024];
                  return AVERROR(ENOMEM);
              }
              memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
 -            codec->extradata_size = icodec->extradata_size;
 -            if (!copy_tb) {
 -                codec->time_base      = icodec->time_base;
 -                codec->time_base.num *= icodec->ticks_per_frame;
 -                av_reduce(&codec->time_base.num, &codec->time_base.den,
 -                          codec->time_base.num, codec->time_base.den, INT_MAX);
 -            } else
 -                codec->time_base = ist->st->time_base;
 +            codec->extradata_size= icodec->extradata_size;
 +            codec->bits_per_coded_sample  = icodec->bits_per_coded_sample;
 +
 +            codec->time_base = ist->st->time_base;
 +            /*
 +             * AVI is a special case here because it supports variable fps, but
 +             * having the fps and the timebase differ significantly adds quite
 +             * some overhead.
 +             */
 +            if(!strcmp(oc->oformat->name, "avi")) {
 +                if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
 +                               && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
 +                               && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(icodec->time_base)
 +                               && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(icodec->time_base) < 1.0/500
 +                     || copy_tb==2){
 +                    codec->time_base.num = ist->st->r_frame_rate.den;
 +                    codec->time_base.den = 2*ist->st->r_frame_rate.num;
 +                    codec->ticks_per_frame = 2;
 +                } else if (   copy_tb<0 && av_q2d(icodec->time_base)*icodec->ticks_per_frame > 2*av_q2d(ist->st->time_base)
 +                                 && av_q2d(ist->st->time_base) < 1.0/500
 +                    || copy_tb==0){
 +                    codec->time_base = icodec->time_base;
 +                    codec->time_base.num *= icodec->ticks_per_frame;
 +                    codec->time_base.den *= 2;
 +                    codec->ticks_per_frame = 2;
 +                }
 +            } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
 +                      && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
 +                      && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
 +            ) {
 +                if(   copy_tb<0 && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base)
 +                                && av_q2d(ist->st->time_base) < 1.0/500
 +                   || copy_tb==0){
 +                    codec->time_base = icodec->time_base;
 +                    codec->time_base.num *= icodec->ticks_per_frame;
 +                }
 +            }
 +
 +            if(ost->frame_rate.num)
 +                codec->time_base = av_inv_q(ost->frame_rate);
 +
 +            av_reduce(&codec->time_base.num, &codec->time_base.den,
 +                        codec->time_base.num, codec->time_base.den, INT_MAX);
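
The stream-copy time base chosen above is finally overridden by an explicit output frame rate via av_inv_q() and normalized with av_reduce(). A minimal sketch of those two helpers (frame_rate_to_time_base() is an illustrative name):

    #include <limits.h>                  /* INT_MAX */
    #include "libavutil/rational.h"      /* av_inv_q(), av_reduce() */

    /* Turn a forced frame rate into an output time base and reduce it. */
    static AVRational frame_rate_to_time_base(AVRational frame_rate)
    {
        AVRational tb = av_inv_q(frame_rate);   /* 30000/1001 fps -> 1001/30000 */
        av_reduce(&tb.num, &tb.den, tb.num, tb.den, INT_MAX);
        return tb;
    }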
  
              switch (codec->codec_type) {
              case AVMEDIA_TYPE_AUDIO:
                  codec->frame_size         = icodec->frame_size;
                  codec->audio_service_type = icodec->audio_service_type;
                  codec->block_align        = icodec->block_align;
 +                if((codec->block_align == 1 || codec->block_align == 1152) && codec->codec_id == AV_CODEC_ID_MP3)
 +                    codec->block_align= 0;
 +                if(codec->codec_id == AV_CODEC_ID_AC3)
 +                    codec->block_align= 0;
                  break;
              case AVMEDIA_TYPE_VIDEO:
                  codec->pix_fmt            = icodec->pix_fmt;
                          ist->st->codec->sample_aspect_ratio.num ?
                          ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
                  }
 +                ost->st->avg_frame_rate = ist->st->avg_frame_rate;
                  break;
              case AVMEDIA_TYPE_SUBTITLE:
                  codec->width  = icodec->width;
                  abort();
              }
          } else {
 +            if (!ost->enc)
 +                ost->enc = avcodec_find_encoder(codec->codec_id);
              if (!ost->enc) {
                  /* should only happen when a default codec is not present. */
 -                snprintf(error, sizeof(error), "Automatic encoder selection "
 -                         "failed for output stream #%d:%d. Default encoder for "
 -                         "format %s is probably disabled. Please choose an "
 -                         "encoder manually.\n", ost->file_index, ost->index,
 -                         oc->oformat->name);
 +                snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
 +                         avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
                  ret = AVERROR(EINVAL);
                  goto dump_format;
              }
                  ist->decoding_needed = 1;
              ost->encoding_needed = 1;
  
 -            /*
 -             * We want CFR output if and only if one of those is true:
 -             * 1) user specified output framerate with -r
 -             * 2) user specified -vsync cfr
 -             * 3) output format is CFR and the user didn't force vsync to
 -             *    something else than CFR
 -             *
 -             * in such a case, set ost->frame_rate
 -             */
 -            if (codec->codec_type == AVMEDIA_TYPE_VIDEO &&
 -                !ost->frame_rate.num && ist &&
 -                (video_sync_method ==  VSYNC_CFR ||
 -                 (video_sync_method ==  VSYNC_AUTO &&
 -                  !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
 -                ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
 -                if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
 -                    int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
 -                    ost->frame_rate = ost->enc->supported_framerates[idx];
 -                }
 -            }
 -
              if (!ost->filter &&
                  (codec->codec_type == AVMEDIA_TYPE_VIDEO ||
                   codec->codec_type == AVMEDIA_TYPE_AUDIO)) {
                      }
              }
  
 +            if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
 +                if (ost->filter && !ost->frame_rate.num)
 +                    ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
 +                if (ist && !ost->frame_rate.num)
 +                    ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
 +//                    ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
 +                if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
 +                    int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
 +                    ost->frame_rate = ost->enc->supported_framerates[idx];
 +                }
 +            }
 +
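
When encoding, the block above picks the output frame rate from the filtergraph sink, then from the input stream, and finally snaps it to the encoder's supported_framerates list unless -force_fps was given. A hedged sketch of the snapping step (snap_frame_rate() is an illustrative name; supported_framerates is the {0,0}-terminated list in AVCodec):

    #include "libavcodec/avcodec.h"
    #include "libavutil/rational.h"      /* av_find_nearest_q_idx() */

    /* Clamp a requested frame rate to the nearest rate the encoder supports. */
    static AVRational snap_frame_rate(AVRational requested, const AVCodec *enc)
    {
        if (enc && enc->supported_framerates) {
            int idx = av_find_nearest_q_idx(requested, enc->supported_framerates);
            return enc->supported_framerates[idx];
        }
        return requested;
    }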
              switch (codec->codec_type) {
              case AVMEDIA_TYPE_AUDIO:
                  codec->sample_fmt     = ost->filter->filter->inputs[0]->format;
                  codec->time_base      = (AVRational){ 1, codec->sample_rate };
                  break;
              case AVMEDIA_TYPE_VIDEO:
 -                codec->time_base = ost->filter->filter->inputs[0]->time_base;
 +                codec->time_base = av_inv_q(ost->frame_rate);
 +                if (ost->filter && !(codec->time_base.num && codec->time_base.den))
 +                    codec->time_base = ost->filter->filter->inputs[0]->time_base;
 +                if (   av_q2d(codec->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
 +                   && (video_sync_method == VSYNC_CFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
 +                    av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer that does not efficiently support it.\n"
 +                                               "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
 +                }
 +                for (j = 0; j < ost->forced_kf_count; j++)
 +                    ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
 +                                                         AV_TIME_BASE_Q,
 +                                                         codec->time_base);
  
                  codec->width  = ost->filter->filter->inputs[0]->w;
                  codec->height = ost->filter->filter->inputs[0]->h;
                      ost->filter->filter->inputs[0]->sample_aspect_ratio;
                  codec->pix_fmt = ost->filter->filter->inputs[0]->format;
  
 -                if (codec->width   != icodec->width  ||
 +                if (!icodec ||
 +                    codec->width   != icodec->width  ||
                      codec->height  != icodec->height ||
                      codec->pix_fmt != icodec->pix_fmt) {
 -                    codec->bits_per_raw_sample = 0;
 +                    codec->bits_per_raw_sample = frame_bits_per_raw_sample;
                  }
  
                  if (ost->forced_keyframes)
                  break;
              }
              /* two pass mode */
 -            if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
 +            if (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
                  char logfilename[1024];
                  FILE *f;
  
                  if (!strcmp(ost->enc->name, "libx264")) {
                      av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
                  } else {
 -                    if (codec->flags & CODEC_FLAG_PASS1) {
 -                        f = fopen(logfilename, "wb");
 -                        if (!f) {
 -                            av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
 -                                   logfilename, strerror(errno));
 -                            exit_program(1);
 -                        }
 -                        ost->logfile = f;
 -                    } else {
 +                    if (codec->flags & CODEC_FLAG_PASS2) {
                          char  *logbuffer;
                          size_t logbuffer_size;
                          if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
                          }
                          codec->stats_in = logbuffer;
                      }
 +                    if (codec->flags & CODEC_FLAG_PASS1) {
 +                        f = fopen(logfilename, "wb");
 +                        if (!f) {
 +                            av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
 +                                logfilename, strerror(errno));
 +                            exit_program(1);
 +                        }
 +                        ost->logfile = f;
 +                    }
                  }
              }
          }
              if ((ist = get_input_stream(ost)))
                  dec = ist->st->codec;
              if (dec && dec->subtitle_header) {
 -                ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
 +                /* ASS code assumes this buffer is null terminated so add extra byte. */
 +                ost->st->codec->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
                  if (!ost->st->codec->subtitle_header) {
                      ret = AVERROR(ENOMEM);
                      goto dump_format;
                  ret = AVERROR(EINVAL);
                  goto dump_format;
              }
 +            if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
 +                !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
 +                av_buffersink_set_frame_size(ost->filter->filter,
 +                                             ost->st->codec->frame_size);
              assert_codec_experimental(ost->st->codec, 1);
              assert_avoptions(ost->opts);
              if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
                  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
 -                                             "It takes bits/s as argument, not kbits/s\n");
 +                                             " It takes bits/s as argument, not kbits/s\n");
              extra_size += ost->st->codec->extradata_size;
  
              if (ost->st->codec->me_threshold)
              ret = AVERROR(EINVAL);
              goto dump_format;
          }
 -        assert_avoptions(output_files[i]->opts);
 +//         assert_avoptions(output_files[i]->opts);
          if (strcmp(oc->oformat->name, "rtp")) {
              want_sdp = 0;
          }
@@@ -2387,177 -1948,27 +2383,177 @@@ static int need_output(void
      return 0;
  }
  
 -static InputFile *select_input_file(void)
 +static int input_acceptable(InputStream *ist)
  {
 -    InputFile *ifile = NULL;
 -    int64_t ipts_min = INT64_MAX;
 -    int i;
 +    av_assert1(!ist->discard);
-     return !input_files[ist->file_index]->unavailable &&
++    return !input_files[ist->file_index]->eagain &&
 +           !input_files[ist->file_index]->eof_reached;
 +}
  
 -    for (i = 0; i < nb_input_streams; i++) {
 -        InputStream *ist = input_streams[i];
 -        int64_t ipts     = ist->last_dts;
 +static int find_graph_input(FilterGraph *graph)
 +{
 +    int i, nb_req_max = 0, file_index = -1;
 +
 +    for (i = 0; i < graph->nb_inputs; i++) {
 +        int nb_req = av_buffersrc_get_nb_failed_requests(graph->inputs[i]->filter);
 +        if (nb_req > nb_req_max) {
 +            InputStream *ist = graph->inputs[i]->ist;
 +            if (input_acceptable(ist)) {
 +                nb_req_max = nb_req;
 +                file_index = ist->file_index;
 +            }
 +        }
 +    }
  
 -        if (ist->discard || input_files[ist->file_index]->eagain)
 -            continue;
 -        if (!input_files[ist->file_index]->eof_reached) {
 -            if (ipts < ipts_min) {
 -                ipts_min = ipts;
 -                ifile    = input_files[ist->file_index];
 +    return file_index;
 +}
 +
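
find_graph_input() above relies on av_buffersrc_get_nb_failed_requests(): each time the filtergraph asks a buffer source for a frame and gets none, that counter increases, so the input whose source is hungriest is the one to read next. A trivial hedged probe (is_starved() is an illustrative name):

    #include "libavfilter/buffersrc.h"

    /* Has the filtergraph requested frames from this source and been refused? */
    static int is_starved(AVFilterContext *bufsrc)
    {
        return av_buffersrc_get_nb_failed_requests(bufsrc) > 0;
    }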
 +/**
 + * Select the input file to read from.
 + *
 + * @return  >=0 index of the input file to use;
 + *          -1  if no file is acceptable;
 + *          -2  to read from filters without reading from a file
 + */
 +static int select_input_file(void)
 +{
 +    int i, ret, nb_active_out = nb_output_streams, ost_index = -1;
 +    int64_t opts_min;
 +    OutputStream *ost;
 +    AVFilterBufferRef *dummy;
 +
 +    for (i = 0; i < nb_output_streams; i++)
 +        nb_active_out -= output_streams[i]->unavailable =
 +            output_streams[i]->is_past_recording_time;
 +    while (nb_active_out) {
 +        opts_min = INT64_MAX;
 +        ost_index = -1;
 +        for (i = 0; i < nb_output_streams; i++) {
 +            OutputStream *ost = output_streams[i];
 +            int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
 +                                        AV_TIME_BASE_Q);
 +            if (!ost->unavailable && opts < opts_min) {
 +                opts_min  = opts;
 +                ost_index = i;
              }
          }
 +        if (ost_index < 0)
 +            return -1;
 +
 +        ost = output_streams[ost_index];
 +        if (ost->source_index >= 0) {
 +            /* ost is directly connected to an input */
 +            InputStream *ist = input_streams[ost->source_index];
 +            if (input_acceptable(ist))
 +                return ist->file_index;
 +        } else {
 +            /* ost is connected to a complex filtergraph */
 +            av_assert1(ost->filter);
 +            ret = av_buffersink_get_buffer_ref(ost->filter->filter, &dummy,
 +                                               AV_BUFFERSINK_FLAG_PEEK);
 +            if (ret >= 0)
 +                return -2;
 +            ret = find_graph_input(ost->filter->graph);
 +            if (ret >= 0)
 +                return ret;
 +        }
 +        ost->unavailable = 1;
 +        nb_active_out--;
      }
 +    return -1;
 +}
  
 -    return ifile;
 +static int check_keyboard_interaction(int64_t cur_time)
 +{
 +    int i, ret, key;
 +    static int64_t last_time;
 +    if (received_nb_signals)
 +        return AVERROR_EXIT;
 +    /* read_key() returns 0 on EOF */
 +    if(cur_time - last_time >= 100000 && !run_as_daemon){
 +        key =  read_key();
 +        last_time = cur_time;
 +    }else
 +        key = -1;
 +    if (key == 'q')
 +        return AVERROR_EXIT;
 +    if (key == '+') av_log_set_level(av_log_get_level()+10);
 +    if (key == '-') av_log_set_level(av_log_get_level()-10);
 +    if (key == 's') qp_hist     ^= 1;
 +    if (key == 'h'){
 +        if (do_hex_dump){
 +            do_hex_dump = do_pkt_dump = 0;
 +        } else if(do_pkt_dump){
 +            do_hex_dump = 1;
 +        } else
 +            do_pkt_dump = 1;
 +        av_log_set_level(AV_LOG_DEBUG);
 +    }
 +    if (key == 'c' || key == 'C'){
 +        char buf[4096], target[64], command[256], arg[256] = {0};
 +        double time;
 +        int k, n = 0;
 +        fprintf(stderr, "\nEnter command: <target> <time> <command>[ <argument>]\n");
 +        i = 0;
 +        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
 +            if (k > 0)
 +                buf[i++] = k;
 +        buf[i] = 0;
 +        if (k > 0 &&
 +            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
 +            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
 +                   target, time, command, arg);
 +            for (i = 0; i < nb_filtergraphs; i++) {
 +                FilterGraph *fg = filtergraphs[i];
 +                if (fg->graph) {
 +                    if (time < 0) {
 +                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
 +                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
 +                        fprintf(stderr, "Command reply for stream %d: ret:%d res:%s\n", i, ret, buf);
 +                    } else {
 +                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
 +                    }
 +                }
 +            }
 +        } else {
 +            av_log(NULL, AV_LOG_ERROR,
 +                   "Parse error, at least 3 arguments were expected, "
 +                   "only %d given in string '%s'\n", n, buf);
 +        }
 +    }
 +    if (key == 'd' || key == 'D'){
 +        int debug=0;
 +        if(key == 'D') {
 +            debug = input_streams[0]->st->codec->debug<<1;
 +            if(!debug) debug = 1;
 +            while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
 +                debug += debug;
 +        }else
 +            if(scanf("%d", &debug)!=1)
 +                fprintf(stderr,"error parsing debug value\n");
 +        for(i=0;i<nb_input_streams;i++) {
 +            input_streams[i]->st->codec->debug = debug;
 +        }
 +        for(i=0;i<nb_output_streams;i++) {
 +            OutputStream *ost = output_streams[i];
 +            ost->st->codec->debug = debug;
 +        }
 +        if(debug) av_log_set_level(AV_LOG_DEBUG);
 +        fprintf(stderr,"debug=%d\n", debug);
 +    }
 +    if (key == '?'){
 +        fprintf(stderr, "key    function\n"
 +                        "?      show this help\n"
 +                        "+      increase verbosity\n"
 +                        "-      decrease verbosity\n"
 +                        "c      Send command to filtergraph\n"
 +                        "D      cycle through available debug modes\n"
 +                        "h      dump packets/hex; press to cycle through the 3 states\n"
 +                        "q      quit\n"
 +                        "s      Show QP histogram\n"
 +        );
 +    }
 +    return 0;
  }
  
  #if HAVE_PTHREADS
@@@ -2679,6 -2090,22 +2675,22 @@@ static int get_input_packet(InputFile *
      return av_read_frame(f->ctx, pkt);
  }
  
+ static int got_eagain(void)
+ {
+     int i;
+     for (i = 0; i < nb_input_files; i++)
+         if (input_files[i]->eagain)
+             return 1;
+     return 0;
+ }
+ static void reset_eagain(void)
+ {
+     int i;
+     for (i = 0; i < nb_input_files; i++)
+         input_files[i]->eagain = 0;
+ }
  /*
   * The following code is the main loop of the file converter
   */
@@@ -2688,16 -2115,14 +2700,15 @@@ static int transcode(void
      AVFormatContext *is, *os;
      OutputStream *ost;
      InputStream *ist;
-     int no_packet_count = 0;
      int64_t timer_start;
  
      ret = transcode_init();
      if (ret < 0)
          goto fail;
  
 -    av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
 -    term_init();
 +    if (stdin_interaction) {
 +        av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
 +    }
  
      timer_start = av_gettime();
  
          goto fail;
  #endif
  
-     for (; received_sigterm == 0;) {
-         int file_index, ist_index;
+     while (!received_sigterm) {
+         InputFile *ifile;
          AVPacket pkt;
++        int file_index;
 +        int64_t cur_time= av_gettime();
 +
 +        /* if 'q' pressed, exits */
 +        if (stdin_interaction)
 +            if (check_keyboard_interaction(cur_time) < 0)
 +                break;
  
          /* check if there's any stream where output is still needed */
          if (!need_output()) {
          }
  
          /* select the stream that we must read now */
 -        ifile = select_input_file();
 +        file_index = select_input_file();
          /* if none, we are finished */
 -        if (!ifile) {
 +        if (file_index == -2) {
 +            poll_filters() ;
 +            continue;
 +        }
 +        if (file_index < 0) {
-             if (no_packet_count) {
-                 no_packet_count = 0;
-                 for (i = 0; i < nb_input_files; i++)
-                     input_files[i]->unavailable = 0;
+             if (got_eagain()) {
+                 reset_eagain();
                  av_usleep(10000);
                  continue;
              }
              av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
              break;
          }
++        ifile = input_files[file_index];
  
-         is  = input_files[file_index]->ctx;
-         ret = get_input_packet(input_files[file_index], &pkt);
+         is  = ifile->ctx;
+         ret = get_input_packet(ifile, &pkt);
  
          if (ret == AVERROR(EAGAIN)) {
-             input_files[file_index]->unavailable = 1;
-             no_packet_count++;
+             ifile->eagain = 1;
              continue;
          }
          if (ret < 0) {
                  if (exit_on_error)
                      exit_program(1);
              }
-             input_files[file_index]->eof_reached = 1;
+             ifile->eof_reached = 1;
  
-             for (i = 0; i < input_files[file_index]->nb_streams; i++) {
-                 ist = input_streams[input_files[file_index]->ist_index + i];
+             for (i = 0; i < ifile->nb_streams; i++) {
+                 ist = input_streams[ifile->ist_index + i];
                  if (ist->decoding_needed)
                      output_packet(ist, NULL);
 +                poll_filters();
              }
  
              if (opt_shortest)
                  continue;
          }
  
-         no_packet_count = 0;
-         for (i = 0; i < nb_input_files; i++)
-             input_files[i]->unavailable = 0;
+         reset_eagain();
  
          if (do_pkt_dump) {
              av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
          }
          /* the following test is needed in case new streams appear
             dynamically in the stream: we ignore them */
-         if (pkt.stream_index >= input_files[file_index]->nb_streams) {
 -        if (pkt.stream_index >= ifile->nb_streams)
++        if (pkt.stream_index >= ifile->nb_streams) {
 +            report_new_stream(file_index, &pkt);
              goto discard_packet;
-         ist_index = input_files[file_index]->ist_index + pkt.stream_index;
-         ist = input_streams[ist_index];
 +        }
+         ist = input_streams[ifile->ist_index + pkt.stream_index];
          if (ist->discard)
              goto discard_packet;
  
 +        if(!ist->wrap_correction_done && input_files[file_index]->ctx->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
 +            uint64_t stime = av_rescale_q(input_files[file_index]->ctx->start_time, AV_TIME_BASE_Q, ist->st->time_base);
 +            uint64_t stime2= stime + (1LL<<ist->st->pts_wrap_bits);
 +            ist->wrap_correction_done = 1;
 +            if(pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime && pkt.dts - stime > stime2 - pkt.dts) {
 +                pkt.dts -= 1LL<<ist->st->pts_wrap_bits;
 +                ist->wrap_correction_done = 0;
 +            }
 +            if(pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime && pkt.pts - stime > stime2 - pkt.pts) {
 +                pkt.pts -= 1LL<<ist->st->pts_wrap_bits;
 +                ist->wrap_correction_done = 0;
 +            }
 +        }
 +
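
The wrap-correction block above handles containers whose timestamp counter is narrower than 64 bits (33 bits for MPEG-TS): if a packet timestamp lies more than halfway between the file start time and the next wrap point, it is assumed to come from just before the counter wrapped, so one wrap period is subtracted. A hedged sketch of that arithmetic (unwrap_ts() is an illustrative name):

    #include "libavutil/avutil.h"        /* AV_NOPTS_VALUE */

    /* Undo a suspected timestamp wrap, in the stream's own time base. */
    static int64_t unwrap_ts(int64_t ts, uint64_t stime, int pts_wrap_bits)
    {
        uint64_t stime2 = stime + (1ULL << pts_wrap_bits);   /* next wrap point */
        if (ts != AV_NOPTS_VALUE && ts > (int64_t)stime &&
            (uint64_t)(ts - stime) > stime2 - ts)
            ts -= 1LL << pts_wrap_bits;                       /* e.g. 2^33 ticks for MPEG-TS */
        return ts;
    }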
          if (pkt.dts != AV_NOPTS_VALUE)
-             pkt.dts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
+             pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
          if (pkt.pts != AV_NOPTS_VALUE)
-             pkt.pts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
+             pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
  
          if (pkt.pts != AV_NOPTS_VALUE)
              pkt.pts *= ist->ts_scale;
          if (pkt.dts != AV_NOPTS_VALUE)
              pkt.dts *= ist->ts_scale;
  
 -        if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE
 -            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
 +        if (debug_ts) {
 +            av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
 +                    "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s  pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%"PRId64"\n",
-                     ist_index, av_get_media_type_string(ist->st->codec->codec_type),
++                    ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->st->codec->codec_type),
 +                    av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
 +                    av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
 +                    av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
 +                    av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
 +                    input_files[ist->file_index]->ts_offset);
 +        }
 +
 +        if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE && !copy_ts) {
              int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
              int64_t delta   = pkt_dts - ist->next_dts;
 -            if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
 +            if (is->iformat->flags & AVFMT_TS_DISCONT) {
 +            if(delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
 +                (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
 +                 ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
 +                pkt_dts+1<ist->pts){
-                 input_files[ist->file_index]->ts_offset -= delta;
+                 ifile->ts_offset -= delta;
                  av_log(NULL, AV_LOG_DEBUG,
                         "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
-                        delta, input_files[ist->file_index]->ts_offset);
+                        delta, ifile->ts_offset);
                  pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                  if (pkt.pts != AV_NOPTS_VALUE)
                      pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
              }
 +            } else {
 +                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
 +                    (delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
 +                     pkt_dts+1<ist->pts){
 +                    av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
 +                    pkt.dts = AV_NOPTS_VALUE;
 +                }
 +                if (pkt.pts != AV_NOPTS_VALUE){
 +                    int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
 +                    delta   = pkt_pts - ist->next_dts;
 +                    if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
 +                        (delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
 +                        pkt_pts+1<ist->pts) {
 +                        av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
 +                        pkt.pts = AV_NOPTS_VALUE;
 +                    }
 +                }
 +            }
          }
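
For formats flagged AVFMT_TS_DISCONT, the branch above compensates for timestamp jumps: delta is the gap (in AV_TIME_BASE units) between the packet DTS and the predicted next_dts, and once it exceeds dts_delta_threshold the whole input file's ts_offset is shifted so the downstream timeline stays monotonic. A hedged sketch of just the compensation step (names mirror the structs in ffmpeg.h):

    /* Shift the per-file timestamp offset and the current packet so that
     * a discontinuity of `delta` microseconds disappears downstream. */
    static void compensate_discontinuity(InputFile *ifile, InputStream *ist,
                                         AVPacket *pkt, int64_t delta)
    {
        ifile->ts_offset -= delta;
        pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        if (pkt->pts != AV_NOPTS_VALUE)
            pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
    }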
  
 -        if (output_packet(ist, &pkt) < 0 || poll_filters() < 0) {
 -            av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
 -                   ist->file_index, ist->st->index);
 +        sub2video_heartbeat(ist, pkt.pts);
 +
-         // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
 +        if ((ret = output_packet(ist, &pkt)) < 0 ||
 +            ((ret = poll_filters()) < 0 && ret != AVERROR_EOF)) {
 +            char buf[128];
 +            av_strerror(ret, buf, sizeof(buf));
 +            av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
 +                   ist->file_index, ist->st->index, buf);
              if (exit_on_error)
                  exit_program(1);
              av_free_packet(&pkt);
          av_free_packet(&pkt);
  
          /* dump report by using the output first video and audio streams */
 -        print_report(0, timer_start);
 +        print_report(0, timer_start, cur_time);
      }
  #if HAVE_PTHREADS
      free_input_threads();
      }
  
      /* dump report by using the first video and audio streams */
 -    print_report(1, timer_start);
 +    print_report(1, timer_start, av_gettime());
  
      /* close each encoder */
      for (i = 0; i < nb_output_streams; i++) {
      return ret;
  }
  
 +
  static int64_t getutime(void)
  {
  #if HAVE_GETRUSAGE
@@@ -2987,10 -2341,6 +2994,10 @@@ static int64_t getmaxrss(void
  #endif
  }
  
 +static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
 +{
 +}
 +
  static void parse_cpuflags(int argc, char **argv, const OptionDef *options)
  {
      int idx = locate_option(argc, argv, options, "cpuflags");
@@@ -3003,18 -2353,11 +3010,18 @@@ int main(int argc, char **argv
      OptionsContext o = { 0 };
      int64_t ti;
  
 -    reset_options(&o);
 +    reset_options(&o, 0);
  
      av_log_set_flags(AV_LOG_SKIP_REPEATED);
      parse_loglevel(argc, argv, options);
  
 +    if(argc>1 && !strcmp(argv[1], "-d")){
 +        run_as_daemon=1;
 +        av_log_set_callback(log_callback_null);
 +        argc--;
 +        argv++;
 +    }
 +
      avcodec_register_all();
  #if CONFIG_AVDEVICE
      avdevice_register_all();
      av_register_all();
      avformat_network_init();
  
 -    show_banner();
 +    show_banner(argc, argv, options);
 +
 +    term_init();
  
      parse_cpuflags(argc, argv, options);
  
  
      /* file converter / grab */
      if (nb_output_files <= 0) {
 -        fprintf(stderr, "At least one output file must be specified\n");
 +        av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
          exit_program(1);
      }
  
          exit_program(1);
      }
  
 -    ti = getutime();
 +    current_time = ti = getutime();
      if (transcode() < 0)
          exit_program(1);
      ti = getutime() - ti;
diff --combined ffmpeg.h
+++ b/ffmpeg.h
@@@ -1,29 -1,28 +1,29 @@@
  /*
 - * This file is part of Libav.
 + * This file is part of FFmpeg.
   *
 - * Libav is free software; you can redistribute it and/or
 + * FFmpeg is free software; you can redistribute it and/or
   * modify it under the terms of the GNU Lesser General Public
   * License as published by the Free Software Foundation; either
   * version 2.1 of the License, or (at your option) any later version.
   *
 - * Libav is distributed in the hope that it will be useful,
 + * FFmpeg is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   * Lesser General Public License for more details.
   *
   * You should have received a copy of the GNU Lesser General Public
 - * License along with Libav; if not, write to the Free Software
 + * License along with FFmpeg; if not, write to the Free Software
   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
   */
  
 -#ifndef AVCONV_H
 -#define AVCONV_H
 +#ifndef FFMPEG_H
 +#define FFMPEG_H
  
  #include "config.h"
  
  #include <stdint.h>
  #include <stdio.h>
 +#include <signal.h>
  
  #if HAVE_PTHREADS
  #include <pthread.h>
  #include "libavutil/pixfmt.h"
  #include "libavutil/rational.h"
  
 +#include "libswresample/swresample.h"
 +
  #define VSYNC_AUTO       -1
  #define VSYNC_PASSTHROUGH 0
  #define VSYNC_CFR         1
  #define VSYNC_VFR         2
 +#define VSYNC_DROP        0xff
 +
 +#define MAX_STREAMS 1024    /* arbitrary sanity check value */
  
  /* select an input stream for an output stream */
  typedef struct StreamMap {
      char *linklabel;       /** name of an output link, for mapping lavfi outputs */
  } StreamMap;
  
 -/**
 - * select an input file for an output file
 - */
 -typedef struct MetadataMap {
 -    int  file;      ///< file index
 -    char type;      ///< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
 -    int  index;     ///< stream/chapter/program number
 -} MetadataMap;
 +typedef struct {
 +    int  file_idx,  stream_idx,  channel_idx; // input
 +    int ofile_idx, ostream_idx;               // output
 +} AudioChannelMap;
  
  typedef struct OptionsContext {
      /* input/output options */
      /* output options */
      StreamMap *stream_maps;
      int     nb_stream_maps;
 -    /* first item specifies output metadata, second is input */
 -    MetadataMap (*meta_data_maps)[2];
 -    int nb_meta_data_maps;
 +    AudioChannelMap *audio_channel_maps; /* one info entry per -map_channel */
 +    int           nb_audio_channel_maps; /* number of (valid) -map_channel settings */
      int metadata_global_manual;
      int metadata_streams_manual;
      int metadata_chapters_manual;
@@@ -199,20 -198,16 +199,20 @@@ typedef struct InputStream 
  
      int64_t       start;     /* time when read started */
      /* predicted dts of the next packet read for this stream or (when there are
 -     * several frames in a packet) of the next frame in current packet */
 +     * several frames in a packet) of the next frame in the current packet (in AV_TIME_BASE units) */
      int64_t       next_dts;
 -    /* dts of the last packet read for this stream */
 -    int64_t       last_dts;
 -    PtsCorrectionContext pts_ctx;
 +    int64_t       dts;       ///< dts of the last packet read for this stream (in AV_TIME_BASE units)
 +
 +    int64_t       next_pts;  ///< synthetic pts for the next decode frame (in AV_TIME_BASE units)
 +    int64_t       pts;       ///< current pts of the decoded frame  (in AV_TIME_BASE units)
 +    int           wrap_correction_done;
      double ts_scale;
      int is_start;            /* is 1 at the start and after a discontinuity */
 +    int saw_first_ts;
      int showed_multi_packet_warning;
      AVDictionary *opts;
      AVRational framerate;               /* framerate forced with -r */
 +    int top_field_first;
  
      int resample_height;
      int resample_width;
      int      resample_channels;
      uint64_t resample_channel_layout;
  
 +    struct sub2video {
 +        int64_t last_pts;
 +        AVFilterBufferRef *ref;
 +        int w, h;
 +    } sub2video;
 +
      /* a pool of free buffers for decoded data */
      FrameBuffer *buffer_pool;
 +    int dr1;
  
      /* decoded data from this stream goes into all those filters
       * currently video and audio only */
  typedef struct InputFile {
      AVFormatContext *ctx;
      int eof_reached;      /* true if eof reached */
-     int unavailable;      /* true if the file is unavailable (possibly temporarily) */
+     int eagain;           /* true if last read attempt returned EAGAIN */
 -    int ist_index;        /* index of first stream in ist_table */
 +    int ist_index;        /* index of first stream in input_streams */
      int64_t ts_offset;
 -    int nb_streams;       /* number of stream that avconv is aware of; may be different
 +    int nb_streams;       /* number of streams that ffmpeg is aware of; may be different
                               from ctx.nb_streams if new streams appear during av_read_frame() */
 +    int nb_streams_warn;  /* number of streams that the user was warned of */
      int rate_emu;
  
  #if HAVE_PTHREADS
@@@ -269,6 -256,7 +269,6 @@@ typedef struct OutputStream 
      int frame_number;
      /* input pts and corresponding output pts
         for A/V sync */
 -    // double sync_ipts;        /* dts from the AVPacket of the demuxer in second units */
      struct InputStream *sync_ist; /* input stream to sync against */
      int64_t sync_opts;       /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
      /* pts of the first frame encoded for this stream, used for limiting
      int forced_kf_index;
      char *forced_keyframes;
  
 +    /* audio only */
 +    int audio_channels_map[SWR_CH_MAX];  /* list of the channels id to pick from the source stream */
 +    int audio_channels_mapped;           /* number of channels in audio_channels_map */
 +
      FILE *logfile;
  
      OutputFilter *filter;
      char *avfilter;
  
      int64_t sws_flags;
 +    int64_t swr_dither_method;
 +    double swr_dither_scale;
      AVDictionary *opts;
      int is_past_recording_time;
 +    int unavailable;                     /* true if the stream is unavailable (possibly temporarily) */
      int stream_copy;
      const char *attachment_filename;
      int copy_initial_nonkeyframes;
  
 -    enum PixelFormat pix_fmts[2];
 +    int keep_pix_fmt;
  } OutputStream;
  
  typedef struct OutputFile {
      AVFormatContext *ctx;
      AVDictionary *opts;
      int ost_index;       /* index of the first stream in output_streams */
 -    int64_t recording_time; /* desired length of the resulting file in microseconds */
 -    int64_t start_time;     /* start time in microseconds */
 -    uint64_t limit_filesize;
 +    int64_t recording_time;  ///< desired length of the resulting file in microseconds == AV_TIME_BASE units
 +    int64_t start_time;      ///< start time in microseconds == AV_TIME_BASE units
 +    uint64_t limit_filesize; /* filesize limit expressed in bytes */
  } OutputFile;
  
  extern InputStream **input_streams;
@@@ -337,42 -318,35 +337,42 @@@ extern int         nb_output_files
  extern FilterGraph **filtergraphs;
  extern int        nb_filtergraphs;
  
 -extern char *pass_logfilename_prefix;
 +extern const char *pass_logfilename_prefix;
  extern char *vstats_filename;
  
  extern float audio_drift_threshold;
  extern float dts_delta_threshold;
 +extern float dts_error_threshold;
  
  extern int audio_volume;
  extern int audio_sync_method;
  extern int video_sync_method;
  extern int do_benchmark;
 +extern int do_benchmark_all;
  extern int do_deinterlace;
  extern int do_hex_dump;
  extern int do_pkt_dump;
  extern int copy_ts;
  extern int copy_tb;
 +extern int debug_ts;
  extern int opt_shortest;
  extern int exit_on_error;
  extern int print_stats;
  extern int qp_hist;
  extern int same_quant;
 +extern int stdin_interaction;
 +extern int frame_bits_per_raw_sample;
 +extern AVIOContext *progress_avio;
  
  extern const AVIOInterruptCB int_cb;
  
  extern const OptionDef options[];
  
 -void reset_options(OptionsContext *o);
 -void show_usage(void);
 +void term_init(void);
 +void term_exit(void);
  
 -int opt_cpuflags(const char *opt, const char *arg);
 +void reset_options(OptionsContext *o, int is_input);
 +void show_usage(void);
  
  void opt_output_file(void *optctx, const char *filename);
  
@@@ -380,12 -354,9 +380,12 @@@ void assert_avoptions(AVDictionary *m)
  
  int guess_input_channel_layout(InputStream *ist);
  
 +enum PixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum PixelFormat target);
 +void choose_sample_fmt(AVStream *st, AVCodec *codec);
 +
  int configure_filtergraph(FilterGraph *fg);
  int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out);
  int ist_in_filtergraph(FilterGraph *fg, InputStream *ist);
  FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost);
  
 -#endif /* AVCONV_H */
 +#endif /* FFMPEG_H */