Merge commit '3b266da3d35f3f7a61258b78384dfe920d875d29'
author Michael Niedermayer <michaelni@gmx.at>
Tue, 17 Apr 2012 02:01:17 +0000 (04:01 +0200)
committer Michael Niedermayer <michaelni@gmx.at>
Tue, 17 Apr 2012 02:03:50 +0000 (04:03 +0200)
* commit '3b266da3d35f3f7a61258b78384dfe920d875d29':
  avconv: add support for complex filtergraphs.
  avconv: make filtergraphs global.
  avconv: move filtered_frame from InputStream to OutputStream.
  avconv: don't set output width/height directly from input value.
  avconv: move resample_{width,height,pix_fmt} to InputStream.
  avconv: remove a useless variable from OutputStream.
  avconv: get output pixel format from lavfi.
  graphparser: fix the order in which unlabeled input links are returned.
  avconv: change {input,output}_{streams,files} into arrays of pointers.
  avconv: don't pass input/output streams to some functions.

Conflicts:
cmdutils.c
cmdutils.h
doc/ffmpeg.texi
ffmpeg.c
ffplay.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
cmdutils.h
doc/ffmpeg.texi
doc/filters.texi
ffmpeg.c
ffplay.c
libavfilter/graphparser.c
libavfilter/sink_buffer.c
tests/codec-regression.sh

diff --cc cmdutils.h
@@@ -370,6 -367,21 +370,7 @@@ int cmdutils_read_file(const char *file
  FILE *get_preset_file(char *filename, size_t filename_size,
                        const char *preset_name, int is_path, const char *codec_name);
  
 -typedef struct {
 -    const enum PixelFormat *pix_fmts;
 -} SinkContext;
 -
 -extern AVFilter sink;
 -
 -/**
 - * Extract a frame from sink.
 - *
 - * @return a negative error in case of failure, 1 if one frame has
 - * been extracted successfully.
 - */
 -int get_filtered_video_frame(AVFilterContext *sink, AVFrame *frame,
 -                             AVFilterBufferRef **picref, AVRational *pts_tb);
  /**
   * Do all the necessary cleanup and abort.
   * This function is implemented in the avtools, not cmdutils.
diff --cc doc/ffmpeg.texi
@@@ -538,9 -479,13 +541,13 @@@ the source for output stream 1, etc
  A @code{-} character before the stream identifier creates a "negative" mapping.
  It disables matching streams from already created mappings.
  
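For example (an illustration consistent with the description above), to map all streams from the first input but disable its audio:
@example
ffmpeg -i INPUT -map 0 -map -0:a output
@end example
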
+ An alternative @var{[linklabel]} form will map outputs from complex filter
+ graphs (see the @option{-filter_complex} option) to the output file.
+ @var{linklabel} must correspond to a defined output link label in the graph.
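As an illustration of the @var{[linklabel]} form (the filtergraph and label here are hypothetical, not from this commit):
@example
ffmpeg -i INPUT -filter_complex '[0:v]scale=640:360[small]' -map '[small]' out.mkv
@end example
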
  For example, to map ALL streams from the first input file to output
  @example
 -avconv -i INPUT -map 0 output
 +ffmpeg -i INPUT -map 0 output
  @end example
  
  For example, if you have two audio streams in the first input file,
@@@ -817,55 -634,57 +824,93 @@@ Set bitstream filters for matching stre
  a comma-separated list of bitstream filters. Use the @code{-bsfs} option
  to get the list of bitstream filters.
  @example
 -avconv -i h264.mp4 -c:v copy -bsf:v h264_mp4toannexb -an out.h264
 +ffmpeg -i h264.mp4 -c:v copy -bsf:v h264_mp4toannexb -an out.h264
  @end example
  @example
 -avconv -i file.mov -an -vn -bsf:s mov2textsub -c:s copy -f rawvideo sub.txt
 +ffmpeg -i file.mov -an -vn -bsf:s mov2textsub -c:s copy -f rawvideo sub.txt
  @end example
  
 -@item -tag[:@var{stream_specifier}] @var{codec_tag} (@emph{output,per-stream})
 +@item -tag[:@var{stream_specifier}] @var{codec_tag} (@emph{per-stream})
  Force a tag/fourcc for matching streams.
  
 -@item -cpuflags mask (@emph{global})
 -Set a mask that's applied to autodetected CPU flags.  This option is intended
 -for testing. Do not use it unless you know what you're doing.
 +@item -timecode @var{hh}:@var{mm}:@var{ss}SEP@var{ff}
 +Specify timecode for writing. @var{SEP} is ':' for non-drop timecode and ';'
 +(or '.') for drop timecode.
 +@example
 +ffmpeg -i input.mpg -timecode 01:02:03.04 -r 30000/1001 -s ntsc output.mpg
 +@end example
+ @item -filter_complex @var{filtergraph} (@emph{global})
+ Define a complex filter graph, i.e. one with an arbitrary number of inputs and/or
+ outputs. For simple graphs -- those with one input and one output of the same
+ type -- see the @option{-filter} options. @var{filtergraph} is a description of
+ the filter graph, as described in @ref{Filtergraph syntax}.
+
+ Input link labels must refer to input streams using the
+ @code{[file_index:stream_specifier]} syntax (i.e. the same as @option{-map}
+ uses). If @var{stream_specifier} matches multiple streams, the first one will be
+ used. An unlabeled input will be connected to the first unused input stream of
+ the matching type.
+
+ Output link labels are referred to with @option{-map}. Unlabeled outputs are
+ added to the first output file.
+
+ For example, to overlay an image over video
+ @example
 -avconv -i video.mkv -i image.png -filter_complex '[0:v][1:v]overlay[out]' -map
++ffmpeg -i video.mkv -i image.png -filter_complex '[0:v][1:v]overlay[out]' -map
+ '[out]' out.mkv
+ @end example
+
+ Here @code{[0:v]} refers to the first video stream in the first input file,
+ which is linked to the first (main) input of the overlay filter. Similarly, the
+ first video stream in the second input is linked to the second (overlay) input
+ of overlay.
+
+ Assuming there is only one video stream in each input file, we can omit input
+ labels, so the above is equivalent to
+ @example
 -avconv -i video.mkv -i image.png -filter_complex 'overlay[out]' -map
++ffmpeg -i video.mkv -i image.png -filter_complex 'overlay[out]' -map
+ '[out]' out.mkv
+ @end example
+
+ Furthermore, we can omit the output label, and the single output from the filter
+ graph will be added to the output file automatically, so we can simply write
+ @example
 -avconv -i video.mkv -i image.png -filter_complex 'overlay' out.mkv
++ffmpeg -i video.mkv -i image.png -filter_complex 'overlay' out.mkv
+ @end example
  @end table
 +
 +@section Preset files
 +A preset file contains a sequence of @var{option}=@var{value} pairs,
 +one per line, specifying options that would be awkward to specify on
 +the command line. Lines starting with the hash ('#') character are
 +ignored and can be used to provide comments. Check the @file{presets}
 +directory in the FFmpeg source tree for examples.
 +
 +Preset files are specified with the @code{vpre}, @code{apre},
 +@code{spre}, and @code{fpre} options. The @code{fpre} option takes the
 +filename of the preset instead of a preset name as input and can be
 +used for any kind of codec. For the @code{vpre}, @code{apre}, and
 +@code{spre} options, the options specified in a preset file are
 +applied to the currently selected codec of the same type as the preset
 +option.
 +
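As an illustration, such a file might contain (the option names below are sample libx264 options, not taken from this commit):
@example
# libx264-max.ffpreset (hypothetical)
coder=1
flags=+loop
subq=10
trellis=2
@end example
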
 +The argument passed to the @code{vpre}, @code{apre}, and @code{spre}
 +preset options identifies the preset file to use according to the
 +following rules:
 +
 +First ffmpeg searches for a file named @var{arg}.ffpreset in the
 +directories @file{$FFMPEG_DATADIR} (if set) and @file{$HOME/.ffmpeg}, in
 +the datadir defined at configuration time (usually @file{PREFIX/share/ffmpeg}),
 +or in a @file{ffpresets} folder alongside the executable on win32,
 +in that order. For example, if the argument is @code{libx264-max}, it will
 +search for the file @file{libx264-max.ffpreset}.
 +
 +If no such file is found, then ffmpeg will search for a file named
 +@var{codec_name}-@var{arg}.ffpreset in the above-mentioned
 +directories, where @var{codec_name} is the name of the codec to which
 +the preset file options will be applied. For example, if you select
 +the video codec with @code{-vcodec libx264} and use @code{-vpre max},
 +then it will search for the file @file{libx264-max.ffpreset}.
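
A hedged usage sketch matching the lookup rules above:
@example
ffmpeg -i input.avi -vcodec libx264 -vpre max out.mkv
@end example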
  @c man end OPTIONS
  
  @chapter Tips
Simple merge
diff --cc ffmpeg.c
+++ b/ffmpeg.c
@@@ -108,12 -89,17 +108,13 @@@ typedef struct StreamMap 
      int stream_index;
      int sync_file_index;
      int sync_stream_index;
+     char *linklabel;       /**< name of an output link, for mapping lavfi outputs */
  } StreamMap;
  
 -/**
 - * select an input file for an output file
 - */
 -typedef struct MetadataMap {
 -    int  file;      ///< file index
 -    char type;      ///< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
 -    int  index;     ///< stream/chapter/program number
 -} MetadataMap;
 +typedef struct {
 +    int  file_idx,  stream_idx,  channel_idx; // input
 +    int ofile_idx, ostream_idx;               // output
 +} AudioChannelMap;
  
  static const OptionDef options[];
  
@@@ -173,8 -144,35 +174,35 @@@ static unsigned int allocated_audio_buf
  static uint8_t *async_buf;
  static unsigned int allocated_async_buf_size;
  
 -#define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
 +#define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
  
+ typedef struct InputFilter {
+     AVFilterContext    *filter;
+     struct InputStream *ist;
+     struct FilterGraph *graph;
+ } InputFilter;
+
+ typedef struct OutputFilter {
+     AVFilterContext     *filter;
+     struct OutputStream *ost;
+     struct FilterGraph  *graph;
+
+     /* temporary storage until stream maps are processed */
+     AVFilterInOut       *out_tmp;
+ } OutputFilter;
+
+ typedef struct FilterGraph {
+     int            index;
+     const char    *graph_desc;
+
+     AVFilterGraph *graph;
+
+     InputFilter   **inputs;
+     int          nb_inputs;
+     OutputFilter **outputs;
+     int         nb_outputs;
+ } FilterGraph;
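
A hedged sketch (not part of this commit) of how the structs above wire streams to filtergraphs; it assumes the InputStream/OutputStream fields used elsewhere in ffmpeg.c (file_index, st, index), and notes that an OutputFilter's ost may still be NULL while out_tmp is pending:

static void print_filtergraph_wiring(const FilterGraph *fg)
{
    int i;
    /* each InputFilter remembers which input stream feeds it */
    for (i = 0; i < fg->nb_inputs; i++)
        av_log(NULL, AV_LOG_INFO, "fg %d: input pad %d <- stream #%d:%d\n",
               fg->index, i, fg->inputs[i]->ist->file_index,
               fg->inputs[i]->ist->st->index);
    /* each OutputFilter points at the output stream it will feed once
     * stream maps have been processed; until then out_tmp holds the pad */
    for (i = 0; i < fg->nb_outputs; i++)
        av_log(NULL, AV_LOG_INFO, "fg %d: output pad %d %s\n",
               fg->index, i,
               fg->outputs[i]->ost ? "-> mapped output stream" : "pending (out_tmp)");
}
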
  typedef struct FrameBuffer {
      uint8_t *base[4];
      uint8_t *data[4];
@@@ -211,9 -206,17 +238,18 @@@ typedef struct InputStream 
      int showed_multi_packet_warning;
      AVDictionary *opts;
  
+     int resample_height;
+     int resample_width;
+     int resample_pix_fmt;
      /* a pool of free buffers for decoded data */
      FrameBuffer *buffer_pool;
 +    int dr1;
+     /* decoded data from this stream goes into all those filters
+      * currently video only */
+     InputFilter **filters;
+     int        nb_filters;
  } InputStream;
  
  typedef struct InputFile {
@@@ -270,16 -275,10 +304,14 @@@ typedef struct OutputStream 
      AVFifoBuffer *fifo;     /* for compression: one audio fifo per codec */
      FILE *logfile;
  
-     AVFilterContext *output_video_filter;
-     AVFilterContext *input_video_filter;
 +    SwrContext *swr;
 +
+     OutputFilter *filter;
      char *avfilter;
-     AVFilterGraph *graph;
  
      int64_t sws_flags;
 +    int64_t swr_dither_method;
 +    double swr_dither_scale;
      AVDictionary *opts;
      int is_past_recording_time;
      int stream_copy;
@@@ -301,18 -295,21 +335,21 @@@ typedef struct OutputFile 
      int ost_index;       /* index of the first stream in output_streams */
      int64_t recording_time; /* desired length of the resulting file in microseconds */
      int64_t start_time;     /* start time in microseconds */
 -    uint64_t limit_filesize;
 +    uint64_t limit_filesize; /* filesize limit expressed in bytes */
  } OutputFile;
  
- static InputStream *input_streams   = NULL;
- static int         nb_input_streams = 0;
- static InputFile   *input_files     = NULL;
- static int         nb_input_files   = 0;
+ static InputStream **input_streams = NULL;
+ static int        nb_input_streams = 0;
+ static InputFile   **input_files   = NULL;
+ static int        nb_input_files   = 0;
  
- static OutputStream *output_streams = NULL;
- static int        nb_output_streams = 0;
- static OutputFile   *output_files   = NULL;
- static int        nb_output_files   = 0;
+ static OutputStream **output_streams = NULL;
+ static int         nb_output_streams = 0;
+ static OutputFile   **output_files   = NULL;
+ static int         nb_output_files   = 0;
+ static FilterGraph **filtergraphs;
+ int               nb_filtergraphs;
  
  typedef struct OptionsContext {
      /* input/output options */
      }\
  }
  
 -static void reset_options(OptionsContext *o)
 +static int64_t getutime(void)
 +{
 +#if HAVE_GETRUSAGE
 +    struct rusage rusage;
 +
 +    getrusage(RUSAGE_SELF, &rusage);
 +    return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
 +#elif HAVE_GETPROCESSTIMES
 +    HANDLE proc;
 +    FILETIME c, e, k, u;
 +    proc = GetCurrentProcess();
 +    GetProcessTimes(proc, &c, &e, &k, &u);
 +    return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
 +#else
 +    return av_gettime();
 +#endif
 +}
 +
 +static void update_benchmark(const char *fmt, ...)
 +{
 +    if (do_benchmark_all) {
 +        int64_t t = getutime();
 +        va_list va;
 +        char buf[1024];
 +
 +        if (fmt) {
 +            va_start(va, fmt);
 +            vsnprintf(buf, sizeof(buf), fmt, va);
 +            va_end(va);
 +            printf("bench: %8"PRIu64" %s \n", t - current_time, buf);
 +        }
 +        current_time = t;
 +    }
 +}
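
A hedged usage sketch: the first call only resets the timer; the second prints the user-CPU delta when -benchmark_all is active (the decode call and surrounding variables are illustrative, not from this hunk):

update_benchmark(NULL);
ret = avcodec_decode_video2(ist->st->codec, decoded_frame, &got_output, pkt);
update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);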
 +
 +static void reset_options(OptionsContext *o, int is_input)
  {
      const OptionDef *po = options;
 +    OptionsContext bak= *o;
+     int i;
  
      /* all OPT_SPEC and OPT_STRING can be freed in generic way */
      while (po->name) {
          po++;
      }
  
+     for (i = 0; i < o->nb_stream_maps; i++)
+         av_freep(&o->stream_maps[i].linklabel);
      av_freep(&o->stream_maps);
 -    av_freep(&o->meta_data_maps);
 +    av_freep(&o->audio_channel_maps);
      av_freep(&o->streamid_map);
  
      memset(o, 0, sizeof(*o));
@@@ -627,51 -583,53 +667,96 @@@ static void filter_release_buffer(AVFil
      unref_buffer(buf->ist, buf);
  }
  
- static int configure_video_filters(InputStream *ist, OutputStream *ost)
++static void choose_pixel_fmt(AVStream *st, AVCodec *codec)
 +{
++    if (codec && codec->pix_fmts) {
++        const enum PixelFormat *p = codec->pix_fmts;
++        int has_alpha= av_pix_fmt_descriptors[st->codec->pix_fmt].nb_components % 2 == 0;
++        enum PixelFormat best= PIX_FMT_NONE;
++        if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
++            if (st->codec->codec_id == CODEC_ID_MJPEG) {
++                p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE };
++            } else if (st->codec->codec_id == CODEC_ID_LJPEG) {
++                p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P,
++                                                 PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE };
++            }
++        }
++        for (; *p != PIX_FMT_NONE; p++) {
++            best= avcodec_find_best_pix_fmt2(best, *p, st->codec->pix_fmt, has_alpha, NULL);
++            if (*p == st->codec->pix_fmt)
++                break;
++        }
++        if (*p == PIX_FMT_NONE) {
++            if (st->codec->pix_fmt != PIX_FMT_NONE)
++                av_log(NULL, AV_LOG_WARNING,
++                       "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
++                       av_pix_fmt_descriptors[st->codec->pix_fmt].name,
++                       codec->name,
++                       av_pix_fmt_descriptors[best].name);
++            st->codec->pix_fmt = best;
++        }
++    }
++}
++
+ static const enum PixelFormat *choose_pixel_fmts(OutputStream *ost)
+ {
+     if (ost->st->codec->pix_fmt != PIX_FMT_NONE) {
+         ost->pix_fmts[0] = ost->st->codec->pix_fmt;
+         return ost->pix_fmts;
+     } else if (ost->enc->pix_fmts)
+         return ost->enc->pix_fmts;
+     else
+         return NULL;
+ }
+
+ static int configure_video_filters(FilterGraph *fg)
+ {
+     InputStream  *ist = fg->inputs[0]->ist;
+     OutputStream *ost = fg->outputs[0]->ost;
      AVFilterContext *last_filter, *filter;
      /** filter graph containing all filters including input & output */
      AVCodecContext *codec = ost->st->codec;
-     AVCodecContext *icodec = ist->st->codec;
-     enum PixelFormat pix_fmts[] = { codec->pix_fmt, PIX_FMT_NONE };
 -    SinkContext sink_ctx = { .pix_fmts = choose_pixel_fmts(ost) };
++    enum PixelFormat *pix_fmts = choose_pixel_fmts(ost);
 +    AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
      AVRational sample_aspect_ratio;
      char args[255];
      int ret;
  
-     ost->graph = avfilter_graph_alloc();
-     if (!ost->graph)
+     avfilter_graph_free(&fg->graph);
+     fg->graph = avfilter_graph_alloc();
++    if (!fg->graph)
 +        return AVERROR(ENOMEM);
  
      if (ist->st->sample_aspect_ratio.num) {
          sample_aspect_ratio = ist->st->sample_aspect_ratio;
      } else
          sample_aspect_ratio = ist->st->codec->sample_aspect_ratio;
  
 -    snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
 +    snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d:flags=%d", ist->st->codec->width,
               ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
-              sample_aspect_ratio.num, sample_aspect_ratio.den, SWS_BILINEAR + ((icodec->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
 -             sample_aspect_ratio.num, sample_aspect_ratio.den);
++             sample_aspect_ratio.num, sample_aspect_ratio.den, SWS_BILINEAR + ((ist->st->codec->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
  
-     ret = avfilter_graph_create_filter(&ost->input_video_filter, avfilter_get_by_name("buffer"),
-                                        "src", args, NULL, ost->graph);
+     ret = avfilter_graph_create_filter(&fg->inputs[0]->filter,
+                                        avfilter_get_by_name("buffer"),
+                                        "src", args, NULL, fg->graph);
      if (ret < 0)
          return ret;
 -    ret = avfilter_graph_create_filter(&fg->outputs[0]->filter, &sink,
 -                                       "out", NULL, &sink_ctx, fg->graph);
 +
 +#if FF_API_OLD_VSINK_API
-     ret = avfilter_graph_create_filter(&ost->output_video_filter, avfilter_get_by_name("buffersink"),
-                                        "out", NULL, pix_fmts, ost->graph);
++    ret = avfilter_graph_create_filter(&fg->outputs[0]->filter, avfilter_get_by_name("buffersink"),
++                                       "out", NULL, pix_fmts, fg->graph);
 +#else
 +    buffersink_params->pixel_fmts = pix_fmts;
-     ret = avfilter_graph_create_filter(&ost->output_video_filter, avfilter_get_by_name("buffersink"),
-                                        "out", NULL, buffersink_params, ost->graph);
++    ret = avfilter_graph_create_filter(&fg->outputs[0]->filter, avfilter_get_by_name("buffersink"),
++                                       "out", NULL, buffersink_params, fg->graph);
 +#endif
 +    av_freep(&buffersink_params);
 +
      if (ret < 0)
          return ret;
-     last_filter = ost->input_video_filter;
+     last_filter = fg->inputs[0]->filter;
  
-     if (codec->width != icodec->width || codec->height != icodec->height) {
+     if (codec->width || codec->height) {
          snprintf(args, 255, "%d:%d:flags=0x%X",
                   codec->width,
                   codec->height,
          inputs->pad_idx = 0;
          inputs->next    = NULL;
  
-         if ((ret = avfilter_graph_parse(ost->graph, ost->avfilter, &inputs, &outputs, NULL)) < 0)
 -        if ((ret = avfilter_graph_parse(fg->graph, ost->avfilter, inputs, outputs, NULL)) < 0)
++        if ((ret = avfilter_graph_parse(fg->graph, ost->avfilter, &inputs, &outputs, NULL)) < 0)
              return ret;
 +        av_freep(&ost->avfilter);
      } else {
-         if ((ret = avfilter_link(last_filter, 0, ost->output_video_filter, 0)) < 0)
+         if ((ret = avfilter_link(last_filter, 0, fg->outputs[0]->filter, 0)) < 0)
              return ret;
      }
  
      return 0;
  }
  
 -    SinkContext  sink_ctx;
+ static FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
+ {
+     FilterGraph *fg = av_mallocz(sizeof(*fg));
+     if (!fg)
+         exit_program(1);
+     fg->index = nb_filtergraphs;
+     fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs), &fg->nb_outputs,
+                              fg->nb_outputs + 1);
+     if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
+         exit_program(1);
+     fg->outputs[0]->ost   = ost;
+     fg->outputs[0]->graph = fg;
+     fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs), &fg->nb_inputs,
+                             fg->nb_inputs + 1);
+     if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
+         exit_program(1);
+     fg->inputs[0]->ist   = ist;
+     fg->inputs[0]->graph = fg;
+     ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
+                               &ist->nb_filters, ist->nb_filters + 1);
+     ist->filters[ist->nb_filters - 1] = fg->inputs[0];
+     filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
+                               &nb_filtergraphs, nb_filtergraphs + 1);
+     filtergraphs[nb_filtergraphs - 1] = fg;
+     return fg;
+ }
+
+ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
+ {
+     InputStream *ist;
+     enum AVMediaType type = in->filter_ctx->input_pads[in->pad_idx].type;
+     int i;
+     // TODO: support other filter types
+     if (type != AVMEDIA_TYPE_VIDEO) {
+         av_log(NULL, AV_LOG_FATAL, "Only video filters supported currently.\n");
+         exit_program(1);
+     }
+     if (in->name) {
+         AVFormatContext *s;
+         AVStream       *st = NULL;
+         char *p;
+         int file_idx = strtol(in->name, &p, 0);
+         if (file_idx < 0 || file_idx >= nb_input_files) {
+             av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
+                    file_idx, fg->graph_desc);
+             exit_program(1);
+         }
+         s = input_files[file_idx]->ctx;
+         for (i = 0; i < s->nb_streams; i++) {
+             if (s->streams[i]->codec->codec_type != type)
+                 continue;
+             if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
+                 st = s->streams[i];
+                 break;
+             }
+         }
+         if (!st) {
+             av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
+                    "matches no streams.\n", p, fg->graph_desc);
+             exit_program(1);
+         }
+         ist = input_streams[input_files[file_idx]->ist_index + st->index];
+     } else {
+         /* find the first unused stream of corresponding type */
+         for (i = 0; i < nb_input_streams; i++) {
+             ist = input_streams[i];
+             if (ist->st->codec->codec_type == type && ist->discard)
+                 break;
+         }
+         if (i == nb_input_streams) {
+             av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
+                    "unlabeled input pad %d on filter %s\n", in->pad_idx,
+                    in->filter_ctx->name);
+             exit_program(1);
+         }
+     }
+     ist->discard         = 0;
+     ist->decoding_needed = 1;
+     ist->st->discard = AVDISCARD_NONE;
+     fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs),
+                             &fg->nb_inputs, fg->nb_inputs + 1);
+     if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
+         exit_program(1);
+     fg->inputs[fg->nb_inputs - 1]->ist   = ist;
+     fg->inputs[fg->nb_inputs - 1]->graph = fg;
+     ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
+                               &ist->nb_filters, ist->nb_filters + 1);
+     ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
+ }
+
+ static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
+ {
 -    sink_ctx.pix_fmts = choose_pixel_fmts(ofilter->ost);
+     AVCodecContext *codec = ofilter->ost->st->codec;
+     AVFilterContext *last_filter = out->filter_ctx;
+     int pad_idx = out->pad_idx;
+     int ret;
++    enum PixelFormat *pix_fmts = choose_pixel_fmts(ofilter->ost);
++    AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
 -    ret = avfilter_graph_create_filter(&ofilter->filter, &sink,
 -                                       "out", NULL, &sink_ctx, fg->graph);
++#if FF_API_OLD_VSINK_API
++    ret = avfilter_graph_create_filter(&ofilter->filter, avfilter_get_by_name("buffersink"),
++                                       "out", NULL, pix_fmts, fg->graph);
++#else
++    buffersink_params->pixel_fmts = pix_fmts;
++    ret = avfilter_graph_create_filter(&ofilter->filter, avfilter_get_by_name("buffersink"),
++                                       "out", NULL, buffersink_params, fg->graph);
++#endif
++    av_freep(&buffersink_params);
+     if (ret < 0)
+         return ret;
+     if (codec->width || codec->height) {
+         char args[255];
+         snprintf(args, sizeof(args), "%d:%d:flags=0x%X",
+                  codec->width,
+                  codec->height,
+                  (unsigned)ofilter->ost->sws_flags);
+         if ((ret = avfilter_graph_create_filter(&last_filter, avfilter_get_by_name("scale"),
+                                                 NULL, args, NULL, fg->graph)) < 0)
+             return ret;
+         if ((ret = avfilter_link(out->filter_ctx, out->pad_idx, last_filter, 0)) < 0)
+             return ret;
+         pad_idx = 0;
+     }
+     if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
+         return ret;
+     return 0;
+ }
+
+ static int configure_complex_filter(FilterGraph *fg)
+ {
+     AVFilterInOut *inputs, *outputs, *cur;
+     int ret, i, init = !fg->graph;
+     avfilter_graph_free(&fg->graph);
+     if (!(fg->graph = avfilter_graph_alloc()))
+         return AVERROR(ENOMEM);
+     if ((ret = avfilter_graph_parse2(fg->graph, fg->graph_desc, &inputs, &outputs)) < 0)
+         return ret;
+     for (cur = inputs; init && cur; cur = cur->next)
+         init_input_filter(fg, cur);
+     for (cur = inputs, i = 0; cur; cur = cur->next, i++) {
+         InputFilter *ifilter = fg->inputs[i];
+         InputStream     *ist = ifilter->ist;
+         AVRational       sar;
+         char            args[255];
+         sar = ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
+                                                  ist->st->codec->sample_aspect_ratio;
+         snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
+                  ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
+                  sar.num, sar.den);
+         if ((ret = avfilter_graph_create_filter(&ifilter->filter,
+                                                 avfilter_get_by_name("buffer"), cur->name,
+                                                 args, NULL, fg->graph)) < 0)
+             return ret;
+         if ((ret = avfilter_link(ifilter->filter, 0,
+                                  cur->filter_ctx, cur->pad_idx)) < 0)
+             return ret;
+     }
+     avfilter_inout_free(&inputs);
+     if (!init) {
+         /* we already know the mappings between lavfi outputs and output streams,
+          * so we can finish the setup */
+         for (cur = outputs, i = 0; cur; cur = cur->next, i++)
+             configure_output_filter(fg, fg->outputs[i], cur);
+         avfilter_inout_free(&outputs);
+         if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
+             return ret;
+     } else {
+         /* wait until output mappings are processed */
+         for (cur = outputs; cur;) {
+             fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs),
+                                      &fg->nb_outputs, fg->nb_outputs + 1);
+             if (!(fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]))))
+                 exit_program(1);
+             fg->outputs[fg->nb_outputs - 1]->graph   = fg;
+             fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
+             cur = cur->next;
+             fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
+         }
+     }
+     return 0;
+ }
+
+ static int configure_complex_filters(void)
+ {
+     int i, ret = 0;
+     for (i = 0; i < nb_filtergraphs; i++)
+         if (!filtergraphs[i]->graph &&
+             (ret = configure_complex_filter(filtergraphs[i])) < 0)
+             return ret;
+     return 0;
+ }
+
+ static int configure_filtergraph(FilterGraph *fg)
+ {
+     return fg->graph_desc ? configure_complex_filter(fg) : configure_video_filters(fg);
+ }
+
+ static int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
+ {
+     int i;
+     for (i = 0; i < fg->nb_inputs; i++)
+         if (fg->inputs[i]->ist == ist)
+             return 1;
+     return 0;
+ }
  static void term_exit(void)
  {
 -    av_log(NULL, AV_LOG_QUIET, "");
 +    av_log(NULL, AV_LOG_QUIET, "%s", "");
 +#if HAVE_TERMIOS_H
 +    if(restore_tty)
 +        tcsetattr (0, TCSANOW, &oldtty);
 +#endif
  }
  
  static volatile int received_sigterm = 0;
@@@ -837,9 -931,21 +1149,21 @@@ static int decode_interrupt_cb(void *ct
  
  static const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
  
 -void exit_program(int ret)
 +void av_noreturn exit_program(int ret)
  {
-     int i;
+     int i, j;
+     for (i = 0; i < nb_filtergraphs; i++) {
+         avfilter_graph_free(&filtergraphs[i]->graph);
+         for (j = 0; j < filtergraphs[i]->nb_inputs; j++)
+             av_freep(&filtergraphs[i]->inputs[j]);
+         av_freep(&filtergraphs[i]->inputs);
+         for (j = 0; j < filtergraphs[i]->nb_outputs; j++)
+             av_freep(&filtergraphs[i]->outputs[j]);
+         av_freep(&filtergraphs[i]->outputs);
+         av_freep(&filtergraphs[i]);
+     }
+     av_freep(&filtergraphs);
  
      /* close files */
      for (i = 0; i < nb_output_files; i++) {
                  av_freep(&frame->extended_data);
              av_freep(&frame);
          }
 -
 -        av_freep(&output_streams[i]->avfilter);
+         av_freep(&output_streams[i]->filtered_frame);
+         av_freep(&output_streams[i]);
      }
      for (i = 0; i < nb_input_files; i++) {
-         avformat_close_input(&input_files[i].ctx);
+         avformat_close_input(&input_files[i]->ctx);
+         av_freep(&input_files[i]);
      }
      for (i = 0; i < nb_input_streams; i++) {
-         av_freep(&input_streams[i].decoded_frame);
-         av_freep(&input_streams[i].filtered_frame);
-         av_dict_free(&input_streams[i].opts);
-         free_buffer_pool(&input_streams[i]);
+         av_freep(&input_streams[i]->decoded_frame);
+         av_dict_free(&input_streams[i]->opts);
+         free_buffer_pool(input_streams[i]);
+         av_freep(&input_streams[i]->filters);
+         av_freep(&input_streams[i]);
      }
  
      if (vstats_file)
@@@ -1474,74 -1567,19 +1772,76 @@@ static void do_subtitle_out(AVFormatCon
      }
  }
  
 -static void do_video_out(AVFormatContext *s,
 -                         OutputStream *ost,
 -                         AVFrame *in_picture,
 -                         int *frame_size, float quality)
 +static double psnr(double d)
 +{
 +    return -10.0 * log(d) / log(10.0);
 +}
 +
 +static void do_video_stats(AVFormatContext *os, OutputStream *ost,
 +                           int frame_size)
 +{
 +    AVCodecContext *enc;
 +    int frame_number;
 +    double ti1, bitrate, avg_bitrate;
 +
 +    /* this is executed just the first time do_video_stats is called */
 +    if (!vstats_file) {
 +        vstats_file = fopen(vstats_filename, "w");
 +        if (!vstats_file) {
 +            perror("fopen");
 +            exit_program(1);
 +        }
 +    }
 +
 +    enc = ost->st->codec;
 +    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
 +        frame_number = ost->frame_number;
 +        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
 +        if (enc->flags&CODEC_FLAG_PSNR)
 +            fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
 +
 +        fprintf(vstats_file,"f_size= %6d ", frame_size);
 +        /* compute pts value */
 +        ti1 = ost->sync_opts * av_q2d(enc->time_base);
 +        if (ti1 < 0.01)
 +            ti1 = 0.01;
 +
 +        bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
 +        avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
 +        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
 +               (double)video_size / 1024, ti1, bitrate, avg_bitrate);
 +        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
 +    }
 +}
 +
 +
 +static void do_video_out(AVFormatContext *s, OutputStream *ost,
-                          InputStream *ist, AVFrame *in_picture)
++                         AVFrame *in_picture, float quality)
  {
      int nb_frames, i, ret, format_video_sync;
      AVCodecContext *enc;
      double sync_ipts, delta;
-     float quality = same_quant ? in_picture->quality
-                                : ost->st->codec->global_quality;
 +    double duration = 0;
 +    int frame_size = 0;
++    InputStream *ist = NULL;
++
++    if (ost->source_index >= 0)
++        ist = input_streams[ost->source_index];
  
      enc = ost->st->codec;
  
-     if (ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE) {
++    if (ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE) {
 +        duration = FFMAX(av_q2d(ist->st->time_base), av_q2d(ist->st->codec->time_base));
 +        if(ist->st->r_frame_rate.num)
 +            duration= FFMAX(duration, 1/av_q2d(ist->st->r_frame_rate));
 +        if(ist->st->avg_frame_rate.num && 0)
 +            duration= FFMAX(duration, 1/av_q2d(ist->st->avg_frame_rate));
 +
 +        duration /= av_q2d(enc->time_base);
 +    }
 +
      sync_ipts = get_sync_ipts(ost, in_picture->pts) / av_q2d(enc->time_base);
 -    delta = sync_ipts - ost->sync_opts;
 +    delta = sync_ipts - ost->sync_opts + duration;
  
      /* by default, we output a single frame */
      nb_frames = 1;
           */
          ost->frame_number++;
      }
 -}
 -
 -static double psnr(double d)
 -{
 -    return -10.0 * log(d) / log(10.0);
 -}
 -
 -static void do_video_stats(AVFormatContext *os, OutputStream *ost,
 -                           int frame_size)
 -{
 -    AVCodecContext *enc;
 -    int frame_number;
 -    double ti1, bitrate, avg_bitrate;
 -
 -    /* this is executed just the first time do_video_stats is called */
 -    if (!vstats_file) {
 -        vstats_file = fopen(vstats_filename, "w");
 -        if (!vstats_file) {
 -            perror("fopen");
 -            exit_program(1);
 -        }
 -    }
 -
 -    enc = ost->st->codec;
 -    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
 -        frame_number = ost->frame_number;
 -        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
 -        if (enc->flags&CODEC_FLAG_PSNR)
 -            fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
 -
 -        fprintf(vstats_file,"f_size= %6d ", frame_size);
 -        /* compute pts value */
 -        ti1 = ost->sync_opts * av_q2d(enc->time_base);
 -        if (ti1 < 0.01)
 -            ti1 = 0.01;
 -
 -        bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
 -        avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
 -        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
 -               (double)video_size / 1024, ti1, bitrate, avg_bitrate);
 -        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
 -    }
 +    if (vstats_filename && frame_size)
-         do_video_stats(output_files[ost->file_index].ctx, ost, frame_size);
++        do_video_stats(output_files[ost->file_index]->ctx, ost, frame_size);
  }
  
- static void print_report(OutputFile *output_files,
-                          OutputStream *ost_table, int nb_ostreams,
-                          int is_last_report, int64_t timer_start, int64_t cur_time)
+ /* check for new output on any of the filtergraphs */
+ static int poll_filters(void)
+ {
+     AVFilterBufferRef *picref;
+     AVFrame *filtered_frame = NULL;
 -    int i, frame_size, ret;
++    int i, ret;
+     for (i = 0; i < nb_output_streams; i++) {
+         OutputStream *ost = output_streams[i];
+         OutputFile    *of = output_files[ost->file_index];
+         if (!ost->filter || ost->is_past_recording_time)
+             continue;
+         if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
+             return AVERROR(ENOMEM);
+         } else
+             avcodec_get_frame_defaults(ost->filtered_frame);
+         filtered_frame = ost->filtered_frame;
+         while (avfilter_poll_frame(ost->filter->filter->inputs[0])) {
 -            AVRational ist_pts_tb;
 -            if ((ret = get_filtered_video_frame(ost->filter->filter,
 -                                                filtered_frame, &picref,
 -                                                &ist_pts_tb)) < 0)
++            AVRational ist_pts_tb = ost->filter->filter->inputs[0]->time_base;
++            if ((ret = av_buffersink_get_buffer_ref(ost->filter->filter,
++                                                            &picref,
++                                                            0)) < 0) {
++                av_log(NULL, AV_LOG_WARNING, "AV Filter told us it has a frame available but failed to output one\n");
+                 return ret;
++            }
+             filtered_frame->pts = av_rescale_q(picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
++//             if (ost->source_index >= 0)
++//                 *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
+             if (of->start_time && filtered_frame->pts < of->start_time)
+                 return 0;
+             switch (ost->filter->filter->inputs[0]->type) {
+             case AVMEDIA_TYPE_VIDEO:
++                avfilter_fill_frame_from_video_buffer_ref(filtered_frame, picref);
+                 if (!ost->frame_aspect_ratio)
 -                    ost->st->codec->sample_aspect_ratio = picref->video->pixel_aspect;
++                    ost->st->codec->sample_aspect_ratio = picref->video->sample_aspect_ratio;
 -                do_video_out(of->ctx, ost, filtered_frame, &frame_size,
++                do_video_out(of->ctx, ost, filtered_frame,
+                              same_quant ? ost->last_quality :
+                                           ost->st->codec->global_quality);
 -                if (vstats_filename && frame_size)
 -                    do_video_stats(of->ctx, ost, frame_size);
+                 break;
+             default:
+                 // TODO support audio/subtitle filters
+                 av_assert0(0);
+             }
+             avfilter_unref_buffer(picref);
+         }
+     }
+     return 0;
+ }
+
 -static void print_report(int is_last_report, int64_t timer_start)
++static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
  {
      char buf[1024];
      OutputStream *ost;
      }
  
  
-     oc = output_files[0].ctx;
+     oc = output_files[0]->ctx;
  
      total_size = avio_size(oc->pb);
 -    if (total_size < 0) // FIXME improve avio_size() so it works with non seekable output too
 +    if (total_size < 0) { // FIXME improve avio_size() so it works with non seekable output too
          total_size = avio_tell(oc->pb);
 +        if (total_size < 0)
 +            total_size = 0;
 +    }
  
      buf[0] = '\0';
 -    ti1 = 1e10;
      vid = 0;
-     for (i = 0; i < nb_ostreams; i++) {
+     for (i = 0; i < nb_output_streams; i++) {
          float q = -1;
-         ost = &ost_table[i];
+         ost = output_streams[i];
          enc = ost->st->codec;
          if (!ost->stream_copy && enc->coded_frame)
              q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
@@@ -1920,9 -2022,8 +2274,9 @@@ static int check_output_constraints(Inp
  
  static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
  {
-     OutputFile *of = &output_files[ost->file_index];
+     OutputFile *of = output_files[ost->file_index];
      int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
 +    AVPicture pict;
      AVPacket opkt;
  
      av_init_packet(&opkt);
  
  static void rate_emu_sleep(InputStream *ist)
  {
-     if (input_files[ist->file_index].rate_emu) {
+     if (input_files[ist->file_index]->rate_emu) {
 -        int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
 +        int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
          int64_t now = av_gettime() - ist->start;
          if (pts > now)
              usleep(pts - now);
@@@ -2105,9 -2196,8 +2459,10 @@@ static int transcode_video(InputStream 
  {
      AVFrame *decoded_frame;
      void *buffer_to_free = NULL;
-     int i, ret = 0;
+     int i, ret = 0, resample_changed;
 +    int64_t *best_effort_timestamp;
 +    AVRational *frame_sample_aspect;
+     float quality;
  
      if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
          return AVERROR(ENOMEM);
          /* no picture yet */
          return ret;
      }
 -    decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
 -                                           decoded_frame->pkt_dts);
 +
 +    best_effort_timestamp= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "best_effort_timestamp");
 +    if(*best_effort_timestamp != AV_NOPTS_VALUE)
 +        ist->next_pts = ist->pts = decoded_frame->pts = *best_effort_timestamp;
 +
      pkt->size = 0;
 +
      pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
  
-     frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
-     for(i=0;i<nb_output_streams;i++) {
-         OutputStream *ost = ost = &output_streams[i];
-         if(check_output_constraints(ist, ost) && ost->encoding_needed){
-             int changed =    ist->st->codec->width   != ost->input_video_filter->outputs[0]->w
-                           || ist->st->codec->height  != ost->input_video_filter->outputs[0]->h
-                           || ist->st->codec->pix_fmt != ost->input_video_filter->outputs[0]->format;
-             if (!frame_sample_aspect->num)
-                 *frame_sample_aspect = ist->st->sample_aspect_ratio;
-             decoded_frame->pts = ist->pts;
-             if (ist->dr1 && decoded_frame->type==FF_BUFFER_TYPE_USER && !changed) {
-                 FrameBuffer      *buf = decoded_frame->opaque;
-                 AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
-                                             decoded_frame->data, decoded_frame->linesize,
-                                             AV_PERM_READ | AV_PERM_PRESERVE,
-                                             ist->st->codec->width, ist->st->codec->height,
-                                             ist->st->codec->pix_fmt);
-                 avfilter_copy_frame_props(fb, decoded_frame);
-                 fb->buf->priv           = buf;
-                 fb->buf->free           = filter_release_buffer;
-                 buf->refcount++;
-                 av_buffersrc_buffer(ost->input_video_filter, fb);
-             } else
-             if((av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, AV_VSRC_BUF_FLAG_OVERWRITE)) < 0){
-                 av_log(NULL, AV_LOG_FATAL, "Failed to inject frame into filter network\n");
-                 exit_program(1);
-             }
-         }
-     }
      rate_emu_sleep(ist);
  
      if (ist->st->sample_aspect_ratio.num)
          decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
  
-     for (i = 0; i < nb_output_streams; i++) {
-         OutputStream *ost = &output_streams[i];
-         if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
-             continue;
-         while (av_buffersink_poll_frame(ost->output_video_filter)) {
-             AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base;
-             AVFrame *filtered_frame;
-             AVFilterBufferRef *picref;
-             if (av_buffersink_get_buffer_ref(ost->output_video_filter, &picref, 0) < 0){
-                 av_log(NULL, AV_LOG_WARNING, "AV Filter told us it has a frame available but failed to output one\n");
-                 goto cont;
-             }
-             if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) {
-                 ret = AVERROR(ENOMEM);
-                 goto fail;
+     resample_changed = ist->resample_width   != decoded_frame->width  ||
+                        ist->resample_height  != decoded_frame->height ||
+                        ist->resample_pix_fmt != decoded_frame->format;
+     if (resample_changed) {
+         av_log(NULL, AV_LOG_INFO,
+                "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
+                ist->file_index, ist->st->index,
+                ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
+                decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
+         ist->resample_width   = decoded_frame->width;
+         ist->resample_height  = decoded_frame->height;
+         ist->resample_pix_fmt = decoded_frame->format;
+         for (i = 0; i < nb_filtergraphs; i++)
+             if (ist_in_filtergraph(filtergraphs[i], ist) &&
+                 configure_filtergraph(filtergraphs[i]) < 0) {
+                 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
+                 exit_program(1);
              }
-             filtered_frame = ist->filtered_frame;
-             *filtered_frame= *decoded_frame; //for me_threshold
-             avfilter_fill_frame_from_video_buffer_ref(filtered_frame, picref);
-             filtered_frame->pts = av_rescale_q(picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
-             if (!ost->frame_aspect_ratio)
-                 ost->st->codec->sample_aspect_ratio = picref->video->sample_aspect_ratio;
-             do_video_out(output_files[ost->file_index].ctx, ost, ist, filtered_frame);
-             cont:
-             avfilter_unref_buffer(picref);
+     }
++    frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
+     for (i = 0; i < ist->nb_filters; i++) {
++        int changed =      ist->st->codec->width   != ist->filters[i]->filter->outputs[0]->w
++                        || ist->st->codec->height  != ist->filters[i]->filter->outputs[0]->h
++                        || ist->st->codec->pix_fmt != ist->filters[i]->filter->outputs[0]->format;
+         // XXX what an ugly hack
+         if (ist->filters[i]->graph->nb_outputs == 1)
+             ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;
 -        if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
++        if (!frame_sample_aspect->num)
++            *frame_sample_aspect = ist->st->sample_aspect_ratio;
++        if (ist->dr1 && decoded_frame->type==FF_BUFFER_TYPE_USER && !changed) {
+             FrameBuffer      *buf = decoded_frame->opaque;
+             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
+                                         decoded_frame->data, decoded_frame->linesize,
+                                         AV_PERM_READ | AV_PERM_PRESERVE,
+                                         ist->st->codec->width, ist->st->codec->height,
+                                         ist->st->codec->pix_fmt);
+             avfilter_copy_frame_props(fb, decoded_frame);
+             fb->buf->priv           = buf;
+             fb->buf->free           = filter_release_buffer;
+             buf->refcount++;
+             av_buffersrc_buffer(ist->filters[i]->filter, fb);
+         } else
 -            av_vsrc_buffer_add_frame(ist->filters[i]->filter, decoded_frame,
 -                                     decoded_frame->pts, decoded_frame->sample_aspect_ratio);
++        if(av_vsrc_buffer_add_frame(ist->filters[i]->filter, decoded_frame,AV_VSRC_BUF_FLAG_OVERWRITE)<0) {
++            av_log(NULL, AV_LOG_FATAL, "Failed to inject frame into filter network\n");
++            exit_program(1);
 +        }
++
      }
  
- fail:
      av_free(buffer_to_free);
      return ret;
  }
@@@ -2237,14 -2304,11 +2584,12 @@@ static int transcode_subtitles(InputStr
  }
  
  /* pkt = NULL means EOF (needed to flush decoder buffers) */
- static int output_packet(InputStream *ist,
-                          OutputStream *ost_table, int nb_ostreams,
-                          const AVPacket *pkt)
+ static int output_packet(InputStream *ist, const AVPacket *pkt)
  {
 -    int i;
 +    int ret = 0, i;
      int got_output;
      int64_t pkt_pts = AV_NOPTS_VALUE;
 +
      AVPacket avpkt;
  
      if (ist->next_dts == AV_NOPTS_VALUE)
              }
              break;
          }
 +        ist->pts = ist->dts;
 +        ist->next_pts = ist->next_dts;
      }
-     for (i = 0; pkt && i < nb_ostreams; i++) {
-         OutputStream *ost = &ost_table[i];
+     for (i = 0; pkt && i < nb_output_streams; i++) {
+         OutputStream *ost = output_streams[i];
  
          if (!check_output_constraints(ist, ost) || ost->encoding_needed)
              continue;
@@@ -2430,12 -2472,10 +2775,11 @@@ static void get_default_channel_layouts
  }
  
  
- static int init_input_stream(int ist_index, OutputStream *output_streams, int nb_output_streams,
-                              char *error, int error_len)
+ static int init_input_stream(int ist_index, char *error, int error_len)
  {
-     InputStream *ist = &input_streams[ist_index];
      int i;
+     InputStream *ist = input_streams[ist_index];
 +
      if (ist->decoding_needed) {
          AVCodec *codec = ist->dec;
          if (!codec) {
@@@ -2623,9 -2673,10 +2991,10 @@@ static int transcode_init(void
              }
          } else {
              if (!ost->enc)
 -                ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
 +                ost->enc = avcodec_find_encoder(codec->codec_id);
  
-             ist->decoding_needed = 1;
+             if (ist)
+                 ist->decoding_needed = 1;
              ost->encoding_needed = 1;
  
              switch (codec->codec_type) {
                  ost->resample_channels    = icodec->channels;
                  break;
              case AVMEDIA_TYPE_VIDEO:
-                 if (codec->pix_fmt == PIX_FMT_NONE)
-                     codec->pix_fmt = icodec->pix_fmt;
-                 choose_pixel_fmt(ost->st, ost->enc);
-                 if (ost->st->codec->pix_fmt == PIX_FMT_NONE) {
-                     av_log(NULL, AV_LOG_FATAL, "Video pixel format is unknown, stream cannot be encoded\n");
-                     exit_program(1);
-                 }
-                 if (!codec->width || !codec->height) {
-                     codec->width  = icodec->width;
-                     codec->height = icodec->height;
-                 }
-                 ost->video_resample = codec->width   != icodec->width  ||
-                                       codec->height  != icodec->height ||
-                                       codec->pix_fmt != icodec->pix_fmt;
-                 if (ost->video_resample) {
-                     codec->bits_per_raw_sample = frame_bits_per_raw_sample;
+                 if (!ost->filter) {
+                     FilterGraph *fg;
+                     fg = init_simple_filtergraph(ist, ost);
+                     if (configure_video_filters(fg)) {
+                         av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
+                         exit(1);
+                     }
                  }
  
-                 ost->resample_height  = icodec->height;
-                 ost->resample_width   = icodec->width;
-                 ost->resample_pix_fmt = icodec->pix_fmt;
-                 if (!ost->frame_rate.num)
 -                /*
 -                 * We want CFR output if and only if one of those is true:
 -                 * 1) user specified output framerate with -r
 -                 * 2) user specified -vsync cfr
 -                 * 3) output format is CFR and the user didn't force vsync to
 -                 *    something else than CFR
 -                 *
 -                 * in such a case, set ost->frame_rate
 -                 */
 -                if (!ost->frame_rate.num && ist &&
 -                    (video_sync_method ==  VSYNC_CFR ||
 -                     (video_sync_method ==  VSYNC_AUTO &&
 -                      !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
 -                    ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
 -                    if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
 -                        int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
 -                        ost->frame_rate = ost->enc->supported_framerates[idx];
 -                    }
++                if (ist && !ost->frame_rate.num)
 +                    ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational) { 25, 1 };
 +                if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
 +                    int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
 +                    ost->frame_rate = ost->enc->supported_framerates[idx];
                  }
 -                if (ost->frame_rate.num) {
 -                    codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
 -                    video_sync_method = VSYNC_CFR;
 -                } else if (ist)
 -                    codec->time_base = ist->st->time_base;
 -                else
 -                    codec->time_base = ost->filter->filter->inputs[0]->time_base;
 +                codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
 +                if (   av_q2d(codec->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
 +                   && (video_sync_method == VSYNC_CFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
 +                    av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
 +                                               "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
 +                }
 +                for (j = 0; j < ost->forced_kf_count; j++)
 +                    ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
 +                                                         AV_TIME_BASE_Q,
 +                                                         codec->time_base);
  
-                 if (configure_video_filters(ist, ost)) {
-                     av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
-                     exit_program(1);
+                 codec->width  = ost->filter->filter->inputs[0]->w;
+                 codec->height = ost->filter->filter->inputs[0]->h;
+                 codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
+                     ost->frame_aspect_ratio ? // overridden by the -aspect cli option
+                     av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
+                     ost->filter->filter->inputs[0]->sample_aspect_ratio;
+                 codec->pix_fmt = ost->filter->filter->inputs[0]->format;
+                 if (codec->width   != icodec->width  ||
+                     codec->height  != icodec->height ||
+                     codec->pix_fmt != icodec->pix_fmt) {
 -                    codec->bits_per_raw_sample = 0;
++                    codec->bits_per_raw_sample = frame_bits_per_raw_sample;
                  }
                  break;
              case AVMEDIA_TYPE_SUBTITLE:
                  codec->time_base = (AVRational){1, 1000};
  
      /* open each encoder */
      for (i = 0; i < nb_output_streams; i++) {
-         ost = &output_streams[i];
+         ost = output_streams[i];
          if (ost->encoding_needed) {
              AVCodec      *codec = ost->enc;
-             AVCodecContext *dec = input_streams[ost->source_index].st->codec;
+             AVCodecContext *dec = NULL;
              if (!codec) {
 -                snprintf(error, sizeof(error), "Encoder (codec id %d) not found for output stream #%d:%d",
 -                         ost->st->codec->codec_id, ost->file_index, ost->index);
 +                snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
 +                         avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
                  ret = AVERROR(EINVAL);
                  goto dump_format;
              }
              ret = AVERROR(EINVAL);
              goto dump_format;
          }
- //        assert_avoptions(output_files[i].opts);
 -        assert_avoptions(output_files[i]->opts);
++//         assert_avoptions(output_files[i]->opts);
          if (strcmp(oc->oformat->name, "rtp")) {
              want_sdp = 0;
          }
                     ost->attachment_filename, ost->file_index, ost->index);
              continue;
          }
+         if (ost->filter && ost->filter->graph->graph_desc) {
+             /* output from a complex graph */
+             AVFilterLink *link = ost->filter->filter->inputs[0];
+             av_log(NULL, AV_LOG_INFO, "  %s", link->src->filter->name);
+             if (link->src->output_count > 1)
+                 av_log(NULL, AV_LOG_INFO, ":%s", link->srcpad->name);
+             if (nb_filtergraphs > 1)
+                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
+             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
+                    ost->index, ost->enc ? ost->enc->name : "?");
+             continue;
+         }
          av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
-                input_streams[ost->source_index].file_index,
-                input_streams[ost->source_index].st->index,
+                input_streams[ost->source_index]->file_index,
+                input_streams[ost->source_index]->st->index,
                 ost->file_index,
                 ost->index);
-         if (ost->sync_ist != &input_streams[ost->source_index])
 +        if (ost->audio_channels_mapped) {
 +            av_log(NULL, AV_LOG_INFO, " [ch:");
 +            for (j = 0; j < ost->audio_channels_mapped; j++)
 +                if (ost->audio_channels_map[j] == -1)
 +                    av_log(NULL, AV_LOG_INFO, " M");
 +                else
 +                    av_log(NULL, AV_LOG_INFO, " %d", ost->audio_channels_map[j]);
 +            av_log(NULL, AV_LOG_INFO, "]");
 +        }
+         if (ost->sync_ist != input_streams[ost->source_index])
              av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                     ost->sync_ist->file_index,
                     ost->sync_ist->st->index);
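
The hunk above snaps the output frame rate to the encoder's supported_framerates list with av_find_nearest_q_idx(), then derives the codec time base as the inverse of the chosen rate; the aspect computation likewise converts a display aspect ratio into a sample aspect ratio via SAR = DAR * height / width (a 16:9 DAR at 720x576 yields SAR 64/45). A minimal sketch of the frame-rate pattern; pick_frame_rate() is a hypothetical helper, not a function from this tree:

    #include "libavcodec/avcodec.h"
    #include "libavutil/rational.h"

    /* sketch: snap a requested rate to the nearest one the encoder
     * supports, then derive the codec time base from it */
    static AVRational pick_frame_rate(const AVCodec *enc, AVRational requested)
    {
        if (enc && enc->supported_framerates) {
            int idx = av_find_nearest_q_idx(requested, enc->supported_framerates);
            requested = enc->supported_framerates[idx];
        }
        return requested;
    }

    /* usage, mirroring the hunk:
     *   AVRational fr = pick_frame_rate(enc, (AVRational){ 30000, 1001 });
     *   codec->time_base = (AVRational){ fr.den, fr.num };
     */
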
@@@ -2927,137 -2988,46 +3325,137 @@@ static int transcode(void
      timer_start = av_gettime();
  
      for (; received_sigterm == 0;) {
-         int file_index, ist_index;
+         int file_index, ist_index, past_recording_time = 1;
          AVPacket pkt;
          int64_t ipts_min;
-         double opts_min;
 +        int64_t cur_time= av_gettime();
  
          ipts_min = INT64_MAX;
-         opts_min = 1e100;
 +        /* if 'q' pressed, exits */
 +        if (!using_stdin) {
 +            static int64_t last_time;
 +            if (received_nb_signals)
 +                break;
 +            /* read_key() returns 0 on EOF */
 +            if(cur_time - last_time >= 100000 && !run_as_daemon){
 +                key =  read_key();
 +                last_time = cur_time;
 +            }else
 +                key = -1;
 +            if (key == 'q')
 +                break;
 +            if (key == '+') av_log_set_level(av_log_get_level()+10);
 +            if (key == '-') av_log_set_level(av_log_get_level()-10);
 +            if (key == 's') qp_hist     ^= 1;
 +            if (key == 'h'){
 +                if (do_hex_dump){
 +                    do_hex_dump = do_pkt_dump = 0;
 +                } else if(do_pkt_dump){
 +                    do_hex_dump = 1;
 +                } else
 +                    do_pkt_dump = 1;
 +                av_log_set_level(AV_LOG_DEBUG);
 +            }
 +            if (key == 'c' || key == 'C'){
 +                char buf[4096], target[64], command[256], arg[256] = {0};
 +                double time;
 +                int k, n = 0;
 +                fprintf(stderr, "\nEnter command: <target> <time> <command>[ <argument>]\n");
 +                i = 0;
 +                while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
 +                    if (k > 0)
 +                        buf[i++] = k;
 +                buf[i] = 0;
 +                if (k > 0 &&
 +                    (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
 +                    av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
 +                           target, time, command, arg);
-                     for (i = 0; i < nb_output_streams; i++) {
-                         ost = &output_streams[i];
-                         if (ost->graph) {
++                    for (i = 0; i < nb_filtergraphs; i++) {
++                        FilterGraph *fg = filtergraphs[i];
++                        if (fg->graph) {
 +                            if (time < 0) {
-                                 ret = avfilter_graph_send_command(ost->graph, target, command, arg, buf, sizeof(buf),
++                                ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
 +                                                                  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
 +                                fprintf(stderr, "Command reply for stream %d: ret:%d res:%s\n", i, ret, buf);
 +                            } else {
-                                 ret = avfilter_graph_queue_command(ost->graph, target, command, arg, 0, time);
++                                ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
 +                            }
 +                        }
 +                    }
 +                } else {
 +                    av_log(NULL, AV_LOG_ERROR,
 +                           "Parse error, at least 3 arguments were expected, "
 +                           "only %d given in string '%s'\n", n, buf);
 +                }
 +            }
 +            if (key == 'd' || key == 'D'){
 +                int debug=0;
 +                if(key == 'D') {
-                     debug = input_streams[0].st->codec->debug<<1;
++                    debug = input_streams[0]->st->codec->debug<<1;
 +                    if(!debug) debug = 1;
 +                    while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
 +                        debug += debug;
 +                }else
 +                    if(scanf("%d", &debug)!=1)
 +                        fprintf(stderr,"error parsing debug value\n");
 +                for(i=0;i<nb_input_streams;i++) {
-                     input_streams[i].st->codec->debug = debug;
++                    input_streams[i]->st->codec->debug = debug;
 +                }
 +                for(i=0;i<nb_output_streams;i++) {
-                     ost = &output_streams[i];
++                    ost = output_streams[i];
 +                    ost->st->codec->debug = debug;
 +                }
 +                if(debug) av_log_set_level(AV_LOG_DEBUG);
 +                fprintf(stderr,"debug=%d\n", debug);
 +            }
 +            if (key == '?'){
 +                fprintf(stderr, "key    function\n"
 +                                "?      show this help\n"
 +                                "+      increase verbosity\n"
 +                                "-      decrease verbosity\n"
 +                                "c      Send command to filtergraph\n"
 +                                "D      cycle through available debug modes\n"
 +                                "h      dump packets/hex; press again to cycle through the 3 states\n"
 +                                "q      quit\n"
 +                                "s      Show QP histogram\n"
 +                );
 +            }
 +        }
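
The 'c'/'C' handler above forwards interactive commands into every configured filtergraph: avfilter_graph_send_command() applies a command immediately (AVFILTER_CMD_FLAG_ONE stops at the first filter that accepts it), while avfilter_graph_queue_command() schedules it for a given stream time. A hedged sketch of the two calls; the target, command, and argument names are hypothetical placeholders, not commands any particular filter is guaranteed to implement:

    #include "libavfilter/avfiltergraph.h"

    /* sketch: send "mycmd" to every filter matching "mytarget" now,
     * and queue the same command for t = 10 seconds */
    static void send_example_commands(AVFilterGraph *graph)
    {
        char res[256] = { 0 };
        avfilter_graph_send_command(graph, "mytarget", "mycmd", "myarg",
                                    res, sizeof(res), AVFILTER_CMD_FLAG_ONE);
        avfilter_graph_queue_command(graph, "mytarget", "mycmd", "myarg",
                                     0, 10.0);
    }
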
  
-         /* select the stream that we must read now by looking at the
-            smallest output pts */
-         file_index = -1;
+         /* check if there's any stream where output is still needed */
          for (i = 0; i < nb_output_streams; i++) {
              OutputFile *of;
-             int64_t ipts;
-             double  opts;
-             ost = &output_streams[i];
-             of = &output_files[ost->file_index];
-             os = output_files[ost->file_index].ctx;
-             ist = &input_streams[ost->source_index];
-             if (ost->is_past_recording_time || no_packet[ist->file_index] ||
+             ost = output_streams[i];
+             of  = output_files[ost->file_index];
+             os  = output_files[ost->file_index]->ctx;
+             if (ost->is_past_recording_time ||
                  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
                  continue;
-             opts = ost->st->pts.val * av_q2d(ost->st->time_base);
+             if (ost->frame_number >= ost->max_frames) {
+                 int j;
+                 for (j = 0; j < of->ctx->nb_streams; j++)
+                     output_streams[of->ost_index + j]->is_past_recording_time = 1;
+                 continue;
+             }
+             past_recording_time = 0;
+         }
+         if (past_recording_time)
+             break;
+         /* select the stream that we must read now by looking at the
+            smallest output pts */
+         file_index = -1;
+         for (i = 0; i < nb_input_streams; i++) {
+             int64_t ipts;
+             ist = input_streams[i];
 -            ipts = ist->last_dts;
 +            ipts = ist->pts;
-             if (!input_files[ist->file_index].eof_reached) {
+             if (ist->discard || no_packet[ist->file_index])
+                 continue;
+             if (!input_files[ist->file_index]->eof_reached) {
                  if (ipts < ipts_min) {
                      ipts_min = ipts;
-                     if (input_sync)
-                         file_index = ist->file_index;
+                     file_index = ist->file_index;
                  }
-                 if (opts < opts_min) {
-                     opts_min = opts;
-                     if (!input_sync) file_index = ist->file_index;
-                 }
-             }
-             if (ost->frame_number >= ost->max_frames) {
-                 int j;
-                 for (j = 0; j < of->ctx->nb_streams; j++)
-                     output_streams[of->ost_index + j].is_past_recording_time = 1;
-                 continue;
              }
          }
          /* if none, if is finished */
          if (pkt.dts != AV_NOPTS_VALUE)
              pkt.dts *= ist->ts_scale;
  
 -        //fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
 -        //        ist->next_dts,
 -        //        pkt.dts, input_files[ist->file_index].ts_offset,
 -        //        ist->st->codec->codec_type);
 -        if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE
 -            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
 +        if (debug_ts) {
 +            av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
 +                    "next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%"PRId64"\n",
 +                    ist_index, av_get_media_type_string(ist->st->codec->codec_type),
 +                    av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &ist->st->time_base),
 +                    av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
 +                    av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
-                     input_files[ist->file_index].ts_offset);
++                    input_files[ist->file_index]->ts_offset);
 +        }
 +
 +        if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE && !copy_ts) {
              int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
              int64_t delta   = pkt_dts - ist->next_dts;
 -            if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
 +            if (is->iformat->flags & AVFMT_TS_DISCONT) {
 +            if(delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
 +                (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
 +                 ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
 +                pkt_dts+1<ist->pts){
-                 input_files[ist->file_index].ts_offset -= delta;
+                 input_files[ist->file_index]->ts_offset -= delta;
                  av_log(NULL, AV_LOG_DEBUG,
                         "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
-                        delta, input_files[ist->file_index].ts_offset);
+                        delta, input_files[ist->file_index]->ts_offset);
                  pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                  if (pkt.pts != AV_NOPTS_VALUE)
                      pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
          av_free_packet(&pkt);
  
          /* dump report by using the output first video and audio streams */
-         print_report(output_files, output_streams, nb_output_streams, 0, timer_start, cur_time);
 -        print_report(0, timer_start);
++        print_report(0, timer_start, cur_time);
      }
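
The discontinuity handling earlier in this loop measures the DTS jump in AV_TIME_BASE units and rescales it back into the stream's own time base before correcting pkt.dts/pkt.pts. av_rescale_q() performs that a*b/c conversion between time bases without intermediate overflow; a tiny standalone illustration with made-up numbers:

    #include <inttypes.h>
    #include <stdio.h>
    #include "libavutil/mathematics.h"

    int main(void)
    {
        int64_t delta = 1500000; /* 1.5 s in 1/1000000 (AV_TIME_BASE) units */
        /* the same 1.5 s expressed in a 90 kHz MPEG-TS time base */
        int64_t d = av_rescale_q(delta, (AVRational){ 1, 1000000 },
                                 (AVRational){ 1, 90000 });
        printf("%"PRId64"\n", d); /* prints 135000 */
        return 0;
    }
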
  
      /* at the end of stream, we must flush the decoder buffers */
      }
  
      /* dump report by using the first video and audio streams */
-     print_report(output_files, output_streams, nb_output_streams, 1, timer_start, av_gettime());
 -    print_report(1, timer_start);
++    print_report(1, timer_start, av_gettime());
  
      /* close each encoder */
      for (i = 0; i < nb_output_streams; i++) {
@@@ -3381,66 -3336,6 +3791,66 @@@ static int opt_attach(OptionsContext *o
      return 0;
  }
  
-         m->stream_idx >= input_files[m->file_idx].nb_streams) {
 +static int opt_map_channel(OptionsContext *o, const char *opt, const char *arg)
 +{
 +    int n;
 +    AVStream *st;
 +    AudioChannelMap *m;
 +
 +    o->audio_channel_maps =
 +        grow_array(o->audio_channel_maps, sizeof(*o->audio_channel_maps),
 +                   &o->nb_audio_channel_maps, o->nb_audio_channel_maps + 1);
 +    m = &o->audio_channel_maps[o->nb_audio_channel_maps - 1];
 +
 +    /* muted channel syntax */
 +    n = sscanf(arg, "%d:%d.%d", &m->channel_idx, &m->ofile_idx, &m->ostream_idx);
 +    if ((n == 1 || n == 3) && m->channel_idx == -1) {
 +        m->file_idx = m->stream_idx = -1;
 +        if (n == 1)
 +            m->ofile_idx = m->ostream_idx = -1;
 +        return 0;
 +    }
 +
 +    /* normal syntax */
 +    n = sscanf(arg, "%d.%d.%d:%d.%d",
 +               &m->file_idx,  &m->stream_idx, &m->channel_idx,
 +               &m->ofile_idx, &m->ostream_idx);
 +
 +    if (n != 3 && n != 5) {
 +        av_log(NULL, AV_LOG_FATAL, "Syntax error, mapchan usage: "
 +               "[file.stream.channel|-1][:ofile.ostream]\n");
 +        exit_program(1);
 +    }
 +
 +    if (n != 5) // only file.stream.channel specified
 +        m->ofile_idx = m->ostream_idx = -1;
 +
 +    /* check input */
 +    if (m->file_idx < 0 || m->file_idx >= nb_input_files) {
 +        av_log(NULL, AV_LOG_FATAL, "mapchan: invalid input file index: %d\n",
 +               m->file_idx);
 +        exit_program(1);
 +    }
 +    if (m->stream_idx < 0 ||
-     st = input_files[m->file_idx].ctx->streams[m->stream_idx];
++        m->stream_idx >= input_files[m->file_idx]->nb_streams) {
 +        av_log(NULL, AV_LOG_FATAL, "mapchan: invalid input file stream index #%d.%d\n",
 +               m->file_idx, m->stream_idx);
 +        exit_program(1);
 +    }
++    st = input_files[m->file_idx]->ctx->streams[m->stream_idx];
 +    if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
 +        av_log(NULL, AV_LOG_FATAL, "mapchan: stream #%d.%d is not an audio stream.\n",
 +               m->file_idx, m->stream_idx);
 +        exit_program(1);
 +    }
 +    if (m->channel_idx < 0 || m->channel_idx >= st->codec->channels) {
 +        av_log(NULL, AV_LOG_FATAL, "mapchan: invalid audio channel #%d.%d.%d\n",
 +               m->file_idx, m->stream_idx, m->channel_idx);
 +        exit_program(1);
 +    }
 +    return 0;
 +}
 +
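
opt_map_channel() above accepts two spellings: a bare channel index of -1 to insert a muted channel, or file.stream.channel with an optional :ofile.ostream output target. The parsing is plain sscanf(); a self-contained sketch of the same pattern, runnable outside ffmpeg:

    #include <stdio.h>

    int main(void)
    {
        int file_idx, stream_idx, channel_idx, ofile_idx = -1, ostream_idx = -1;
        /* the "normal syntax" pattern from opt_map_channel() */
        int n = sscanf("0.1.2:0.0", "%d.%d.%d:%d.%d",
                       &file_idx, &stream_idx, &channel_idx,
                       &ofile_idx, &ostream_idx);
        /* n == 5: input #0.1 channel 2 mapped to output #0.0 */
        printf("n=%d in=#%d.%d ch=%d out=#%d.%d\n",
               n, file_idx, stream_idx, channel_idx, ofile_idx, ostream_idx);
        return 0;
    }
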
  /**
   * Parse a metadata specifier in arg.
   * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
@@@ -3639,12 -3506,16 +4053,16 @@@ static void add_input_streams(OptionsCo
  
          switch (dec->codec_type) {
          case AVMEDIA_TYPE_VIDEO:
 +            if(!ist->dec)
 +                ist->dec = avcodec_find_decoder(dec->codec_id);
              if (dec->lowres) {
                  dec->flags |= CODEC_FLAG_EMU_EDGE;
 -                dec->height >>= dec->lowres;
 -                dec->width  >>= dec->lowres;
              }
  
+             ist->resample_height  = dec->height;
+             ist->resample_width   = dec->width;
+             ist->resample_pix_fmt = dec->pix_fmt;
              break;
          case AVMEDIA_TYPE_AUDIO:
          case AVMEDIA_TYPE_DATA:
@@@ -4023,16 -3891,9 +4447,18 @@@ static OutputStream *new_output_stream(
          st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
  
      av_opt_get_int(sws_opts, "sws_flags", 0, &ost->sws_flags);
-         ost->sync_ist = &input_streams[source_index];
-         input_streams[source_index].discard = 0;
-         input_streams[source_index].st->discard = AVDISCARD_NONE;
 +    av_opt_get_int   (swr_opts, "dither_method", 0, &ost->swr_dither_method);
 +    av_opt_get_double(swr_opts, "dither_scale" , 0, &ost->swr_dither_scale);
 +
 +    ost->source_index = source_index;
 +    if (source_index >= 0) {
++        ost->sync_ist = input_streams[source_index];
++        input_streams[source_index]->discard = 0;
++        input_streams[source_index]->st->discard = AVDISCARD_NONE;
 +    }
  
+     ost->pix_fmts[0] = ost->pix_fmts[1] = PIX_FMT_NONE;
      return ost;
  }
  
@@@ -4208,24 -4053,6 +4634,24 @@@ static OutputStream *new_audio_stream(O
          }
  
          MATCH_PER_STREAM_OPT(audio_sample_rate, i, audio_enc->sample_rate, oc, st);
-         InputStream *ist = &input_streams[ost->source_index];
 +
 +        ost->rematrix_volume=1.0;
 +        MATCH_PER_STREAM_OPT(rematrix_volume, f, ost->rematrix_volume, oc, st);
 +    }
 +
 +    /* check for channel mapping for this audio stream */
 +    for (n = 0; n < o->nb_audio_channel_maps; n++) {
 +        AudioChannelMap *map = &o->audio_channel_maps[n];
++        InputStream *ist = input_streams[ost->source_index];
 +        if ((map->channel_idx == -1 || (ist->file_index == map->file_idx && ist->st->index == map->stream_idx)) &&
 +            (map->ofile_idx   == -1 || ost->file_index == map->ofile_idx) &&
 +            (map->ostream_idx == -1 || ost->st->index  == map->ostream_idx)) {
 +            if (ost->audio_channels_mapped < FF_ARRAY_ELEMS(ost->audio_channels_map))
 +                ost->audio_channels_map[ost->audio_channels_mapped++] = map->channel_idx;
 +            else
 +                av_log(NULL, AV_LOG_FATAL, "Max channel mapping for output %d.%d reached\n",
 +                       ost->file_index, ost->st->index);
 +        }
      }
  
      return ost;
@@@ -4330,46 -4155,29 +4756,69 @@@ static int copy_chapters(InputFile *ifi
      return 0;
  }
  
 -    ost               = new_video_stream(o, oc);
 +static int read_ffserver_streams(OptionsContext *o, AVFormatContext *s, const char *filename)
 +{
 +    int i, err;
 +    AVFormatContext *ic = avformat_alloc_context();
 +
 +    ic->interrupt_callback = int_cb;
 +    err = avformat_open_input(&ic, filename, NULL, NULL);
 +    if (err < 0)
 +        return err;
 +    /* copy stream format */
 +    for(i=0;i<ic->nb_streams;i++) {
 +        AVStream *st;
 +        OutputStream *ost;
 +        AVCodec *codec;
 +        AVCodecContext *avctx;
 +
 +        codec = avcodec_find_encoder(ic->streams[i]->codec->codec_id);
 +        ost   = new_output_stream(o, s, codec->type, -1);
 +        st    = ost->st;
 +        avctx = st->codec;
 +        ost->enc = codec;
 +
 +        // FIXME: a more elegant solution is needed
 +        memcpy(st, ic->streams[i], sizeof(AVStream));
 +        st->cur_dts = 0;
 +        st->info = av_malloc(sizeof(*st->info));
 +        memcpy(st->info, ic->streams[i]->info, sizeof(*st->info));
 +        st->codec= avctx;
 +        avcodec_copy_context(st->codec, ic->streams[i]->codec);
 +
 +        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && !ost->stream_copy)
 +            choose_sample_fmt(st, codec);
 +        else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && !ost->stream_copy)
 +            choose_pixel_fmt(st, codec);
 +    }
 +
 +    avformat_close_input(&ic);
 +    return 0;
 +}
 +
+ static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
+                                AVFormatContext *oc)
+ {
+     OutputStream *ost;
+     if (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type != AVMEDIA_TYPE_VIDEO) {
+         av_log(NULL, AV_LOG_FATAL, "Only video filters are supported currently.\n");
+         exit_program(1);
+     }
++    ost               = new_video_stream(o, oc, -1);
+     ost->source_index = -1;
+     ost->filter       = ofilter;
+     ofilter->ost      = ost;
+     if (configure_output_filter(ofilter->graph, ofilter, ofilter->out_tmp) < 0) {
+         av_log(NULL, AV_LOG_FATAL, "Error configuring filter.\n");
+         exit_program(1);
+     }
+     avfilter_inout_free(&ofilter->out_tmp);
+ }
  static void opt_output_file(void *optctx, const char *filename)
  {
      OptionsContext *o = optctx;
      if (!strcmp(filename, "-"))
          filename = "pipe:";
  
 -    oc = avformat_alloc_context();
 +    err = avformat_alloc_output_context2(&oc, NULL, o->format, filename);
      if (!oc) {
 -        print_error(filename, AVERROR(ENOMEM));
 +        print_error(filename, err);
          exit_program(1);
      }
 -
 -    if (o->format) {
 -        file_oformat = av_guess_format(o->format, NULL, NULL);
 -        if (!file_oformat) {
 -            av_log(NULL, AV_LOG_FATAL, "Requested output format '%s' is not a suitable output format\n", o->format);
 -            exit_program(1);
 -        }
 -    } else {
 -        file_oformat = av_guess_format(NULL, filename, NULL);
 -        if (!file_oformat) {
 -            av_log(NULL, AV_LOG_FATAL, "Unable to find a suitable output format for '%s'\n",
 -                   filename);
 -            exit_program(1);
 -        }
 -    }
 -
 -    oc->oformat = file_oformat;
 +    file_oformat= oc->oformat;
      oc->interrupt_callback = int_cb;
 -    av_strlcpy(oc->filename, filename, sizeof(oc->filename));
  
 -    if (!o->nb_stream_maps) {
 -        /* pick the "best" stream of each type */
 -#define NEW_STREAM(type, index)\
 -        if (index >= 0) {\
 -            ost = new_ ## type ## _stream(o, oc);\
 -            ost->source_index = index;\
 -            ost->sync_ist     = input_streams[index];\
 -            input_streams[index]->discard = 0;\
 -            input_streams[index]->st->discard = AVDISCARD_NONE;\
+     /* create streams for all unlabeled output pads */
+     for (i = 0; i < nb_filtergraphs; i++) {
+         FilterGraph *fg = filtergraphs[i];
+         for (j = 0; j < fg->nb_outputs; j++) {
+             OutputFilter *ofilter = fg->outputs[j];
+             if (!ofilter->out_tmp || ofilter->out_tmp->name)
+                 continue;
+             switch (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type) {
+             case AVMEDIA_TYPE_VIDEO:    o->video_disable    = 1; break;
+             case AVMEDIA_TYPE_AUDIO:    o->audio_disable    = 1; break;
+             case AVMEDIA_TYPE_SUBTITLE: o->subtitle_disable = 1; break;
+             }
+             init_output_filter(ofilter, o, oc);
+         }
+     }
 +    if (!strcmp(file_oformat->name, "ffm") &&
 +        av_strstart(filename, "http:", NULL)) {
 +        int j;
 +        /* special case for files sent to ffserver: we get the stream
 +           parameters from ffserver */
 +        int err = read_ffserver_streams(o, oc, filename);
 +        if (err < 0) {
 +            print_error(filename, err);
 +            exit_program(1);
          }
-             ost = &output_streams[j];
 +        for(j = nb_output_streams - oc->nb_streams; j < nb_output_streams; j++) {
-                 ist = &input_streams[i];
++            ost = output_streams[j];
 +            for (i = 0; i < nb_input_streams; i++) {
++                ist = input_streams[i];
 +                if(ist->st->codec->codec_type == ost->st->codec->codec_type){
 +                    ost->sync_ist= ist;
 +                    ost->source_index= i;
 +                    ist->discard = 0;
 +                    ist->st->discard = AVDISCARD_NONE;
 +                    break;
 +                }
 +            }
 +            if(!ost->sync_ist){
 +                av_log(NULL, AV_LOG_FATAL, "Missing %s stream which is required by this ffm\n", av_get_media_type_string(ost->st->codec->codec_type));
 +                exit_program(1);
 +            }
 +        }
 +    } else if (!o->nb_stream_maps) {
 +        /* pick the "best" stream of each type */
  
          /* video: highest resolution */
          if (!o->video_disable && oc->oformat->video_codec != CODEC_ID_NONE) {
          }
  
          /* subtitles: pick first */
 -        if (!o->subtitle_disable && oc->oformat->subtitle_codec != CODEC_ID_NONE) {
 +        if (!o->subtitle_disable && (oc->oformat->subtitle_codec != CODEC_ID_NONE || subtitle_codec_name)) {
              for (i = 0; i < nb_input_streams; i++)
-                 if (input_streams[i].st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
+                 if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
 -                    NEW_STREAM(subtitle, i);
 +                    new_subtitle_stream(o, oc, i);
                      break;
                  }
          }
      } else {
          for (i = 0; i < o->nb_stream_maps; i++) {
              StreamMap *map = &o->stream_maps[i];
-             int src_idx = input_files[map->file_index].ist_index + map->stream_index;
++            int src_idx = input_files[map->file_index]->ist_index + map->stream_index;
  
              if (map->disabled)
                  continue;
  
-             ist = &input_streams[input_files[map->file_index].ist_index + map->stream_index];
+             if (map->linklabel) {
+                 FilterGraph *fg;
+                 OutputFilter *ofilter = NULL;
+                 int j, k;
+                 for (j = 0; j < nb_filtergraphs; j++) {
+                     fg = filtergraphs[j];
+                     for (k = 0; k < fg->nb_outputs; k++) {
+                         AVFilterInOut *out = fg->outputs[k]->out_tmp;
+                         if (out && !strcmp(out->name, map->linklabel)) {
+                             ofilter = fg->outputs[k];
+                             goto loop_end;
+                         }
+                     }
+                 }
+ loop_end:
+                 if (!ofilter) {
+                     av_log(NULL, AV_LOG_FATAL, "Output with label '%s' does not exist "
+                            "in any defined filter graph.\n", map->linklabel);
+                     exit_program(1);
+                 }
+                 init_output_filter(ofilter, o, oc);
+             } else {
+             ist = input_streams[input_files[map->file_index]->ist_index + map->stream_index];
 +            if(o->subtitle_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE)
 +                continue;
 +            if(o->   audio_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
 +                continue;
 +            if(o->   video_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
 +                continue;
 +            if(o->    data_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_DATA)
 +                continue;
 +
              switch (ist->st->codec->codec_type) {
 -            case AVMEDIA_TYPE_VIDEO:    ost = new_video_stream(o, oc);    break;
 -            case AVMEDIA_TYPE_AUDIO:    ost = new_audio_stream(o, oc);    break;
 -            case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(o, oc); break;
 -            case AVMEDIA_TYPE_DATA:     ost = new_data_stream(o, oc);     break;
 -            case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc); break;
 +            case AVMEDIA_TYPE_VIDEO:    ost = new_video_stream(o, oc, src_idx);    break;
 +            case AVMEDIA_TYPE_AUDIO:    ost = new_audio_stream(o, oc, src_idx);    break;
 +            case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(o, oc, src_idx); break;
 +            case AVMEDIA_TYPE_DATA:     ost = new_data_stream(o, oc, src_idx);     break;
 +            case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc, src_idx); break;
              default:
                  av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d:%d - unsupported type.\n",
                         map->file_index, map->stream_index);
                  exit_program(1);
              }
 -            ost->source_index = input_files[map->file_index]->ist_index + map->stream_index;
 -            ost->sync_ist     = input_streams[input_files[map->sync_file_index]->ist_index +
 -                                           map->sync_stream_index];
 -            ist->discard = 0;
 -            ist->st->discard = AVDISCARD_NONE;
+             }
          }
      }
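
The linklabel branch above is what lets -map address a labelled output of a -filter_complex graph instead of an input stream: the label is looked up across all defined filtergraphs, and init_output_filter() then creates the output stream with source_index = -1, since it has no demuxer-side source. As a hypothetical illustration (not a command taken from this tree's docs), something like `ffmpeg -i in.mkv -filter_complex 'split[a][b]' -map '[a]' a.mkv -map '[b]' b.mkv` would route each labelled pad to its own output file.
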
  
-         ost = &output_streams[i];
 +
 +    for (i = nb_output_streams - oc->nb_streams; i < nb_output_streams; i++) { //for all streams of this output file
 +        AVDictionaryEntry *e;
++        ost = output_streams[i];
 +
 +        if (   ost->stream_copy
 +            && (e = av_dict_get(codec_opts, "flags", NULL, AV_DICT_IGNORE_SUFFIX))
 +            && (!e->key[5] || check_stream_specifier(oc, ost->st, e->key+6)))
 +            if (av_opt_set(ost->st->codec, "flags", e->value, 0) < 0)
 +                exit_program(1);
 +    }
 +
      /* handle attached files */
      for (i = 0; i < o->nb_attachments; i++) {
          AVIOContext *pb;
      if (o->mux_preload) {
          uint8_t buf[64];
          snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
-         av_dict_set(&output_files[nb_output_files - 1].opts, "preload", buf, 0);
+         av_dict_set(&output_files[nb_output_files - 1]->opts, "preload", buf, 0);
      }
      oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
 -    oc->flags |= AVFMT_FLAG_NONBLOCK;
  
      /* copy metadata */
      for (i = 0; i < o->nb_metadata_map; i++) {
              av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d while processing metadata maps\n", in_file_index);
              exit_program(1);
          }
-         copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, in_file_index >= 0 ? input_files[in_file_index].ctx : NULL, o);
 -        copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, input_files[in_file_index]->ctx, o);
++        copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, in_file_index >= 0 ? input_files[in_file_index]->ctx : NULL, o);
      }
  
      /* copy chapters */
                        !o->metadata_chapters_manual);
  
      /* copy global metadata by default */
 -    if (!o->metadata_global_manual && nb_input_files)
 +    if (!o->metadata_global_manual && nb_input_files){
-         av_dict_copy(&oc->metadata, input_files[0].ctx->metadata,
+         av_dict_copy(&oc->metadata, input_files[0]->ctx->metadata,
                       AV_DICT_DONT_OVERWRITE);
 +        if(o->recording_time != INT64_MAX)
 +            av_dict_set(&oc->metadata, "duration", NULL, 0);
 +    }
      if (!o->metadata_streams_manual)
-         for (i = output_files[nb_output_files - 1].ost_index; i < nb_output_streams; i++) {
+         for (i = output_files[nb_output_files - 1]->ost_index; i < nb_output_streams; i++) {
              InputStream *ist;
-             if (output_streams[i].source_index < 0)         /* this is true e.g. for attached files */
+             if (output_streams[i]->source_index < 0)         /* this is true e.g. for attached files */
                  continue;
-             ist = &input_streams[output_streams[i].source_index];
-             av_dict_copy(&output_streams[i].st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
+             ist = input_streams[output_streams[i]->source_index];
+             av_dict_copy(&output_streams[i]->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
          }
  
      /* process manually set metadata */
@@@ -5128,9 -4956,9 +5639,10 @@@ static const OptionDef options[] = 
      { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC, {.off = OFFSET(max_frames)}, "set the number of frames to record", "number" },
      { "tag",   OPT_STRING | HAS_ARG | OPT_SPEC, {.off = OFFSET(codec_tags)}, "force codec tag/fourcc", "fourcc/tag" },
      { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
 -    { "qscale", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
 +    { "qscale", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_qscale}, "use fixed quality scale (VBR)", "q" },
 +    { "profile", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_profile}, "set profile", "profile" },
      { "filter", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(filters)}, "set stream filterchain", "filter_list" },
+     { "filter_complex", HAS_ARG | OPT_EXPERT, {(void*)opt_filter_complex}, "create a complex filtergraph", "graph_description" },
      { "stats", OPT_BOOL, {&print_stats}, "print progress report during encoding", },
      { "attach", HAS_ARG | OPT_FUNC2, {(void*)opt_attach}, "add an attachment to the output file", "filename" },
      { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(dump_attachment)}, "extract an attachment into a file", "filename" },
@@@ -5273,8 -5068,8 +5785,8 @@@ int main(int argc, char **argv
          exit_program(1);
      }
  
 -    ti = getutime();
 +    current_time = ti = getutime();
-     if (transcode(output_files, nb_output_files, input_files, nb_input_files) < 0)
+     if (transcode() < 0)
          exit_program(1);
      ti = getutime() - ti;
      if (do_benchmark) {
diff --cc ffplay.c
+++ b/ffplay.c
@@@ -1740,10 -1701,10 +1740,10 @@@ static AVFilter input_filter 
  
  static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
  {
+     static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
      char sws_flags_str[128];
      int ret;
-     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
 -    SinkContext sink_ctx = { .pix_fmts = pix_fmts };
 +    AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
      AVFilterContext *filt_src = NULL, *filt_out = NULL;
      snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
      graph->scale_sws_opts = av_strdup(sws_flags_str);
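
On the ffplay side, the old SinkContext is replaced by the buffersink filter configured through AVBufferSinkParams. A hedged sketch of the creation step, assuming an already-allocated graph; make_video_sink() is a hypothetical helper and error handling is trimmed to the essentials:

    #include "libavfilter/avfilter.h"
    #include "libavfilter/avfiltergraph.h"
    #include "libavfilter/buffersink.h"

    /* sketch: create a buffersink that only accepts yuv420p frames */
    static int make_video_sink(AVFilterGraph *graph, AVFilterContext **sink)
    {
        static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
        AVBufferSinkParams *params = av_buffersink_params_alloc();
        int ret;

        if (!params)
            return AVERROR(ENOMEM);
        params->pixel_fmts = pix_fmts;
        ret = avfilter_graph_create_filter(sink, avfilter_get_by_name("buffersink"),
                                           "out", NULL, params, graph);
        av_free(params);
        return ret;
    }
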
Simple merge
index 926362b,0000000..8eb695e
mode 100644,000000..100644
--- /dev/null
@@@ -1,298 -1,0 +1,302 @@@
-         av_log(ctx, AV_LOG_ERROR,
 +/*
 + * Copyright (c) 2011 Stefano Sabatini
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/**
 + * @file
 + * buffer video sink
 + */
 +
 +#include "libavutil/fifo.h"
 +#include "avfilter.h"
 +#include "buffersink.h"
 +#include "internal.h"
 +
 +AVBufferSinkParams *av_buffersink_params_alloc(void)
 +{
 +    static const int pixel_fmts[] = { -1 };
 +    AVBufferSinkParams *params = av_malloc(sizeof(AVBufferSinkParams));
 +    if (!params)
 +        return NULL;
 +
 +    params->pixel_fmts = pixel_fmts;
 +    return params;
 +}
 +
 +AVABufferSinkParams *av_abuffersink_params_alloc(void)
 +{
 +    static const int sample_fmts[] = { -1 };
 +    static const int packing_fmts[] = { -1 };
 +    static const int64_t channel_layouts[] = { -1 };
 +    AVABufferSinkParams *params = av_malloc(sizeof(AVABufferSinkParams));
 +
 +    if (!params)
 +        return NULL;
 +
 +    params->sample_fmts = sample_fmts;
 +    params->channel_layouts = channel_layouts;
 +    params->packing_fmts = packing_fmts;
 +    return params;
 +}
 +
 +typedef struct {
 +    AVFifoBuffer *fifo;                      ///< FIFO buffer of video frame references
 +
 +    /* only used for video */
 +    enum PixelFormat *pixel_fmts;           ///< list of accepted pixel formats, must be terminated with -1
 +
 +    /* only used for audio */
 +    enum AVSampleFormat *sample_fmts;       ///< list of accepted sample formats, terminated by AV_SAMPLE_FMT_NONE
 +    int64_t *channel_layouts;               ///< list of accepted channel layouts, terminated by -1
 +    int *packing_fmts;                      ///< list of accepted packing formats, terminated by -1
 +} BufferSinkContext;
 +
 +#define FIFO_INIT_SIZE 8
 +
 +static av_cold int common_init(AVFilterContext *ctx)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +
 +    buf->fifo = av_fifo_alloc(FIFO_INIT_SIZE*sizeof(AVFilterBufferRef *));
 +    if (!buf->fifo) {
 +        av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo\n");
 +        return AVERROR(ENOMEM);
 +    }
 +    return 0;
 +}
 +
 +static av_cold void common_uninit(AVFilterContext *ctx)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +    AVFilterBufferRef *picref;
 +
 +    if (buf->fifo) {
 +        while (av_fifo_size(buf->fifo) >= sizeof(AVFilterBufferRef *)) {
 +            av_fifo_generic_read(buf->fifo, &picref, sizeof(picref), NULL);
 +            avfilter_unref_buffer(picref);
 +        }
 +        av_fifo_free(buf->fifo);
 +        buf->fifo = NULL;
 +    }
 +}
 +
 +static void end_frame(AVFilterLink *inlink)
 +{
 +    AVFilterContext *ctx = inlink->dst;
 +    BufferSinkContext *buf = inlink->dst->priv;
 +
 +    if (av_fifo_space(buf->fifo) < sizeof(AVFilterBufferRef *)) {
 +        /* realloc fifo size */
 +        if (av_fifo_realloc2(buf->fifo, av_fifo_size(buf->fifo) * 2) < 0) {
 +            av_log(ctx, AV_LOG_ERROR,
 +                   "Cannot buffer more frames. Consume some available frames "
 +                   "before adding new ones.\n");
 +            return;
 +        }
 +    }
 +
 +    /* cache frame */
 +    av_fifo_generic_write(buf->fifo,
 +                          &inlink->cur_buf, sizeof(AVFilterBufferRef *), NULL);
 +}
 +
 +int av_buffersink_get_buffer_ref(AVFilterContext *ctx,
 +                                  AVFilterBufferRef **bufref, int flags)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +    AVFilterLink *inlink = ctx->inputs[0];
 +    int ret;
 +    *bufref = NULL;
 +
 +    /* no picref available, fetch it from the filterchain */
 +    if (!av_fifo_size(buf->fifo)) {
 +        if ((ret = avfilter_request_frame(inlink)) < 0)
 +            return ret;
 +    }
 +
 +    if (!av_fifo_size(buf->fifo))
 +        return AVERROR(EINVAL);
 +
 +    if (flags & AV_BUFFERSINK_FLAG_PEEK)
 +        *bufref = *((AVFilterBufferRef **)av_fifo_peek2(buf->fifo, 0));
 +    else
 +        av_fifo_generic_read(buf->fifo, bufref, sizeof(*bufref), NULL);
 +
 +    return 0;
 +}
 +
 +int av_buffersink_poll_frame(AVFilterContext *ctx)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +    AVFilterLink *inlink = ctx->inputs[0];
 +
 +    return av_fifo_size(buf->fifo)/sizeof(AVFilterBufferRef *) + avfilter_poll_frame(inlink);
 +}
 +
 +#if FF_API_OLD_VSINK_API
 +int av_vsink_buffer_get_video_buffer_ref(AVFilterContext *ctx,
 +                                         AVFilterBufferRef **picref, int flags)
 +{
 +    return av_buffersink_get_buffer_ref(ctx, picref, flags);
 +}
 +#endif
 +
 +#if CONFIG_BUFFERSINK_FILTER
 +
 +static av_cold int vsink_init(AVFilterContext *ctx, const char *args, void *opaque)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +    av_unused AVBufferSinkParams *params;
 +
 +    if (!opaque) {
-         return AVERROR(EINVAL);
++        av_log(ctx, AV_LOG_WARNING,
 +               "No opaque field provided\n");
-     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(buf->pixel_fmts));
++        buf->pixel_fmts = NULL;
 +    } else {
 +#if FF_API_OLD_VSINK_API
 +        const int *pixel_fmts = (const enum PixelFormat *)opaque;
 +#else
 +        params = (AVBufferSinkParams *)opaque;
 +        const int *pixel_fmts = params->pixel_fmts;
 +#endif
 +        buf->pixel_fmts = ff_copy_int_list(pixel_fmts);
 +        if (!buf->pixel_fmts)
 +            return AVERROR(ENOMEM);
 +    }
 +
 +    return common_init(ctx);
 +}
 +
 +static av_cold void vsink_uninit(AVFilterContext *ctx)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +    av_freep(&buf->pixel_fmts);
 +    return common_uninit(ctx);
 +}
 +
 +static int vsink_query_formats(AVFilterContext *ctx)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +
++    if (buf->pixel_fmts)
++        avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(buf->pixel_fmts));
++    else
++        avfilter_default_query_formats(ctx);
++
 +    return 0;
 +}
 +
 +AVFilter avfilter_vsink_buffersink = {
 +    .name      = "buffersink",
 +    .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
 +    .priv_size = sizeof(BufferSinkContext),
 +    .init      = vsink_init,
 +    .uninit    = vsink_uninit,
 +
 +    .query_formats = vsink_query_formats,
 +
 +    .inputs    = (const AVFilterPad[]) {{ .name    = "default",
 +                                    .type          = AVMEDIA_TYPE_VIDEO,
 +                                    .end_frame     = end_frame,
 +                                    .min_perms     = AV_PERM_READ, },
 +                                  { .name = NULL }},
 +    .outputs   = (const AVFilterPad[]) {{ .name = NULL }},
 +};
 +
 +#endif /* CONFIG_BUFFERSINK_FILTER */
 +
 +#if CONFIG_ABUFFERSINK_FILTER
 +
 +static void filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
 +{
 +    end_frame(link);
 +}
 +
 +static av_cold int asink_init(AVFilterContext *ctx, const char *args, void *opaque)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +    AVABufferSinkParams *params;
 +
 +    if (!opaque) {
 +        av_log(ctx, AV_LOG_ERROR,
 +               "No opaque field provided, an AVABufferSinkParams struct is required\n");
 +        return AVERROR(EINVAL);
 +    } else
 +        params = (AVABufferSinkParams *)opaque;
 +
 +    buf->sample_fmts     = ff_copy_int_list  (params->sample_fmts);
 +    buf->channel_layouts = ff_copy_int64_list(params->channel_layouts);
 +    buf->packing_fmts    = ff_copy_int_list  (params->packing_fmts);
 +    if (!buf->sample_fmts || !buf->channel_layouts || !buf->packing_fmts) {
 +        av_freep(&buf->sample_fmts);
 +        av_freep(&buf->channel_layouts);
 +        av_freep(&buf->packing_fmts);
 +        return AVERROR(ENOMEM);
 +    }
 +
 +    return common_init(ctx);
 +}
 +
 +static av_cold void asink_uninit(AVFilterContext *ctx)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +
 +    av_freep(&buf->sample_fmts);
 +    av_freep(&buf->channel_layouts);
 +    av_freep(&buf->packing_fmts);
 +    return common_uninit(ctx);
 +}
 +
 +static int asink_query_formats(AVFilterContext *ctx)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +    AVFilterFormats *formats = NULL;
 +
 +    if (!(formats = avfilter_make_format_list(buf->sample_fmts)))
 +        return AVERROR(ENOMEM);
 +    avfilter_set_common_sample_formats(ctx, formats);
 +
 +    if (!(formats = avfilter_make_format64_list(buf->channel_layouts)))
 +        return AVERROR(ENOMEM);
 +    avfilter_set_common_channel_layouts(ctx, formats);
 +
 +    if (!(formats = avfilter_make_format_list(buf->packing_fmts)))
 +        return AVERROR(ENOMEM);
 +    avfilter_set_common_packing_formats(ctx, formats);
 +
 +    return 0;
 +}
 +
 +AVFilter avfilter_asink_abuffersink = {
 +    .name      = "abuffersink",
 +    .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
 +    .init      = asink_init,
 +    .uninit    = asink_uninit,
 +    .priv_size = sizeof(BufferSinkContext),
 +    .query_formats = asink_query_formats,
 +
 +    .inputs    = (const AVFilterPad[]) {{ .name     = "default",
 +                                    .type           = AVMEDIA_TYPE_AUDIO,
 +                                    .filter_samples = filter_samples,
 +                                    .min_perms      = AV_PERM_READ, },
 +                                  { .name = NULL }},
 +    .outputs   = (const AVFilterPad[]) {{ .name = NULL }},
 +};
 +
 +#endif /* CONFIG_ABUFFERSINK_FILTER */
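
For completeness, the consumer side of the sink added above: av_buffersink_get_buffer_ref() pops one buffered reference (or pulls one from upstream when the FIFO is empty), and AV_BUFFERSINK_FLAG_PEEK inspects without consuming. A minimal sketch of a drain loop over an already-configured sink instance; the caller owns each returned reference:

    #include "libavfilter/avfilter.h"
    #include "libavfilter/buffersink.h"

    /* sketch: pull frames out of a configured buffersink until upstream
     * has nothing more to deliver */
    static int drain_sink(AVFilterContext *sink)
    {
        AVFilterBufferRef *picref;
        int ret;

        while ((ret = av_buffersink_get_buffer_ref(sink, &picref, 0)) >= 0) {
            /* ... use picref->data / picref->video here ... */
            avfilter_unref_buffer(picref); /* release our reference */
        }
        return ret; /* negative error from the upstream request at EOF */
    }
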
@@@ -71,11 -70,6 +71,11 @@@ if [ -n "$do_mpeg2thread_ilace" ]; the
  # mpeg2 encoding interlaced using intra vlc
  do_video_encoding mpeg2threadivlc.mpg "-qscale 10 -vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -intra_vlc 1 -threads 2 -slices 2"
  do_video_decoding
- file=${outfile}mpeg2reuse.mpg
- do_avconv $file $DEC_OPTS -me_threshold 256 -i ${target_path}/${outfile}mpeg2thread.mpg $ENC_OPTS -same_quant -me_threshold 256 -mb_threshold 1024 -vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -threads 4
- do_video_decoding
 +
 +# mpeg2 encoding interlaced
++#file=${outfile}mpeg2reuse.mpg
++#do_avconv $file $DEC_OPTS -me_threshold 256 -i ${target_path}/${outfile}mpeg2thread.mpg $ENC_OPTS -same_quant -me_threshold 256 -mb_threshold 1024 -vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -threads 4
++#do_video_decoding
  fi
  
  if [ -n "$do_msmpeg4v2" ] ; then