# first so "all" becomes default target
all: all-yes
- $(CC) $(CPPFLAGS) $(CFLAGS) -Wno-unused -c -o $@ -x c $<
+ifndef SUBDIR
+
+ifndef V
+Q = @
+ECHO = printf "$(1)\t%s\n" $(2)
+BRIEF = CC CXX AS YASM AR LD HOSTCC STRIP CP
+SILENT = DEPCC YASMDEP RM RANLIB
+MSG = $@
+M = @$(call ECHO,$(TAG),$@);
+$(foreach VAR,$(BRIEF), \
+ $(eval override $(VAR) = @$$(call ECHO,$(VAR),$$(MSG)); $($(VAR))))
+$(foreach VAR,$(SILENT),$(eval override $(VAR) = @$($(VAR))))
+$(eval INSTALL = @$(call ECHO,INSTALL,$$(^:$(SRC_DIR)/%=%)); $(INSTALL))
+endif
+
+ALLFFLIBS = avcodec avdevice avfilter avformat avresample avutil postproc swscale swresample
+
+# NASM requires -I path terminated with /
+IFLAGS := -I. -I$(SRC_PATH)/
+CPPFLAGS := $(IFLAGS) $(CPPFLAGS)
+CFLAGS += $(ECFLAGS)
+CCFLAGS = $(CFLAGS)
+CXXFLAGS := $(CFLAGS) $(CXXFLAGS)
+YASMFLAGS += $(IFLAGS) -I$(SRC_PATH)/libavutil/x86/ -Pconfig.asm
+HOSTCFLAGS += $(IFLAGS)
+LDFLAGS := $(ALLFFLIBS:%=-Llib%) $(LDFLAGS)
+
+define COMPILE
+ $($(1)DEP)
+ $($(1)) $(CPPFLAGS) $($(1)FLAGS) $($(1)_DEPFLAGS) -c $($(1)_O) $<
+endef
+
+COMPILE_C = $(call COMPILE,CC)
+COMPILE_CXX = $(call COMPILE,CXX)
+COMPILE_S = $(call COMPILE,AS)
+
+%.o: %.c
+ $(COMPILE_C)
+
+%.o: %.cpp
+ $(COMPILE_CXX)
+
+%.s: %.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) -S -o $@ $<
+
+%.o: %.S
+ $(COMPILE_S)
+
+%.ho: %.h
++ $(CC) $(CPPFLAGS) $(CFLAGS) -c -o $@ -x c $<
+
+%.ver: %.v
+ $(Q)sed 's/$$MAJOR/$($(basename $(@F))_VERSION_MAJOR)/' $^ > $@
+
+%.c %.h: TAG = GEN
+
+# Dummy rule to stop make trying to rebuild removed or renamed headers
+%.h:
+ @:
+
+# Disable suffix rules. Most of the builtin rules are suffix rules,
+# so this saves some time on slow systems.
+.SUFFIXES:
+
+# Do not delete intermediate files from chains of implicit rules
+$(OBJS):
+endif
+
include $(SRC_PATH)/arch.mak
OBJS += $(OBJS-yes)
*/
enum AVMediaType avfilter_pad_get_type(AVFilterPad *pads, int pad_idx);
- void avfilter_default_end_frame(AVFilterLink *link);
+/** default handler for end_frame() for video inputs */
+attribute_deprecated
++int avfilter_default_end_frame(AVFilterLink *link);
+
/**
* Filter definition. This defines the pads a filter contains, and all the
* callback functions used to interact with the filter.
{
BufferSinkContext *s = link->dst->priv;
- av_assert0(!s->cur_buf);
+// av_assert0(!s->cur_buf);
s->cur_buf = buf;
link->cur_buf = NULL;
- };
- static int filter_samples(AVFilterLink *link, AVFilterBufferRef *buf)
- {
- start_frame(link, buf);
return 0;
- }
+ };
int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
{
}
inlink->cur_buf = NULL;
- ff_start_frame(outlink, picref);
+ return ff_start_frame(outlink, picref);
}
+static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFilterBufferRef *outsamples = insamples;
+
+ if (av_cmp_q(inlink->time_base, outlink->time_base)) {
+ outsamples = avfilter_ref_buffer(insamples, ~0);
+ outsamples->pts = av_rescale_q(insamples->pts, inlink->time_base, outlink->time_base);
+ av_log(ctx, AV_LOG_DEBUG, "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
+ inlink ->time_base.num, inlink ->time_base.den, insamples ->pts,
+ outlink->time_base.num, outlink->time_base.den, outsamples->pts);
+ avfilter_unref_buffer(insamples);
+ }
+
+ return ff_filter_samples(outlink, outsamples);
+}
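
A worked example of the av_rescale_q() call above, with illustrative values: moving from an input timebase of 1/44100 to an output timebase of 1/48000, a pts of 44100 (one second) becomes 48000, the same instant re-expressed in the new timebase.

    /* Illustration only: same instant, new timebase. */
    int64_t pts = av_rescale_q(44100, (AVRational){1, 44100}, (AVRational){1, 48000});
    /* pts == 48000 */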
+
+#if CONFIG_SETTB_FILTER
AVFilter avfilter_vf_settb = {
.name = "settb",
- .description = NULL_IF_CONFIG_SMALL("Set timebase for the output link."),
+ .description = NULL_IF_CONFIG_SMALL("Set timebase for the video output link."),
+ .init = init,
+
+ .priv_size = sizeof(SetTBContext),
+
+ .inputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .get_video_buffer = ff_null_get_video_buffer,
+ .start_frame = start_frame,
+ .end_frame = ff_null_end_frame },
+ { .name = NULL }
+ },
+ .outputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output_props, },
+ { .name = NULL}
+ },
+};
+#endif
+
+#if CONFIG_ASETTB_FILTER
+AVFilter avfilter_af_asettb = {
+ .name = "asettb",
+ .description = NULL_IF_CONFIG_SMALL("Set timebase for the audio output link."),
.init = init,
.priv_size = sizeof(SetTBContext),
--- /dev/null
- static void end_frame(AVFilterLink *inlink)
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * buffer sink
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/fifo.h"
+#include "avfilter.h"
+#include "buffersink.h"
+#include "internal.h"
+
+AVBufferSinkParams *av_buffersink_params_alloc(void)
+{
+ static const int pixel_fmts[] = { -1 };
+ AVBufferSinkParams *params = av_malloc(sizeof(AVBufferSinkParams));
+ if (!params)
+ return NULL;
+
+ params->pixel_fmts = pixel_fmts;
+ return params;
+}
+
+AVABufferSinkParams *av_abuffersink_params_alloc(void)
+{
+ static const int sample_fmts[] = { -1 };
+ static const int64_t channel_layouts[] = { -1 };
+ AVABufferSinkParams *params = av_malloc(sizeof(AVABufferSinkParams));
+
+ if (!params)
+ return NULL;
+
+ params->sample_fmts = sample_fmts;
+ params->channel_layouts = channel_layouts;
+ return params;
+}
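
Both allocators return lists that accept everything (terminated immediately by -1); callers are expected to point the fields at their own lists before handing the struct to the sink. A minimal sketch of that pattern, assuming an already-built AVFilterGraph *graph and eliding error checks:

    /* Sketch: restrict a video buffersink to one pixel format. */
    static const int fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
    AVFilterContext *sink = NULL;
    AVBufferSinkParams *params = av_buffersink_params_alloc();
    params->pixel_fmts = fmts;
    avfilter_graph_create_filter(&sink, avfilter_get_by_name("buffersink"),
                                 "out", NULL, params, graph);
    av_free(params);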
+
+typedef struct {
+ AVFifoBuffer *fifo; ///< FIFO buffer of video frame references
+ unsigned warning_limit;
+
+ /* only used for video */
+ enum PixelFormat *pixel_fmts; ///< list of accepted pixel formats, must be terminated with -1
+
+ /* only used for audio */
+ enum AVSampleFormat *sample_fmts; ///< list of accepted sample formats, terminated by AV_SAMPLE_FMT_NONE
+ int64_t *channel_layouts; ///< list of accepted channel layouts, terminated by -1
+} BufferSinkContext;
+
+#define FIFO_INIT_SIZE 8
+
+static av_cold int common_init(AVFilterContext *ctx)
+{
+ BufferSinkContext *buf = ctx->priv;
+
+ buf->fifo = av_fifo_alloc(FIFO_INIT_SIZE*sizeof(AVFilterBufferRef *));
+ if (!buf->fifo) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo\n");
+ return AVERROR(ENOMEM);
+ }
+ buf->warning_limit = 100;
+ return 0;
+}
+
+static av_cold void common_uninit(AVFilterContext *ctx)
+{
+ BufferSinkContext *buf = ctx->priv;
+ AVFilterBufferRef *picref;
+
+ if (buf->fifo) {
+ while (av_fifo_size(buf->fifo) >= sizeof(AVFilterBufferRef *)) {
+ av_fifo_generic_read(buf->fifo, &picref, sizeof(picref), NULL);
+ avfilter_unref_buffer(picref);
+ }
+ av_fifo_free(buf->fifo);
+ buf->fifo = NULL;
+ }
+}
+
- return;
++static int end_frame(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ BufferSinkContext *buf = inlink->dst->priv;
+
+ av_assert1(inlink->cur_buf);
+ if (av_fifo_space(buf->fifo) < sizeof(AVFilterBufferRef *)) {
+ /* realloc fifo size */
+ if (av_fifo_realloc2(buf->fifo, av_fifo_size(buf->fifo) * 2) < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Cannot buffer more frames. Consume some available frames "
+ "before adding new ones.\n");
++ return AVERROR(ENOMEM);
+ }
+ }
+
+ /* cache frame */
+ av_fifo_generic_write(buf->fifo,
+ &inlink->cur_buf, sizeof(AVFilterBufferRef *), NULL);
+ inlink->cur_buf = NULL;
+ if (buf->warning_limit &&
+ av_fifo_size(buf->fifo) / sizeof(AVFilterBufferRef *) >= buf->warning_limit) {
+ av_log(ctx, AV_LOG_WARNING,
+ "%d buffers queued in %s, something may be wrong.\n",
+ buf->warning_limit,
+ (char *)av_x_if_null(ctx->name, ctx->filter->name));
+ buf->warning_limit *= 10;
+ }
++ return 0;
+}
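
Note the growth policy implied above: the FIFO starts at FIFO_INIT_SIZE (8) buffer references and av_fifo_realloc2() doubles the byte size whenever it fills, so capacity grows geometrically (8, 16, 32, ...), while warning_limit flags runaway queues at 100, then 1000, and so on.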
+
+void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
+{
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ inlink->min_samples = inlink->max_samples =
+ inlink->partial_buf_size = frame_size;
+}
+
+int av_buffersink_get_buffer_ref(AVFilterContext *ctx,
+ AVFilterBufferRef **bufref, int flags)
+{
+ BufferSinkContext *buf = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ int ret;
+ *bufref = NULL;
+
+ av_assert0(!strcmp(ctx->filter->name, "buffersink") || !strcmp(ctx->filter->name, "abuffersink"));
+
+ /* no picref available, fetch it from the filterchain */
+ if (!av_fifo_size(buf->fifo)) {
+ if (flags & AV_BUFFERSINK_FLAG_NO_REQUEST)
+ return AVERROR(EAGAIN);
+ if ((ret = ff_request_frame(inlink)) < 0)
+ return ret;
+ }
+
+ if (!av_fifo_size(buf->fifo))
+ return AVERROR(EINVAL);
+
+ if (flags & AV_BUFFERSINK_FLAG_PEEK)
+ *bufref = *((AVFilterBufferRef **)av_fifo_peek2(buf->fifo, 0));
+ else
+ av_fifo_generic_read(buf->fifo, bufref, sizeof(*bufref), NULL);
+
+ return 0;
+}
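
A hedged sketch of the consumer side this API implies, assuming a configured AVFilterContext *sink: AV_BUFFERSINK_FLAG_NO_REQUEST drains frames already queued without running the graph, after which a plain call pulls one more frame through the filterchain.

    AVFilterBufferRef *frame;
    while (av_buffersink_get_buffer_ref(sink, &frame, AV_BUFFERSINK_FLAG_NO_REQUEST) >= 0) {
        /* ... consume frame ... */
        avfilter_unref_buffer(frame);
    }
    if (av_buffersink_get_buffer_ref(sink, &frame, 0) >= 0) { /* may run the graph */
        avfilter_unref_buffer(frame);
    }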
+
+AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx)
+{
+ av_assert0(!strcmp(ctx->filter->name, "buffersink"));
+
+ return ctx->inputs[0]->frame_rate;
+}
+
+int av_buffersink_poll_frame(AVFilterContext *ctx)
+{
+ BufferSinkContext *buf = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ av_assert0(!strcmp(ctx->filter->name, "buffersink") || !strcmp(ctx->filter->name, "abuffersink"));
+
+ return av_fifo_size(buf->fifo)/sizeof(AVFilterBufferRef *) + ff_poll_frame(inlink);
+}
+
+#if CONFIG_BUFFERSINK_FILTER
+
+static av_cold int vsink_init(AVFilterContext *ctx, const char *args, void *opaque)
+{
+ BufferSinkContext *buf = ctx->priv;
+ AVBufferSinkParams *params = opaque;
+
+ if (params && params->pixel_fmts) {
+ const int *pixel_fmts = params->pixel_fmts;
+
+ buf->pixel_fmts = ff_copy_int_list(pixel_fmts);
+ if (!buf->pixel_fmts)
+ return AVERROR(ENOMEM);
+ }
+
+ return common_init(ctx);
+}
+
+static av_cold void vsink_uninit(AVFilterContext *ctx)
+{
+ BufferSinkContext *buf = ctx->priv;
+ av_freep(&buf->pixel_fmts);
+ common_uninit(ctx);
+}
+
+static int vsink_query_formats(AVFilterContext *ctx)
+{
+ BufferSinkContext *buf = ctx->priv;
+
+ if (buf->pixel_fmts)
+ ff_set_common_formats(ctx, ff_make_format_list(buf->pixel_fmts));
+ else
+ ff_default_query_formats(ctx);
+
+ return 0;
+}
+
+AVFilter avfilter_vsink_buffersink = {
+ .name = "buffersink",
+ .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
+ .priv_size = sizeof(BufferSinkContext),
+ .init_opaque = vsink_init,
+ .uninit = vsink_uninit,
+
+ .query_formats = vsink_query_formats,
+
+ .inputs = (const AVFilterPad[]) {{ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .end_frame = end_frame,
+ .min_perms = AV_PERM_READ, },
+ { .name = NULL }},
+ .outputs = (const AVFilterPad[]) {{ .name = NULL }},
+};
+
+#endif /* CONFIG_BUFFERSINK_FILTER */
+
+#if CONFIG_ABUFFERSINK_FILTER
+
+static int filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
+{
+ end_frame(link);
+ return 0;
+}
+
+static av_cold int asink_init(AVFilterContext *ctx, const char *args, void *opaque)
+{
+ BufferSinkContext *buf = ctx->priv;
+ AVABufferSinkParams *params = opaque;
+
+ if (params && params->sample_fmts) {
+ buf->sample_fmts = ff_copy_int_list (params->sample_fmts);
+ if (!buf->sample_fmts)
+ goto fail_enomem;
+ }
+ if (params && params->channel_layouts) {
+ buf->channel_layouts = ff_copy_int64_list(params->channel_layouts);
+ if (!buf->channel_layouts)
+ goto fail_enomem;
+ }
+ if (!common_init(ctx))
+ return 0;
+
+fail_enomem:
+ av_freep(&buf->sample_fmts);
+ av_freep(&buf->channel_layouts);
+ return AVERROR(ENOMEM);
+}
+
+static av_cold void asink_uninit(AVFilterContext *ctx)
+{
+ BufferSinkContext *buf = ctx->priv;
+
+ av_freep(&buf->sample_fmts);
+ av_freep(&buf->channel_layouts);
+ common_uninit(ctx);
+}
+
+static int asink_query_formats(AVFilterContext *ctx)
+{
+ BufferSinkContext *buf = ctx->priv;
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+
+ if (buf->sample_fmts) {
+ if (!(formats = ff_make_format_list(buf->sample_fmts)))
+ return AVERROR(ENOMEM);
+ ff_set_common_formats(ctx, formats);
+ }
+
+ if (buf->channel_layouts) {
+ if (!(layouts = avfilter_make_format64_list(buf->channel_layouts)))
+ return AVERROR(ENOMEM);
+ ff_set_common_channel_layouts(ctx, layouts);
+ }
+
+ return 0;
+}
+
+AVFilter avfilter_asink_abuffersink = {
+ .name = "abuffersink",
+ .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
+ .init_opaque = asink_init,
+ .uninit = asink_uninit,
+ .priv_size = sizeof(BufferSinkContext),
+ .query_formats = asink_query_formats,
+
+ .inputs = (const AVFilterPad[]) {{ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_samples = filter_samples,
+ .min_perms = AV_PERM_READ, },
+ { .name = NULL }},
+ .outputs = (const AVFilterPad[]) {{ .name = NULL }},
+};
+
+#endif /* CONFIG_ABUFFERSINK_FILTER */
--- /dev/null
- static void draw_slice(AVFilterLink *inlink, int y0, int h, int slice_dir)
+/*
+ * Copyright (c) 2012 Steven Robertson
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simple channel-swapping filter to get at the alpha component
+ */
+
+#include <string.h>
+
+#include "libavutil/pixfmt.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "video.h"
+
+enum { Y, U, V, A };
+
+typedef struct {
+ int is_packed_rgb;
+ uint8_t rgba_map[4];
+} AlphaExtractContext;
+
+static int query_formats(AVFilterContext *ctx)
+{
+ enum PixelFormat in_fmts[] = {
+ PIX_FMT_YUVA444P, PIX_FMT_YUVA422P, PIX_FMT_YUVA420P,
+ PIX_FMT_RGBA, PIX_FMT_BGRA, PIX_FMT_ARGB, PIX_FMT_ABGR,
+ PIX_FMT_NONE
+ };
+ enum PixelFormat out_fmts[] = { PIX_FMT_GRAY8, PIX_FMT_NONE };
+ ff_formats_ref(ff_make_format_list(in_fmts), &ctx->inputs[0]->out_formats);
+ ff_formats_ref(ff_make_format_list(out_fmts), &ctx->outputs[0]->in_formats);
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AlphaExtractContext *extract = inlink->dst->priv;
+ extract->is_packed_rgb =
+ ff_fill_rgba_map(extract->rgba_map, inlink->format) >= 0;
+ return 0;
+}
+
- ff_draw_slice(inlink->dst->outputs[0], y0, h, slice_dir);
++static int draw_slice(AVFilterLink *inlink, int y0, int h, int slice_dir)
+{
+ AlphaExtractContext *extract = inlink->dst->priv;
+ AVFilterBufferRef *cur_buf = inlink->cur_buf;
+ AVFilterBufferRef *out_buf = inlink->dst->outputs[0]->out_buf;
+
+ if (extract->is_packed_rgb) {
+ int x, y;
+ uint8_t *pin, *pout;
+ for (y = y0; y < (y0 + h); y++) {
+ pin = cur_buf->data[0] + y * cur_buf->linesize[0] + extract->rgba_map[A];
+ pout = out_buf->data[0] + y * out_buf->linesize[0];
+ for (x = 0; x < out_buf->video->w; x++) {
+ *pout = *pin;
+ pout += 1;
+ pin += 4;
+ }
+ }
+ } else if (cur_buf->linesize[A] == out_buf->linesize[Y]) {
+ const int linesize = cur_buf->linesize[A];
+ memcpy(out_buf->data[Y] + y0 * linesize,
+ cur_buf->data[A] + y0 * linesize,
+ linesize * h);
+ } else {
+ const int linesize = FFMIN(out_buf->linesize[Y], cur_buf->linesize[A]);
+ int y;
+ for (y = y0; y < (y0 + h); y++) {
+ memcpy(out_buf->data[Y] + y * out_buf->linesize[Y],
+ cur_buf->data[A] + y * cur_buf->linesize[A],
+ linesize);
+ }
+ }
++ return ff_draw_slice(inlink->dst->outputs[0], y0, h, slice_dir);
+}
+
+AVFilter avfilter_vf_alphaextract = {
+ .name = "alphaextract",
+ .description = NULL_IF_CONFIG_SMALL("Extract an alpha channel as a "
+ "grayscale image component."),
+ .priv_size = sizeof(AlphaExtractContext),
+ .query_formats = query_formats,
+
+ .inputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .draw_slice = draw_slice,
+ .min_perms = AV_PERM_READ },
+ { .name = NULL }
+ },
+ .outputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO, },
+ { .name = NULL }
+ },
+};
--- /dev/null
- static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) {}
- static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) {}
+/*
+ * Copyright (c) 2012 Steven Robertson
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * copy an alpha component from another video's luma
+ */
+
+#include <string.h>
+
+#include "libavutil/pixfmt.h"
+#include "avfilter.h"
+#include "bufferqueue.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+enum { Y, U, V, A };
+
+typedef struct {
+ int frame_requested;
+ int is_packed_rgb;
+ uint8_t rgba_map[4];
+ struct FFBufQueue queue_main;
+ struct FFBufQueue queue_alpha;
+} AlphaMergeContext;
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AlphaMergeContext *merge = ctx->priv;
+ ff_bufqueue_discard_all(&merge->queue_main);
+ ff_bufqueue_discard_all(&merge->queue_alpha);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ enum PixelFormat main_fmts[] = {
+ PIX_FMT_YUVA444P, PIX_FMT_YUVA422P, PIX_FMT_YUVA420P,
+ PIX_FMT_RGBA, PIX_FMT_BGRA, PIX_FMT_ARGB, PIX_FMT_ABGR,
+ PIX_FMT_NONE
+ };
+ enum PixelFormat alpha_fmts[] = { PIX_FMT_GRAY8, PIX_FMT_NONE };
+ AVFilterFormats *main_formats = ff_make_format_list(main_fmts);
+ AVFilterFormats *alpha_formats = ff_make_format_list(alpha_fmts);
+ ff_formats_ref(main_formats, &ctx->inputs[0]->out_formats);
+ ff_formats_ref(alpha_formats, &ctx->inputs[1]->out_formats);
+ ff_formats_ref(main_formats, &ctx->outputs[0]->in_formats);
+ return 0;
+}
+
+static int config_input_main(AVFilterLink *inlink)
+{
+ AlphaMergeContext *merge = inlink->dst->priv;
+ merge->is_packed_rgb =
+ ff_fill_rgba_map(merge->rgba_map, inlink->format) >= 0;
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *mainlink = ctx->inputs[0];
+ AVFilterLink *alphalink = ctx->inputs[1];
+ if (mainlink->w != alphalink->w || mainlink->h != alphalink->h) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Input frame sizes do not match (%dx%d vs %dx%d).\n",
+ mainlink->w, mainlink->h,
+ alphalink->w, alphalink->h);
+ return AVERROR(EINVAL);
+ }
+
+ outlink->w = mainlink->w;
+ outlink->h = mainlink->h;
+ outlink->time_base = mainlink->time_base;
+ outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
+ outlink->frame_rate = mainlink->frame_rate;
+ return 0;
+}
+
- static void end_frame(AVFilterLink *inlink)
++static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) {return 0;}
++static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) {return 0;}
+
+static void draw_frame(AVFilterContext *ctx,
+ AVFilterBufferRef *main_buf,
+ AVFilterBufferRef *alpha_buf)
+{
+ AlphaMergeContext *merge = ctx->priv;
+ int h = main_buf->video->h;
+
+ if (merge->is_packed_rgb) {
+ int x, y;
+ uint8_t *pin, *pout;
+ for (y = 0; y < h; y++) {
+ pin = alpha_buf->data[0] + y * alpha_buf->linesize[0];
+ pout = main_buf->data[0] + y * main_buf->linesize[0] + merge->rgba_map[A];
+ for (x = 0; x < main_buf->video->w; x++) {
+ *pout = *pin;
+ pin += 1;
+ pout += 4;
+ }
+ }
+ } else {
+ int y;
+ const int main_linesize = main_buf->linesize[A];
+ const int alpha_linesize = alpha_buf->linesize[Y];
+ for (y = 0; y < h && y < alpha_buf->video->h; y++) {
+ memcpy(main_buf->data[A] + y * main_linesize,
+ alpha_buf->data[Y] + y * alpha_linesize,
+ FFMIN(main_linesize, alpha_linesize));
+ }
+ }
+ ff_draw_slice(ctx->outputs[0], 0, h, 1);
+}
+
++static int end_frame(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AlphaMergeContext *merge = ctx->priv;
+
+ int is_alpha = (inlink == ctx->inputs[1]);
+ struct FFBufQueue *queue =
+ (is_alpha ? &merge->queue_alpha : &merge->queue_main);
+ ff_bufqueue_add(ctx, queue, inlink->cur_buf);
+ inlink->cur_buf = NULL;
+
+ while (1) {
+ AVFilterBufferRef *main_buf, *alpha_buf;
+
+ if (!ff_bufqueue_peek(&merge->queue_main, 0) ||
+ !ff_bufqueue_peek(&merge->queue_alpha, 0)) break;
+
+ main_buf = ff_bufqueue_get(&merge->queue_main);
+ alpha_buf = ff_bufqueue_get(&merge->queue_alpha);
+
+ ctx->outputs[0]->out_buf = main_buf;
+ ff_start_frame(ctx->outputs[0], avfilter_ref_buffer(main_buf, ~0));
+ merge->frame_requested = 0;
+ draw_frame(ctx, main_buf, alpha_buf);
+ ff_end_frame(ctx->outputs[0]);
+ avfilter_unref_buffer(alpha_buf);
+ }
++ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AlphaMergeContext *merge = ctx->priv;
+ int in, ret;
+
+ merge->frame_requested = 1;
+ while (merge->frame_requested) {
+ in = ff_bufqueue_peek(&merge->queue_main, 0) ? 0 : 1;
+ ret = ff_request_frame(ctx->inputs[in]);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+AVFilter avfilter_vf_alphamerge = {
+ .name = "alphamerge",
+ .description = NULL_IF_CONFIG_SMALL("Copy the luma value of the second "
+ "input into the alpha channel of the first input."),
+ .uninit = uninit,
+ .priv_size = sizeof(AlphaMergeContext),
+ .query_formats = query_formats,
+
+ .inputs = (const AVFilterPad[]) {
+ { .name = "main",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input_main,
+ .get_video_buffer = ff_null_get_video_buffer,
+ .start_frame = start_frame,
+ .draw_slice = draw_slice,
+ .end_frame = end_frame,
+ .min_perms = AV_PERM_READ | AV_PERM_WRITE,
+ .rej_perms = AV_PERM_REUSE2 | AV_PERM_PRESERVE },
+ { .name = "alpha",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .start_frame = start_frame,
+ .draw_slice = draw_slice,
+ .end_frame = end_frame,
+ .min_perms = AV_PERM_READ,
+ .rej_perms = AV_PERM_REUSE2 },
+ { .name = NULL }
+ },
+ .outputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame },
+ { .name = NULL }
+ },
+};
{
AspectContext *aspect = link->dst->priv;
- picref->video->pixel_aspect = aspect->aspect;
+ picref->video->sample_aspect_ratio = aspect->ratio;
link->cur_buf = NULL;
- ff_start_frame(link->dst->outputs[0], picref);
+ return ff_start_frame(link->dst->outputs[0], picref);
}
#if CONFIG_SETDAR_FILTER
--- /dev/null
- static void null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { }
+/*
+ * Copyright (c) 2011 Baptiste Coudurier
+ * Copyright (c) 2011 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Libass subtitles burning filter.
+ *
+ * @see{http://www.matroska.org/technical/specs/subtitles/ssa.html}
+ */
+
+#include <ass/ass.h>
+
+#include "libavutil/avstring.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "drawutils.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "formats.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ ASS_Library *library;
+ ASS_Renderer *renderer;
+ ASS_Track *track;
+ char *filename;
+ uint8_t rgba_map[4];
+ int pix_step[4]; ///< steps per pixel for each plane of the main output
+ int original_w, original_h;
+ FFDrawContext draw;
+} AssContext;
+
+#define OFFSET(x) offsetof(AssContext, x)
+
+static const AVOption ass_options[] = {
+ {"original_size", "set the size of the original video (used to scale fonts)", OFFSET(original_w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, CHAR_MIN, CHAR_MAX },
+ {NULL},
+};
+
+AVFILTER_DEFINE_CLASS(ass);
+
+/* libass supports a log level ranging from 0 to 7 */
+int ass_libavfilter_log_level_map[] = {
+ AV_LOG_QUIET, /* 0 */
+ AV_LOG_PANIC, /* 1 */
+ AV_LOG_FATAL, /* 2 */
+ AV_LOG_ERROR, /* 3 */
+ AV_LOG_WARNING, /* 4 */
+ AV_LOG_INFO, /* 5 */
+ AV_LOG_VERBOSE, /* 6 */
+ AV_LOG_DEBUG, /* 7 */
+};
+
+static void ass_log(int ass_level, const char *fmt, va_list args, void *ctx)
+{
+ int level = ass_libavfilter_log_level_map[ass_level];
+
+ av_vlog(ctx, level, fmt, args);
+ av_log(ctx, level, "\n");
+}
+
+static av_cold int init(AVFilterContext *ctx, const char *args)
+{
+ AssContext *ass = ctx->priv;
+ int ret;
+
+ ass->class = &ass_class;
+ av_opt_set_defaults(ass);
+
+ if (args)
+ ass->filename = av_get_token(&args, ":");
+ if (!ass->filename || !*ass->filename) {
+ av_log(ctx, AV_LOG_ERROR, "No filename provided!\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (*args++ == ':' && (ret = av_set_options_string(ass, args, "=", ":")) < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Error parsing options string: '%s'\n", args);
+ return ret;
+ }
+
+ ass->library = ass_library_init();
+ if (!ass->library) {
+ av_log(ctx, AV_LOG_ERROR, "Could not initialize libass.\n");
+ return AVERROR(EINVAL);
+ }
+ ass_set_message_cb(ass->library, ass_log, ctx);
+
+ ass->renderer = ass_renderer_init(ass->library);
+ if (!ass->renderer) {
+ av_log(ctx, AV_LOG_ERROR, "Could not initialize libass renderer.\n");
+ return AVERROR(EINVAL);
+ }
+
+ ass->track = ass_read_file(ass->library, ass->filename, NULL);
+ if (!ass->track) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Could not create a libass track when reading file '%s'\n",
+ ass->filename);
+ return AVERROR(EINVAL);
+ }
+
+ ass_set_fonts(ass->renderer, NULL, NULL, 1, NULL, 1);
+ return 0;
+}
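
For reference, the args string parsed above is the subtitle filename optionally followed by ':'-separated AVOptions. A hedged instantiation sketch with hypothetical values ("subs.ass", a 1280x720 original size, an existing AVFilterGraph *graph):

    AVFilterContext *ass_ctx = NULL;
    avfilter_graph_create_filter(&ass_ctx, avfilter_get_by_name("ass"), "subs",
                                 "subs.ass:original_size=1280x720", NULL, graph);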
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AssContext *ass = ctx->priv;
+
+ av_freep(&ass->filename);
+ if (ass->track)
+ ass_free_track(ass->track);
+ if (ass->renderer)
+ ass_renderer_done(ass->renderer);
+ if (ass->library)
+ ass_library_done(ass->library);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AssContext *ass = inlink->dst->priv;
+
+ ff_draw_init(&ass->draw, inlink->format, 0);
+
+ ass_set_frame_size (ass->renderer, inlink->w, inlink->h);
+ if (ass->original_w && ass->original_h)
+ ass_set_aspect_ratio(ass->renderer, (double)inlink->w / inlink->h,
+ (double)ass->original_w / ass->original_h);
+
+ return 0;
+}
+
- static void end_frame(AVFilterLink *inlink)
++static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { return 0; }
+
+/* libass stores an RGBA color in the format RRGGBBTT, where TT is the transparency level */
+#define AR(c) ( (c)>>24)
+#define AG(c) (((c)>>16)&0xFF)
+#define AB(c) (((c)>>8) &0xFF)
+#define AA(c) ((0xFF-c) &0xFF)
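
A quick check of the unpacking macros on a hypothetical color value:

    /* 0x40FF80C0 -> R=0x40 G=0xFF B=0x80 TT=0xC0, so AA() yields 0xFF-0xC0 = 0x3F */
    uint32_t c = 0x40FF80C0;
    uint8_t rgba[4] = { AR(c), AG(c), AB(c), AA(c) }; /* {0x40, 0xFF, 0x80, 0x3F} */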
+
+static void overlay_ass_image(AssContext *ass, AVFilterBufferRef *picref,
+ const ASS_Image *image)
+{
+ for (; image; image = image->next) {
+ uint8_t rgba_color[] = {AR(image->color), AG(image->color), AB(image->color), AA(image->color)};
+ FFDrawColor color;
+ ff_draw_color(&ass->draw, &color, rgba_color);
+ ff_blend_mask(&ass->draw, &color,
+ picref->data, picref->linesize,
+ picref->video->w, picref->video->h,
+ image->bitmap, image->stride, image->w, image->h,
+ 3, 0, image->dst_x, image->dst_y);
+ }
+}
+
- ff_end_frame(outlink);
++static int end_frame(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AssContext *ass = ctx->priv;
+ AVFilterBufferRef *picref = inlink->cur_buf;
+ int detect_change = 0;
+ double time_ms = picref->pts * av_q2d(inlink->time_base) * 1000;
+ ASS_Image *image = ass_render_frame(ass->renderer, ass->track,
+ time_ms, &detect_change);
+
+ if (detect_change)
+ av_log(ctx, AV_LOG_DEBUG, "Change happened at time ms:%f\n", time_ms);
+
+ overlay_ass_image(ass, picref, image);
+
+ ff_draw_slice(outlink, 0, picref->video->h, 1);
++ return ff_end_frame(outlink);
+}
+
+AVFilter avfilter_vf_ass = {
+ .name = "ass",
+ .description = NULL_IF_CONFIG_SMALL("Render subtitles onto input video using the libass library."),
+ .priv_size = sizeof(AssContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+
+ .inputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .get_video_buffer = ff_null_get_video_buffer,
+ .start_frame = ff_null_start_frame,
+ .draw_slice = null_draw_slice,
+ .end_frame = end_frame,
+ .config_props = config_input,
+ .min_perms = AV_PERM_WRITE | AV_PERM_READ,
+ .rej_perms = AV_PERM_PRESERVE },
+ { .name = NULL}
+ },
+ .outputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO, },
+ { .name = NULL}
+ },
+};
--- /dev/null
- static void end_frame(AVFilterLink *inlink)
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * bounding box detection filter
+ */
+
+#include "libavutil/pixdesc.h"
+#include "libavutil/timestamp.h"
+#include "avfilter.h"
+#include "bbox.h"
+#include "internal.h"
+
+typedef struct {
+ unsigned int frame;
+ int vsub, hsub;
+} BBoxContext;
+
+static av_cold int init(AVFilterContext *ctx, const char *args)
+{
+ BBoxContext *bbox = ctx->priv;
+ bbox->frame = 0;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+ PIX_FMT_YUV420P,
+ PIX_FMT_YUV444P,
+ PIX_FMT_YUV440P,
+ PIX_FMT_YUV422P,
+ PIX_FMT_YUV411P,
+ PIX_FMT_NONE,
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
- ff_end_frame(inlink->dst->outputs[0]);
++static int end_frame(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ BBoxContext *bbox = ctx->priv;
+ AVFilterBufferRef *picref = inlink->cur_buf;
+ FFBoundingBox box;
+ int has_bbox, w, h;
+
+ has_bbox =
+ ff_calculate_bounding_box(&box,
+ picref->data[0], picref->linesize[0],
+ inlink->w, inlink->h, 16);
+ w = box.x2 - box.x1 + 1;
+ h = box.y2 - box.y1 + 1;
+
+ av_log(ctx, AV_LOG_INFO,
+ "n:%d pts:%s pts_time:%s", bbox->frame,
+ av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base));
+
+ if (has_bbox) {
+ av_log(ctx, AV_LOG_INFO,
+ " x1:%d x2:%d y1:%d y2:%d w:%d h:%d"
+ " crop=%d:%d:%d:%d drawbox=%d:%d:%d:%d",
+ box.x1, box.x2, box.y1, box.y2, w, h,
+ w, h, box.x1, box.y1, /* crop params */
+ box.x1, box.y1, w, h); /* drawbox params */
+ }
+ av_log(ctx, AV_LOG_INFO, "\n");
+
+ bbox->frame++;
++ return ff_end_frame(inlink->dst->outputs[0]);
+}
+
+AVFilter avfilter_vf_bbox = {
+ .name = "bbox",
+ .description = NULL_IF_CONFIG_SMALL("Compute bounding box for each frame."),
+ .priv_size = sizeof(BBoxContext),
+ .query_formats = query_formats,
+ .init = init,
+
+ .inputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .get_video_buffer = ff_null_get_video_buffer,
+ .start_frame = ff_null_start_frame_keep_ref,
+ .end_frame = end_frame,
+ .min_perms = AV_PERM_READ, },
+ { .name = NULL }
+ },
+
+ .outputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO },
+ { .name = NULL }
+ },
+};
--- /dev/null
- static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Video black detector, loosely based on blackframe with extended
+ * syntax and features
+ */
+
+#include <float.h>
+#include "libavutil/opt.h"
+#include "libavutil/timestamp.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ double black_min_duration_time; ///< minimum duration of detected black, in seconds
+ int64_t black_min_duration; ///< minimum duration of detected black, expressed in timebase units
+ int64_t black_start; ///< pts start time of the first black picture
+ int64_t black_end; ///< pts end time of the last black picture
+ int64_t last_picref_pts; ///< pts of the last input picture
+ int black_started;
+
+ double picture_black_ratio_th;
+ double pixel_black_th;
+ unsigned int pixel_black_th_i;
+
+ unsigned int frame_count; ///< frame number
+ unsigned int nb_black_pixels; ///< number of black pixels counted so far
+} BlackDetectContext;
+
+#define OFFSET(x) offsetof(BlackDetectContext, x)
+static const AVOption blackdetect_options[] = {
+ { "d", "set minimum detected black duration in seconds", OFFSET(black_min_duration_time), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 0, DBL_MAX},
+ { "black_min_duration", "set minimum detected black duration in seconds", OFFSET(black_min_duration_time), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 0, DBL_MAX},
+ { "picture_black_ratio_th", "set the picture black ratio threshold", OFFSET(picture_black_ratio_th), AV_OPT_TYPE_DOUBLE, {.dbl=.98}, 0, 1},
+ { "pic_th", "set the picture black ratio threshold", OFFSET(picture_black_ratio_th), AV_OPT_TYPE_DOUBLE, {.dbl=.98}, 0, 1},
+ { "pixel_black_th", "set the pixel black threshold", OFFSET(pixel_black_th), AV_OPT_TYPE_DOUBLE, {.dbl=.10}, 0, 1},
+ { "pix_th", "set the pixel black threshold", OFFSET(pixel_black_th), AV_OPT_TYPE_DOUBLE, {.dbl=.10}, 0, 1},
+ { NULL },
+};
+
+AVFILTER_DEFINE_CLASS(blackdetect);
+
+#define YUVJ_FORMATS \
+ PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUVJ440P
+
+static enum PixelFormat yuvj_formats[] = {
+ YUVJ_FORMATS, PIX_FMT_NONE
+};
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+ PIX_FMT_YUV410P, PIX_FMT_YUV420P, PIX_FMT_GRAY8, PIX_FMT_NV12,
+ PIX_FMT_NV21, PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV411P,
+ YUVJ_FORMATS,
+ PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx, const char *args)
+{
+ int ret;
+ BlackDetectContext *blackdetect = ctx->priv;
+
+ blackdetect->class = &blackdetect_class;
+ av_opt_set_defaults(blackdetect);
+
+ if ((ret = av_set_options_string(blackdetect, args, "=", ":")) < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Error parsing options string: '%s'\n", args);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ BlackDetectContext *blackdetect = ctx->priv;
+
+ blackdetect->black_min_duration =
+ blackdetect->black_min_duration_time / av_q2d(inlink->time_base);
+
+ blackdetect->pixel_black_th_i = ff_fmt_is_in(inlink->format, yuvj_formats) ?
+ // luminance_minimum_value + pixel_black_th * luminance_range_size
+ blackdetect->pixel_black_th * 255 :
+ 16 + blackdetect->pixel_black_th * (235 - 16);
+
+ av_log(blackdetect, AV_LOG_VERBOSE,
+ "black_min_duration:%s pixel_black_th:%f pixel_black_th_i:%d picture_black_ratio_th:%f\n",
+ av_ts2timestr(blackdetect->black_min_duration, &inlink->time_base),
+ blackdetect->pixel_black_th, blackdetect->pixel_black_th_i,
+ blackdetect->picture_black_ratio_th);
+ return 0;
+}
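
Plugging in the default pixel_black_th of 0.10 from the option table: full-range (YUVJ) input gives an integer threshold of 0.10 * 255 = 25 after truncation, while limited-range input gives 16 + 0.10 * (235 - 16) = 37, the value each luma sample is compared against in draw_slice() below.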
+
+static void check_black_end(AVFilterContext *ctx)
+{
+ BlackDetectContext *blackdetect = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ if ((blackdetect->black_end - blackdetect->black_start) >= blackdetect->black_min_duration) {
+ av_log(blackdetect, AV_LOG_INFO,
+ "black_start:%s black_end:%s black_duration:%s\n",
+ av_ts2timestr(blackdetect->black_start, &inlink->time_base),
+ av_ts2timestr(blackdetect->black_end, &inlink->time_base),
+ av_ts2timestr(blackdetect->black_end - blackdetect->black_start, &inlink->time_base));
+ }
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ BlackDetectContext *blackdetect = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ int ret = ff_request_frame(inlink);
+
+ if (ret == AVERROR_EOF && blackdetect->black_started) {
+ // FIXME: black_end should be set to last_picref_pts + last_picref_duration
+ blackdetect->black_end = blackdetect->last_picref_pts;
+ check_black_end(ctx);
+ }
+ return ret;
+}
+
- ff_draw_slice(ctx->outputs[0], y, h, slice_dir);
++static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
+{
+ AVFilterContext *ctx = inlink->dst;
+ BlackDetectContext *blackdetect = ctx->priv;
+ AVFilterBufferRef *picref = inlink->cur_buf;
+ int x, i;
+ const uint8_t *p = picref->data[0] + y * picref->linesize[0];
+
+ for (i = 0; i < h; i++) {
+ for (x = 0; x < inlink->w; x++)
+ blackdetect->nb_black_pixels += p[x] <= blackdetect->pixel_black_th_i;
+ p += picref->linesize[0];
+ }
+
- static void end_frame(AVFilterLink *inlink)
++ return ff_draw_slice(ctx->outputs[0], y, h, slice_dir);
+}
+
- ff_end_frame(inlink->dst->outputs[0]);
++static int end_frame(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ BlackDetectContext *blackdetect = ctx->priv;
+ AVFilterBufferRef *picref = inlink->cur_buf;
+ double picture_black_ratio = 0;
+
+ picture_black_ratio = (double)blackdetect->nb_black_pixels / (inlink->w * inlink->h);
+
+ av_log(ctx, AV_LOG_DEBUG,
+ "frame:%u picture_black_ratio:%f pos:%"PRId64" pts:%s t:%s type:%c\n",
+ blackdetect->frame_count, picture_black_ratio,
+ picref->pos, av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base),
+ av_get_picture_type_char(picref->video->pict_type));
+
+ if (picture_black_ratio >= blackdetect->picture_black_ratio_th) {
+ if (!blackdetect->black_started) {
+ /* black starts here */
+ blackdetect->black_started = 1;
+ blackdetect->black_start = picref->pts;
+ }
+ } else if (blackdetect->black_started) {
+ /* black ends here */
+ blackdetect->black_started = 0;
+ blackdetect->black_end = picref->pts;
+ check_black_end(ctx);
+ }
+
+ blackdetect->last_picref_pts = picref->pts;
+ blackdetect->frame_count++;
+ blackdetect->nb_black_pixels = 0;
++ return ff_end_frame(inlink->dst->outputs[0]);
+}
+
+AVFilter avfilter_vf_blackdetect = {
+ .name = "blackdetect",
+ .description = NULL_IF_CONFIG_SMALL("Detect video intervals that are (almost) black."),
+ .priv_size = sizeof(BlackDetectContext),
+ .init = init,
+ .query_formats = query_formats,
+
+ .inputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .draw_slice = draw_slice,
+ .get_video_buffer = ff_null_get_video_buffer,
+ .start_frame = ff_null_start_frame_keep_ref,
+ .end_frame = end_frame, },
+ { .name = NULL }
+ },
+
+ .outputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame, },
+ { .name = NULL }
+ },
+};
h, radius, power, temp);
}
- static void null_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) { }
-static int draw_slice(AVFilterLink *inlink, int y0, int h0, int slice_dir)
++static int null_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) { return 0; }
+
- static void end_frame(AVFilterLink *inlink)
++static int end_frame(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
BoxBlurContext *boxblur = ctx->priv;
w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane],
boxblur->temp);
- return ff_draw_slice(outlink, y0, h0, slice_dir);
+ ff_draw_slice(outlink, 0, inlink->h, 1);
- avfilter_default_end_frame(inlink);
++ return avfilter_default_end_frame(inlink);
}
AVFilter avfilter_vf_boxblur = {
--- /dev/null
- static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
+/*
+ * ColorMatrix v2.2 for Avisynth 2.5.x
+ *
+ * Copyright (C) 2006-2007 Kevin Stone
+ *
+ * ColorMatrix 1.x is Copyright (C) Wilbert Dijkhof
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/**
+ * @file
+ * ColorMatrix 2.0 is based on the original ColorMatrix filter by Wilbert
+ * Dijkhof. It adds the ability to convert between any of: Rec.709, FCC,
+ * Rec.601, and SMPTE 240M. It also makes pre and post clipping optional,
+ * adds an option to use scaled or non-scaled coefficients, and more...
+ */
+
+#include <strings.h>
+#include <float.h>
+#include "avfilter.h"
+#include "formats.h"
+#include "video.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/avstring.h"
+
+#define NS(n) n < 0 ? (int)(n*65536.0-0.5+DBL_EPSILON) : (int)(n*65536.0+0.5)
+#define CB(n) av_clip_uint8(n)
+
+static const double yuv_coeff[4][3][3] = {
+ { { +0.7152, +0.0722, +0.2126 }, // Rec.709 (0)
+ { -0.3850, +0.5000, -0.1150 },
+ { -0.4540, -0.0460, +0.5000 } },
+ { { +0.5900, +0.1100, +0.3000 }, // FCC (1)
+ { -0.3310, +0.5000, -0.1690 },
+ { -0.4210, -0.0790, +0.5000 } },
+ { { +0.5870, +0.1140, +0.2990 }, // Rec.601 (ITU-R BT.470-2/SMPTE 170M) (2)
+ { -0.3313, +0.5000, -0.1687 },
+ { -0.4187, -0.0813, +0.5000 } },
+ { { +0.7010, +0.0870, +0.2120 }, // SMPTE 240M (3)
+ { -0.3840, +0.5000, -0.1160 },
+ { -0.4450, -0.0550, +0.5000 } },
+};
+
+typedef struct {
+ int yuv_convert[16][3][3];
+ int interlaced;
+ int source, dest, mode;
+ char src[256];
+ char dst[256];
+ int hsub, vsub;
+ AVFilterBufferRef *outpicref;
+} ColorMatrixContext;
+
+#define ma m[0][0]
+#define mb m[0][1]
+#define mc m[0][2]
+#define md m[1][0]
+#define me m[1][1]
+#define mf m[1][2]
+#define mg m[2][0]
+#define mh m[2][1]
+#define mi m[2][2]
+
+#define ima im[0][0]
+#define imb im[0][1]
+#define imc im[0][2]
+#define imd im[1][0]
+#define ime im[1][1]
+#define imf im[1][2]
+#define img im[2][0]
+#define imh im[2][1]
+#define imi im[2][2]
+
+static void inverse3x3(double im[3][3], const double m[3][3])
+{
+ double det = ma * (me * mi - mf * mh) - mb * (md * mi - mf * mg) + mc * (md * mh - me * mg);
+ det = 1.0 / det;
+ ima = det * (me * mi - mf * mh);
+ imb = det * (mc * mh - mb * mi);
+ imc = det * (mb * mf - mc * me);
+ imd = det * (mf * mg - md * mi);
+ ime = det * (ma * mi - mc * mg);
+ imf = det * (mc * md - ma * mf);
+ img = det * (md * mh - me * mg);
+ imh = det * (mb * mg - ma * mh);
+ imi = det * (ma * me - mb * md);
+}
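
A small self-check of inverse3x3(), assuming <math.h> for fabs() and a nonsingular input (true for all four yuv_coeff matrices): multiplying m by the computed inverse should give the identity to within rounding.

    /* Sketch: returns 1 if m * im ~ I within a small tolerance. */
    static int check_inverse(const double m[3][3], const double im[3][3])
    {
        int i, j, k;
        for (i = 0; i < 3; i++)
            for (j = 0; j < 3; j++) {
                double s = 0;
                for (k = 0; k < 3; k++)
                    s += m[i][k] * im[k][j];
                if (fabs(s - (i == j)) > 1e-9)
                    return 0;
            }
        return 1;
    }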
+
+static void solve_coefficients(double cm[3][3], double rgb[3][3], const double yuv[3][3])
+{
+ int i, j;
+ for (i = 0; i < 3; i++)
+ for (j = 0; j < 3; j++)
+ cm[i][j] = yuv[i][0] * rgb[0][j] + yuv[i][1] * rgb[1][j] + yuv[i][2] * rgb[2][j];
+}
+
+static void calc_coefficients(AVFilterContext *ctx)
+{
+ ColorMatrixContext *color = ctx->priv;
+ double rgb_coeffd[4][3][3];
+ double yuv_convertd[16][3][3];
+ int v = 0;
+ int i, j, k;
+
+ for (i = 0; i < 4; i++)
+ inverse3x3(rgb_coeffd[i], yuv_coeff[i]);
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 4; j++) {
+ solve_coefficients(yuv_convertd[v], rgb_coeffd[i], yuv_coeff[j]);
+ for (k = 0; k < 3; k++) {
+ color->yuv_convert[v][k][0] = NS(yuv_convertd[v][k][0]);
+ color->yuv_convert[v][k][1] = NS(yuv_convertd[v][k][1]);
+ color->yuv_convert[v][k][2] = NS(yuv_convertd[v][k][2]);
+ }
+ if (color->yuv_convert[v][0][0] != 65536 || color->yuv_convert[v][1][0] != 0 ||
+ color->yuv_convert[v][2][0] != 0) {
+ av_log(ctx, AV_LOG_ERROR, "error calculating conversion coefficients\n");
+ }
+ v++;
+ }
+ }
+}
+
+static const char *color_modes[] = {"bt709", "FCC", "bt601", "smpte240m"};
+
+static int get_color_mode_index(const char *name)
+{
+ int i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(color_modes); i++)
+ if (!av_strcasecmp(color_modes[i], name))
+ return i;
+ return -1;
+}
+
+static av_cold int init(AVFilterContext *ctx, const char *args)
+{
+ ColorMatrixContext *color = ctx->priv;
+
+ if (!args)
+ goto usage;
+ if (sscanf(args, "%255[^:]:%255[^:]", color->src, color->dst) != 2) {
+ usage:
+ av_log(ctx, AV_LOG_ERROR, "usage: <src>:<dst>\n");
+ av_log(ctx, AV_LOG_ERROR, "possible options: bt709,bt601,smpte240m,fcc\n");
+ return -1;
+ }
+
+ color->source = get_color_mode_index(color->src);
+ if (color->source < 0) {
+ av_log(ctx, AV_LOG_ERROR, "unknown color space %s\n", color->src);
+ return AVERROR(EINVAL);
+ }
+
+ color->dest = get_color_mode_index(color->dst);
+ if (color->dest < 0) {
+ av_log(ctx, AV_LOG_ERROR, "unknown color space %s\n", color->dst);
+ return AVERROR(EINVAL);
+ }
+
+ if (color->source == color->dest) {
+ av_log(ctx, AV_LOG_ERROR, "source and destination color space are identical\n");
+ return AVERROR(EINVAL);
+ }
+
+ color->mode = color->source * 4 + color->dest;
+
+ calc_coefficients(ctx);
+
+ return 0;
+}
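
For reference, init() expects two names from color_modes joined by a colon, e.g. the hypothetical args string "bt601:bt709" to convert Rec.601 material to Rec.709; with the indices above (bt709 = 0, bt601 = 2) that selects mode = 2 * 4 + 0 = 8 in the precomputed yuv_convert table.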
+
+static void process_frame_uyvy422(ColorMatrixContext *color,
+ AVFilterBufferRef *dst, AVFilterBufferRef *src)
+{
+ const unsigned char *srcp = src->data[0];
+ const int src_pitch = src->linesize[0];
+ const int height = src->video->h;
+ const int width = src->video->w*2;
+ unsigned char *dstp = dst->data[0];
+ const int dst_pitch = dst->linesize[0];
+ const int c2 = color->yuv_convert[color->mode][0][1];
+ const int c3 = color->yuv_convert[color->mode][0][2];
+ const int c4 = color->yuv_convert[color->mode][1][1];
+ const int c5 = color->yuv_convert[color->mode][1][2];
+ const int c6 = color->yuv_convert[color->mode][2][1];
+ const int c7 = color->yuv_convert[color->mode][2][2];
+ int x, y;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x += 4) {
+ const int u = srcp[x + 0] - 128;
+ const int v = srcp[x + 2] - 128;
+ const int uvval = c2 * u + c3 * v + 1081344;
+ dstp[x + 0] = CB((c4 * u + c5 * v + 8421376) >> 16);
+ dstp[x + 1] = CB((65536 * (srcp[x + 1] - 16) + uvval) >> 16);
+ dstp[x + 2] = CB((c6 * u + c7 * v + 8421376) >> 16);
+ dstp[x + 3] = CB((65536 * (srcp[x + 3] - 16) + uvval) >> 16);
+ }
+ srcp += src_pitch;
+ dstp += dst_pitch;
+ }
+}
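
The magic constants above are 16.16 fixed point, consistent with the NS() scaling of the coefficients: 1081344 = (16 << 16) + 32768 re-adds the limited-range luma offset with rounding applied by the final >> 16, and 8421376 = (128 << 16) + 32768 re-centers the chroma samples the same way.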
+
+static void process_frame_yuv422p(ColorMatrixContext *color,
+ AVFilterBufferRef *dst, AVFilterBufferRef *src)
+{
+ const unsigned char *srcpU = src->data[1];
+ const unsigned char *srcpV = src->data[2];
+ const unsigned char *srcpY = src->data[0];
+ const int src_pitchY = src->linesize[0];
+ const int src_pitchUV = src->linesize[1];
+ const int height = src->video->h;
+ const int width = src->video->w;
+ unsigned char *dstpU = dst->data[1];
+ unsigned char *dstpV = dst->data[2];
+ unsigned char *dstpY = dst->data[0];
+ const int dst_pitchY = dst->linesize[0];
+ const int dst_pitchUV = dst->linesize[1];
+ const int c2 = color->yuv_convert[color->mode][0][1];
+ const int c3 = color->yuv_convert[color->mode][0][2];
+ const int c4 = color->yuv_convert[color->mode][1][1];
+ const int c5 = color->yuv_convert[color->mode][1][2];
+ const int c6 = color->yuv_convert[color->mode][2][1];
+ const int c7 = color->yuv_convert[color->mode][2][2];
+ int x, y;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x += 2) {
+ const int u = srcpU[x >> 1] - 128;
+ const int v = srcpV[x >> 1] - 128;
+ const int uvval = c2 * u + c3 * v + 1081344;
+ dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16);
+ dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16);
+ dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16);
+ dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16);
+ }
+ srcpY += src_pitchY;
+ dstpY += dst_pitchY;
+ srcpU += src_pitchUV;
+ srcpV += src_pitchUV;
+ dstpU += dst_pitchUV;
+ dstpV += dst_pitchUV;
+ }
+}
+
+static void process_frame_yuv420p(ColorMatrixContext *color,
+ AVFilterBufferRef *dst, AVFilterBufferRef *src)
+{
+ const unsigned char *srcpU = src->data[1];
+ const unsigned char *srcpV = src->data[2];
+ const unsigned char *srcpY = src->data[0];
+ const unsigned char *srcpN = src->data[0] + src->linesize[0];
+ const int src_pitchY = src->linesize[0];
+ const int src_pitchUV = src->linesize[1];
+ const int height = src->video->h;
+ const int width = src->video->w;
+ unsigned char *dstpU = dst->data[1];
+ unsigned char *dstpV = dst->data[2];
+ unsigned char *dstpY = dst->data[0];
+ unsigned char *dstpN = dst->data[0] + dst->linesize[0];
+ const int dst_pitchY = dst->linesize[0];
+ const int dst_pitchUV = dst->linesize[1];
+ const int c2 = color->yuv_convert[color->mode][0][1];
+ const int c3 = color->yuv_convert[color->mode][0][2];
+ const int c4 = color->yuv_convert[color->mode][1][1];
+ const int c5 = color->yuv_convert[color->mode][1][2];
+ const int c6 = color->yuv_convert[color->mode][2][1];
+ const int c7 = color->yuv_convert[color->mode][2][2];
+ int x, y;
+
+ for (y = 0; y < height; y += 2) {
+ for (x = 0; x < width; x += 2) {
+ const int u = srcpU[x >> 1] - 128;
+ const int v = srcpV[x >> 1] - 128;
+ const int uvval = c2 * u + c3 * v + 1081344;
+ dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16);
+ dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16);
+ dstpN[x + 0] = CB((65536 * (srcpN[x + 0] - 16) + uvval) >> 16);
+ dstpN[x + 1] = CB((65536 * (srcpN[x + 1] - 16) + uvval) >> 16);
+ dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16);
+ dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16);
+ }
+ srcpY += src_pitchY << 1;
+ dstpY += dst_pitchY << 1;
+ srcpN += src_pitchY << 1;
+ dstpN += dst_pitchY << 1;
+ srcpU += src_pitchUV;
+ srcpV += src_pitchUV;
+ dstpU += dst_pitchUV;
+ dstpV += dst_pitchUV;
+ }
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ColorMatrixContext *color = ctx->priv;
+ const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[inlink->format];
+
+ color->hsub = pix_desc->log2_chroma_w;
+ color->vsub = pix_desc->log2_chroma_h;
+
+ av_log(ctx, AV_LOG_VERBOSE, "%s -> %s\n", color->src, color->dst);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+ PIX_FMT_YUV422P,
+ PIX_FMT_YUV420P,
+ PIX_FMT_UYVY422,
+ PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms, int w, int h)
+{
+ AVFilterBufferRef *picref =
+ ff_get_video_buffer(inlink->dst->outputs[0], perms, w, h);
+ return picref;
+}
+
- ff_start_frame(link->dst->outputs[0], outpicref);
++static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
+{
+ AVFilterContext *ctx = link->dst;
+ ColorMatrixContext *color = ctx->priv;
+ AVFilterBufferRef *outpicref = avfilter_ref_buffer(picref, ~0);
+
+ color->outpicref = outpicref;
+
- static void end_frame(AVFilterLink *link)
++ return ff_start_frame(link->dst->outputs[0], outpicref);
+}
+
- ff_end_frame(ctx->outputs[0]);
++static int end_frame(AVFilterLink *link)
+{
+ AVFilterContext *ctx = link->dst;
+ ColorMatrixContext *color = ctx->priv;
+ AVFilterBufferRef *out = color->outpicref;
+
+ if (link->cur_buf->format == PIX_FMT_YUV422P)
+ process_frame_yuv422p(color, out, link->cur_buf);
+ else if (link->cur_buf->format == PIX_FMT_YUV420P)
+ process_frame_yuv420p(color, out, link->cur_buf);
+ else
+ process_frame_uyvy422(color, out, link->cur_buf);
+
+ ff_draw_slice(ctx->outputs[0], 0, link->dst->outputs[0]->h, 1);
- static void null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { }
++ return ff_end_frame(ctx->outputs[0]);
+}
+
++static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { return 0; }
+
+AVFilter avfilter_vf_colormatrix = {
+ .name = "colormatrix",
+ .description = NULL_IF_CONFIG_SMALL("Color matrix conversion"),
+
+ .priv_size = sizeof(ColorMatrixContext),
+ .init = init,
+ .query_formats = query_formats,
+
+ .inputs = (AVFilterPad[]) {{ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .start_frame = start_frame,
+ .get_video_buffer = get_video_buffer,
+ .draw_slice = null_draw_slice,
+ .end_frame = end_frame, },
+ { .name = NULL }},
+
+ .outputs = (AVFilterPad[]) {{ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO, },
+ { .name = NULL }},
+};
--- /dev/null
- static void end_frame(AVFilterLink *link)
+/*
+ * Copyright (C) 2010 Georg Martius <georg.martius@web.de>
+ * Copyright (C) 2010 Daniel G. Taylor <dan@programmer-art.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * fast deshake / depan video filter
+ *
+ * SAD block-matching motion compensation to fix small changes in
+ * horizontal and/or vertical shift. This filter helps remove camera shake
+ * from hand-holding a camera, bumping a tripod, moving on a vehicle, etc.
+ *
+ * Algorithm:
+ * - For each frame with one previous reference frame
+ * - For each block in the frame
+ * - If contrast > threshold then find likely motion vector
+ * - For all found motion vectors
+ * - Find most common, store as global motion vector
+ * - Find most likely rotation angle
+ * - Transform image along global motion
+ *
+ * TODO:
+ * - Fill frame edges based on previous/next reference frames
+ * - Fill frame edges by stretching image near the edges?
+ * - Can this be done quickly and look decent?
+ *
+ * Dark Shikari links to http://wiki.videolan.org/SoC_x264_2010#GPU_Motion_Estimation_2
+ * for an algorithm similar to what could be used here to get the gmv
+ * It requires only a couple diamond searches + fast downscaling
+ *
+ * Special thanks to Jason Kotenko for his help with the algorithm and my
+ * inability to see simple errors in C code.
+ */
+
+#include "avfilter.h"
+#include "formats.h"
+#include "video.h"
+#include "libavutil/common.h"
+#include "libavutil/mem.h"
+#include "libavutil/pixdesc.h"
+#include "libavcodec/dsputil.h"
+
+#include "transform.h"
+
+#define CHROMA_WIDTH(link) -((-link->w) >> av_pix_fmt_descriptors[link->format].log2_chroma_w)
+#define CHROMA_HEIGHT(link) -((-link->h) >> av_pix_fmt_descriptors[link->format].log2_chroma_h)
+
+enum SearchMethod {
+ EXHAUSTIVE, ///< Search all possible positions
+ SMART_EXHAUSTIVE, ///< Search most possible positions (faster)
+ SEARCH_COUNT
+};
+
+typedef struct {
+ int x; ///< Horizontal shift
+ int y; ///< Vertical shift
+} IntMotionVector;
+
+typedef struct {
+ double x; ///< Horizontal shift
+ double y; ///< Vertical shift
+} MotionVector;
+
+typedef struct {
+ MotionVector vector; ///< Motion vector
+ double angle; ///< Angle of rotation
+ double zoom; ///< Zoom percentage
+} Transform;
+
+typedef struct {
+ AVClass av_class;
+ AVFilterBufferRef *ref; ///< Previous frame
+ int rx; ///< Maximum horizontal shift
+ int ry; ///< Maximum vertical shift
+ enum FillMethod edge; ///< Edge fill method
+ int blocksize; ///< Size of blocks to compare
+ int contrast; ///< Contrast threshold
+ enum SearchMethod search; ///< Motion search method
+ AVCodecContext *avctx;
+ DSPContext c; ///< Context providing optimized SAD methods
+ Transform last; ///< Transform from last frame
+ int refcount; ///< Number of reference frames (defines averaging window)
+ FILE *fp;
+ Transform avg;
+ int cw; ///< Crop motion search to this box
+ int ch;
+ int cx;
+ int cy;
+} DeshakeContext;
+
+static int cmp(const double *a, const double *b)
+{
+ return *a < *b ? -1 : ( *a > *b ? 1 : 0 );
+}
+
+/**
+ * Cleaned mean (cuts off 20% of values to remove outliers and then averages)
+ */
+static double clean_mean(double *values, int count)
+{
+ double mean = 0;
+ int cut = count / 5;
+ int x;
+
+ qsort(values, count, sizeof(double), (void*)cmp);
+
+ for (x = cut; x < count - cut; x++) {
+ mean += values[x];
+ }
+
+ return mean / (count - cut * 2);
+}
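+/* Example: clean_mean((double[]){100, 1, 2, 4, 3}, 5) sorts the values to
+ * {1, 2, 3, 4, 100}, cuts count / 5 = 1 value from each end and averages
+ * the remainder: (2 + 3 + 4) / 3 = 3. */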
+
+/**
+ * Find the most likely shift in motion between two frames for a given
+ * macroblock. Test each block against several shifts given by the rx
+ * and ry attributes. Searches using a simple matrix of those shifts and
+ * chooses the most likely shift by the smallest difference in blocks.
+ */
+static void find_block_motion(DeshakeContext *deshake, uint8_t *src1,
+ uint8_t *src2, int cx, int cy, int stride,
+ IntMotionVector *mv)
+{
+ int x, y;
+ int diff;
+ int smallest = INT_MAX;
+ int tmp, tmp2;
+
+ #define CMP(i, j) deshake->c.sad[0](deshake, src1 + cy * stride + cx, \
+ src2 + (j) * stride + (i), stride, \
+ deshake->blocksize)
+
+ if (deshake->search == EXHAUSTIVE) {
+ // Compare every possible position - this is sloooow!
+ for (y = -deshake->ry; y <= deshake->ry; y++) {
+ for (x = -deshake->rx; x <= deshake->rx; x++) {
+ diff = CMP(cx - x, cy - y);
+ if (diff < smallest) {
+ smallest = diff;
+ mv->x = x;
+ mv->y = y;
+ }
+ }
+ }
+ } else if (deshake->search == SMART_EXHAUSTIVE) {
+ // Compare every other possible position and find the best match
+ for (y = -deshake->ry + 1; y < deshake->ry - 2; y += 2) {
+ for (x = -deshake->rx + 1; x < deshake->rx - 2; x += 2) {
+ diff = CMP(cx - x, cy - y);
+ if (diff < smallest) {
+ smallest = diff;
+ mv->x = x;
+ mv->y = y;
+ }
+ }
+ }
+
+ // Home in on the specific best match around the coarse match found above
+ tmp = mv->x;
+ tmp2 = mv->y;
+
+ for (y = tmp2 - 1; y <= tmp2 + 1; y++) {
+ for (x = tmp - 1; x <= tmp + 1; x++) {
+ if (x == tmp && y == tmp2)
+ continue;
+
+ diff = CMP(cx - x, cy - y);
+ if (diff < smallest) {
+ smallest = diff;
+ mv->x = x;
+ mv->y = y;
+ }
+ }
+ }
+ }
+
+ if (smallest > 512) {
+ mv->x = -1;
+ mv->y = -1;
+ }
+ emms_c();
+ //av_log(NULL, AV_LOG_ERROR, "%d\n", smallest);
+ //av_log(NULL, AV_LOG_ERROR, "Final: (%d, %d) = %d x %d\n", cx, cy, mv->x, mv->y);
+}
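+/* For reference, a minimal plain-C sketch of what the optimized
+ * deshake->c.sad[0] callback computes for one 16-pixel-wide block of
+ * height h: the sum of absolute differences between two blocks. This is
+ * illustrative only (note it does not follow the dsputil calling
+ * convention used by the CMP macro above). */
+static av_unused int sad_16xh_ref(const uint8_t *blk1, const uint8_t *blk2,
+                                  int stride, int h)
+{
+    int i, j, sum = 0;
+
+    for (i = 0; i < h; i++)
+        for (j = 0; j < 16; j++)
+            sum += FFABS(blk1[i * stride + j] - blk2[i * stride + j]);
+
+    return sum;
+}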
+
+/**
+ * Find the contrast of a given block. When searching for global motion we
+ * really only care about the high contrast blocks, so using this method we
+ * can actually skip blocks we don't care much about.
+ */
+static int block_contrast(uint8_t *src, int x, int y, int stride, int blocksize)
+{
+ int highest = 0;
+ int lowest = 0;
+ int i, j, pos;
+
+ for (i = 0; i <= blocksize * 2; i++) {
+ // We use a width of 16 here to match the libavcodec sad functions
+ for (j = 0; j <= 15; j++) {
+ pos = (y - i) * stride + (x - j);
+ if (src[pos] < lowest)
+ lowest = src[pos];
+ else if (src[pos] > highest) {
+ highest = src[pos];
+ }
+ }
+ }
+
+ return highest - lowest;
+}
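+/* The scan above covers a window of 2 * blocksize + 1 rows by 16 columns
+ * extending up and to the left of (x, y); the returned spread is a cheap
+ * proxy for how much texture the block contains. */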
+
+/**
+ * Find the rotation for a given block.
+ */
+static double block_angle(int x, int y, int cx, int cy, IntMotionVector *shift)
+{
+ double a1, a2, diff;
+
+ a1 = atan2(y - cy, x - cx);
+ a2 = atan2(y - cy + shift->y, x - cx + shift->x);
+
+ diff = a2 - a1;
+
+ return (diff > M_PI) ? diff - 2 * M_PI :
+ (diff < -M_PI) ? diff + 2 * M_PI :
+ diff;
+}
+
+/**
+ * Find the estimated global motion for a scene given the most likely shift
+ * for each block in the frame. The global motion is estimated to be the
+ * same as the motion from most blocks in the frame, so if most blocks
+ * move one pixel to the right and two pixels down, this would yield a
+ * motion vector (1, -2).
+ */
+static void find_motion(DeshakeContext *deshake, uint8_t *src1, uint8_t *src2,
+ int width, int height, int stride, Transform *t)
+{
+ int x, y;
+ IntMotionVector mv = {0, 0};
+ int counts[128][128];
+ int count_max_value = 0;
+ int contrast;
+
+ int pos;
+ double *angles = av_malloc(sizeof(*angles) * width * height / (16 * deshake->blocksize));
+ int center_x = 0, center_y = 0;
+ double p_x, p_y;
+
+ // Reset counts to zero
+ for (x = 0; x < deshake->rx * 2 + 1; x++) {
+ for (y = 0; y < deshake->ry * 2 + 1; y++) {
+ counts[x][y] = 0;
+ }
+ }
+
+ pos = 0;
+ // Find motion for every block and store the motion vector in the counts
+ for (y = deshake->ry; y < height - deshake->ry - (deshake->blocksize * 2); y += deshake->blocksize * 2) {
+ // We use a width of 16 here to match the libavcodec sad functions
+ for (x = deshake->rx; x < width - deshake->rx - 16; x += 16) {
+ // If the contrast is too low, just skip this block as it probably
+ // won't be very useful to us.
+ contrast = block_contrast(src2, x, y, stride, deshake->blocksize);
+ if (contrast > deshake->contrast) {
+ //av_log(NULL, AV_LOG_ERROR, "%d\n", contrast);
+ find_block_motion(deshake, src1, src2, x, y, stride, &mv);
+ if (mv.x != -1 && mv.y != -1) {
+ counts[mv.x + deshake->rx][mv.y + deshake->ry] += 1;
+ if (x > deshake->rx && y > deshake->ry)
+ angles[pos++] = block_angle(x, y, 0, 0, &mv);
+
+ center_x += mv.x;
+ center_y += mv.y;
+ }
+ }
+ }
+ }
+
+ if (pos) {
+ center_x /= pos;
+ center_y /= pos;
+ t->angle = clean_mean(angles, pos);
+ if (t->angle < 0.001)
+ t->angle = 0;
+ } else {
+ t->angle = 0;
+ }
+
+ // Find the most common motion vector in the frame and use it as the gmv
+ for (y = deshake->ry * 2; y >= 0; y--) {
+ for (x = 0; x < deshake->rx * 2 + 1; x++) {
+ //av_log(NULL, AV_LOG_ERROR, "%5d ", counts[x][y]);
+ if (counts[x][y] > count_max_value) {
+ t->vector.x = x - deshake->rx;
+ t->vector.y = y - deshake->ry;
+ count_max_value = counts[x][y];
+ }
+ }
+ //av_log(NULL, AV_LOG_ERROR, "\n");
+ }
+
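+ // Treating (center_x, center_y) as the rotation center, rotating by
+ // t->angle displaces the frame center by (R(angle) - I) * p; the next
+ // two statements fold that displacement into the translation.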
+ p_x = (center_x - width / 2);
+ p_y = (center_y - height / 2);
+ t->vector.x += (cos(t->angle)-1)*p_x - sin(t->angle)*p_y;
+ t->vector.y += sin(t->angle)*p_x + (cos(t->angle)-1)*p_y;
+
+ // Clamp max shift & rotation?
+ t->vector.x = av_clipf(t->vector.x, -deshake->rx * 2, deshake->rx * 2);
+ t->vector.y = av_clipf(t->vector.y, -deshake->ry * 2, deshake->ry * 2);
+ t->angle = av_clipf(t->angle, -0.1, 0.1);
+
+ //av_log(NULL, AV_LOG_ERROR, "%d x %d\n", avg->x, avg->y);
+ av_free(angles);
+}
+
+static av_cold int init(AVFilterContext *ctx, const char *args)
+{
+ DeshakeContext *deshake = ctx->priv;
+ char filename[256] = {0};
+
+ deshake->rx = 16;
+ deshake->ry = 16;
+ deshake->edge = FILL_MIRROR;
+ deshake->blocksize = 8;
+ deshake->contrast = 125;
+ deshake->search = EXHAUSTIVE;
+ deshake->refcount = 20;
+
+ deshake->cw = -1;
+ deshake->ch = -1;
+ deshake->cx = -1;
+ deshake->cy = -1;
+
+ if (args) {
+ sscanf(args, "%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%255s",
+ &deshake->cx, &deshake->cy, &deshake->cw, &deshake->ch,
+ &deshake->rx, &deshake->ry, (int *)&deshake->edge,
+ &deshake->blocksize, &deshake->contrast, (int *)&deshake->search, filename);
+
+ deshake->blocksize /= 2;
+
+ deshake->rx = av_clip(deshake->rx, 0, 64);
+ deshake->ry = av_clip(deshake->ry, 0, 64);
+ deshake->edge = av_clip(deshake->edge, FILL_BLANK, FILL_COUNT - 1);
+ deshake->blocksize = av_clip(deshake->blocksize, 4, 128);
+ deshake->contrast = av_clip(deshake->contrast, 1, 255);
+ deshake->search = av_clip(deshake->search, EXHAUSTIVE, SEARCH_COUNT - 1);
+
+ }
+ if (*filename)
+ deshake->fp = fopen(filename, "w");
+ if (deshake->fp)
+ fwrite("Ori x, Avg x, Fin x, Ori y, Avg y, Fin y, Ori angle, Avg angle, Fin angle, Ori zoom, Avg zoom, Fin zoom\n", sizeof(char), 104, deshake->fp);
+
+ // Quadword align left edge of box for MMX code, adjust width if necessary
+ // to keep right margin
+ if (deshake->cx > 0) {
+ deshake->cw += deshake->cx - (deshake->cx & ~15);
+ deshake->cx &= ~15;
+ }
+
+ av_log(ctx, AV_LOG_VERBOSE, "cx: %d, cy: %d, cw: %d, ch: %d, rx: %d, ry: %d, edge: %d blocksize: %d contrast: %d search: %d\n",
+ deshake->cx, deshake->cy, deshake->cw, deshake->ch,
+ deshake->rx, deshake->ry, deshake->edge, deshake->blocksize * 2, deshake->contrast, deshake->search);
+
+ return 0;
+}
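+/* All options are positional (cx:cy:cw:ch:rx:ry:edge:blocksize:contrast:
+ * search:filename). A hypothetical invocation, assuming the usual -vf
+ * option syntax:
+ *   -vf deshake=-1:-1:-1:-1:16:16:3:8:125:0:stats.log
+ * searches the whole frame for shifts of up to 16 pixels using the
+ * exhaustive method (0) and logs the per-frame transforms to stats.log. */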
+
+static int query_formats(AVFilterContext *ctx)
+{
+ enum PixelFormat pix_fmts[] = {
+ PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_YUV410P,
+ PIX_FMT_YUV411P, PIX_FMT_YUV440P, PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P,
+ PIX_FMT_YUVJ444P, PIX_FMT_YUVJ440P, PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+static int config_props(AVFilterLink *link)
+{
+ DeshakeContext *deshake = link->dst->priv;
+
+ deshake->ref = NULL;
+ deshake->last.vector.x = 0;
+ deshake->last.vector.y = 0;
+ deshake->last.angle = 0;
+ deshake->last.zoom = 0;
+
+ deshake->avctx = avcodec_alloc_context3(NULL);
+ dsputil_init(&deshake->c, deshake->avctx);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ DeshakeContext *deshake = ctx->priv;
+
+ avfilter_unref_buffer(deshake->ref);
+ if (deshake->fp)
+ fclose(deshake->fp);
+ if (deshake->avctx)
+ avcodec_close(deshake->avctx);
+ av_freep(&deshake->avctx);
+}
+
- ff_end_frame(link->dst->outputs[0]);
++static int end_frame(AVFilterLink *link)
+{
+ DeshakeContext *deshake = link->dst->priv;
+ AVFilterBufferRef *in = link->cur_buf;
+ AVFilterBufferRef *out = link->dst->outputs[0]->out_buf;
+ Transform t = {{0},0}, orig = {{0},0};
+ float matrix[9];
+ float alpha = 2.0 / deshake->refcount;
+ char tmp[256];
+
+ if (deshake->cx < 0 || deshake->cy < 0 || deshake->cw < 0 || deshake->ch < 0) {
+ // Find the most likely global motion for the current frame
+ find_motion(deshake, (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0], in->data[0], link->w, link->h, in->linesize[0], &t);
+ } else {
+ uint8_t *src1 = (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0];
+ uint8_t *src2 = in->data[0];
+
+ deshake->cx = FFMIN(deshake->cx, link->w);
+ deshake->cy = FFMIN(deshake->cy, link->h);
+
+ if ((unsigned)deshake->cx + (unsigned)deshake->cw > link->w) deshake->cw = link->w - deshake->cx;
+ if ((unsigned)deshake->cy + (unsigned)deshake->ch > link->h) deshake->ch = link->h - deshake->cy;
+
+ // Quadword align right margin
+ deshake->cw &= ~15;
+
+ src1 += deshake->cy * in->linesize[0] + deshake->cx;
+ src2 += deshake->cy * in->linesize[0] + deshake->cx;
+
+ find_motion(deshake, src1, src2, deshake->cw, deshake->ch, in->linesize[0], &t);
+ }
+
+
+ // Copy transform so we can output it later to compare to the smoothed value
+ orig.vector.x = t.vector.x;
+ orig.vector.y = t.vector.y;
+ orig.angle = t.angle;
+ orig.zoom = t.zoom;
+
+ // Generate a one-sided moving exponential average
+ deshake->avg.vector.x = alpha * t.vector.x + (1.0 - alpha) * deshake->avg.vector.x;
+ deshake->avg.vector.y = alpha * t.vector.y + (1.0 - alpha) * deshake->avg.vector.y;
+ deshake->avg.angle = alpha * t.angle + (1.0 - alpha) * deshake->avg.angle;
+ deshake->avg.zoom = alpha * t.zoom + (1.0 - alpha) * deshake->avg.zoom;
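+ // With alpha = 2 / refcount this behaves like a standard exponential
+ // moving average whose effective window is roughly refcount frames, so
+ // a larger refcount smooths the camera path more aggressively.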
+
+ // Remove the average from the current motion to detect the motion that
+ // is not intentional, such as jitter from bumping the camera
+ t.vector.x -= deshake->avg.vector.x;
+ t.vector.y -= deshake->avg.vector.y;
+ t.angle -= deshake->avg.angle;
+ t.zoom -= deshake->avg.zoom;
+
+ // Invert the motion to undo it
+ t.vector.x *= -1;
+ t.vector.y *= -1;
+ t.angle *= -1;
+
+ // Write statistics to file
+ if (deshake->fp) {
+ snprintf(tmp, 256, "%f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f\n", orig.vector.x, deshake->avg.vector.x, t.vector.x, orig.vector.y, deshake->avg.vector.y, t.vector.y, orig.angle, deshake->avg.angle, t.angle, orig.zoom, deshake->avg.zoom, t.zoom);
+ fwrite(tmp, sizeof(char), strlen(tmp), deshake->fp);
+ }
+
+ // Turn relative current frame motion into absolute by adding it to the
+ // last absolute motion
+ t.vector.x += deshake->last.vector.x;
+ t.vector.y += deshake->last.vector.y;
+ t.angle += deshake->last.angle;
+ t.zoom += deshake->last.zoom;
+
+ // Shrink motion by 10% to keep things centered in the camera frame
+ t.vector.x *= 0.9;
+ t.vector.y *= 0.9;
+ t.angle *= 0.9;
+
+ // Store the last absolute motion information
+ deshake->last.vector.x = t.vector.x;
+ deshake->last.vector.y = t.vector.y;
+ deshake->last.angle = t.angle;
+ deshake->last.zoom = t.zoom;
+
+ // Generate a luma transformation matrix
+ avfilter_get_matrix(t.vector.x, t.vector.y, t.angle, 1.0 + t.zoom / 100.0, matrix);
+
+ // Transform the luma plane
+ avfilter_transform(in->data[0], out->data[0], in->linesize[0], out->linesize[0], link->w, link->h, matrix, INTERPOLATE_BILINEAR, deshake->edge);
+
+ // Generate a chroma transformation matrix
+ avfilter_get_matrix(t.vector.x / (link->w / CHROMA_WIDTH(link)), t.vector.y / (link->h / CHROMA_HEIGHT(link)), t.angle, 1.0 + t.zoom / 100.0, matrix);
+
+ // Transform the chroma planes
+ avfilter_transform(in->data[1], out->data[1], in->linesize[1], out->linesize[1], CHROMA_WIDTH(link), CHROMA_HEIGHT(link), matrix, INTERPOLATE_BILINEAR, deshake->edge);
+ avfilter_transform(in->data[2], out->data[2], in->linesize[2], out->linesize[2], CHROMA_WIDTH(link), CHROMA_HEIGHT(link), matrix, INTERPOLATE_BILINEAR, deshake->edge);
+
+ // Cleanup the old reference frame
+ if (deshake->ref != NULL)
+ avfilter_unref_buffer(deshake->ref);
+
+ // Store the current frame as the reference frame for calculating the
+ // motion of the next frame
+ deshake->ref = in;
+
+ // Draw the transformed frame information
+ ff_draw_slice(link->dst->outputs[0], 0, link->h, 1);
- static void draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
++ return ff_end_frame(link->dst->outputs[0]);
+}
+
++static int draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
+{
++ return 0;
+}
+
+AVFilter avfilter_vf_deshake = {
+ .name = "deshake",
+ .description = NULL_IF_CONFIG_SMALL("Stabilize shaky video."),
+
+ .priv_size = sizeof(DeshakeContext),
+
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+
+ .inputs = (const AVFilterPad[]) {{ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .draw_slice = draw_slice,
+ .end_frame = end_frame,
+ .config_props = config_props,
+ .min_perms = AV_PERM_READ, },
+ { .name = NULL}},
+
+ .outputs = (const AVFilterPad[]) {{ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO, },
+ { .name = NULL}},
+};
return 0;
}
- static void null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { }
+ static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
+ {
+ return 0;
+ }
- static void end_frame(AVFilterLink *inlink)
-static inline int normalize_double(int *n, double d)
-{
- int ret = 0;
-
- if (isnan(d)) {
- ret = AVERROR(EINVAL);
- } else if (d > INT_MAX || d < INT_MIN) {
- *n = d > INT_MAX ? INT_MAX : INT_MIN;
- ret = AVERROR(EINVAL);
- } else
- *n = round(d);
-
- return ret;
-}
-
-static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
++static int end_frame(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
DrawTextContext *dtext = ctx->priv;
- AVFilterBufferRef *buf_out;
- int ret = 0;
-
- if ((ret = dtext_prepare_text(ctx)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Can't draw text\n");
- return ret;
- }
-
- dtext->var_values[VAR_T] = inpicref->pts == AV_NOPTS_VALUE ?
- NAN : inpicref->pts * av_q2d(inlink->time_base);
- dtext->var_values[VAR_X] =
- av_expr_eval(dtext->x_pexpr, dtext->var_values, &dtext->prng);
- dtext->var_values[VAR_Y] =
- av_expr_eval(dtext->y_pexpr, dtext->var_values, &dtext->prng);
- dtext->var_values[VAR_X] =
- av_expr_eval(dtext->x_pexpr, dtext->var_values, &dtext->prng);
-
- dtext->draw = av_expr_eval(dtext->d_pexpr, dtext->var_values, &dtext->prng);
-
- normalize_double(&dtext->x, dtext->var_values[VAR_X]);
- normalize_double(&dtext->y, dtext->var_values[VAR_Y]);
-
- if (dtext->fix_bounds) {
- if (dtext->x < 0) dtext->x = 0;
- if (dtext->y < 0) dtext->y = 0;
- if ((unsigned)dtext->x + (unsigned)dtext->w > inlink->w)
- dtext->x = inlink->w - dtext->w;
- if ((unsigned)dtext->y + (unsigned)dtext->h > inlink->h)
- dtext->y = inlink->h - dtext->h;
- }
-
- dtext->x &= ~((1 << dtext->hsub) - 1);
- dtext->y &= ~((1 << dtext->vsub) - 1);
-
- av_dlog(ctx, "n:%d t:%f x:%d y:%d x+w:%d y+h:%d\n",
- (int)dtext->var_values[VAR_N], dtext->var_values[VAR_T],
- dtext->x, dtext->y, dtext->x+dtext->w, dtext->y+dtext->h);
-
- buf_out = avfilter_ref_buffer(inpicref, ~0);
- if (!buf_out)
- return AVERROR(ENOMEM);
-
- return ff_start_frame(inlink->dst->outputs[0], buf_out);
-}
-
-static int end_frame(AVFilterLink *inlink)
-{
- AVFilterLink *outlink = inlink->dst->outputs[0];
AVFilterBufferRef *picref = inlink->cur_buf;
- DrawTextContext *dtext = inlink->dst->priv;
+ int ret;
- if (dtext->draw)
- draw_text(inlink->dst, picref, picref->video->w, picref->video->h);
+ dtext->var_values[VAR_T] = picref->pts == AV_NOPTS_VALUE ?
+ NAN : picref->pts * av_q2d(inlink->time_base);
+
+ draw_text(ctx, picref, picref->video->w, picref->video->h);
+
+ av_log(ctx, AV_LOG_DEBUG, "n:%d t:%f text_w:%d text_h:%d x:%d y:%d\n",
+ (int)dtext->var_values[VAR_N], dtext->var_values[VAR_T],
+ (int)dtext->var_values[VAR_TEXT_W], (int)dtext->var_values[VAR_TEXT_H],
+ dtext->x, dtext->y);
dtext->var_values[VAR_N] += 1.0;
return 0;
}
- static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
+static void fade_plane(int y, int h, int w,
+ int fade_factor, int black_level, int black_level_scaled,
+ uint8_t offset, uint8_t step, int bytes_per_plane,
+ uint8_t *data, int line_size)
+{
+ uint8_t *p;
+ int i, j;
+
+ /* luma, alpha or rgb plane */
+ for (i = 0; i < h; i++) {
+ p = data + offset + (y+i) * line_size;
+ for (j = 0; j < w * bytes_per_plane; j++) {
+ /* fade->factor uses its 16 low-order bits for the fractional part. */
+ *p = ((*p - black_level) * fade_factor + black_level_scaled) >> 16;
+ p+=step;
+ }
+ }
+}
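+/* Worked example of the 16.16 fixed point above: assuming black_level and
+ * black_level_scaled are 0, fade_factor = 0x8000 (0.5 in 16.16) maps a
+ * sample p = 200 to (200 * 0x8000) >> 16 = 100, i.e. a 50% fade. */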
+
+ static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
{
FadeContext *fade = inlink->dst->priv;
AVFilterBufferRef *outpic = inlink->cur_buf;
{
Frei0rContext *frei0r = outlink->src->priv;
AVFilterBufferRef *picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
- picref->video->pixel_aspect = (AVRational) {1, 1};
+ AVFilterBufferRef *buf_out;
+ int ret;
+
+ if (!picref)
+ return AVERROR(ENOMEM);
+
+ picref->video->sample_aspect_ratio = (AVRational) {1, 1};
picref->pts = frei0r->pts++;
picref->pos = -1;
return 0;
}
- static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
++static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
+{
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+
+ outlink->out_buf =
+ ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+ avfilter_copy_buffer_ref_props(outlink->out_buf, picref);
+
+ /* copy palette if required */
+ if (av_pix_fmt_descriptors[inlink->format].flags & PIX_FMT_PAL)
+ memcpy(inlink->dst->outputs[0]->out_buf->data[1], picref->data[1], AVPALETTE_SIZE);
+
- ff_start_frame(outlink, avfilter_ref_buffer(outlink->out_buf, ~0));
++ return ff_start_frame(outlink, avfilter_ref_buffer(outlink->out_buf, ~0));
+}
+
- static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
+ static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
{
FlipContext *flip = inlink->dst->priv;
AVFilterBufferRef *inpic = inlink->cur_buf;
--- /dev/null
- static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
+/*
+ * Copyright (C) 2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/cpu.h"
+#include "libavutil/common.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "internal.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+#define HIST_SIZE 4
+
+typedef enum {
+ TFF,
+ BFF,
+ PROGRSSIVE,
+ UNDETERMINED,
+} Type;
+
+typedef struct {
+ float interlace_threshold;
+ float progressive_threshold;
+
+ Type last_type;
+ Type prestat[4];
+ Type poststat[4];
+
+ uint8_t history[HIST_SIZE];
+
+ AVFilterBufferRef *cur;
+ AVFilterBufferRef *next;
+ AVFilterBufferRef *prev;
+ AVFilterBufferRef *out;
+ int (*filter_line)(const uint8_t *prev, const uint8_t *cur, const uint8_t *next, int w);
+
+ const AVPixFmtDescriptor *csp;
+} IDETContext;
+
+static const char *type2str(Type type)
+{
+ switch(type) {
+ case TFF : return "Top Field First ";
+ case BFF : return "Bottom Field First";
+ case PROGRSSIVE : return "Progressive ";
+ case UNDETERMINED: return "Undetermined ";
+ }
+ return NULL;
+}
+
+static int filter_line_c(const uint8_t *a, const uint8_t *b, const uint8_t *c, int w)
+{
+ int x;
+ int ret=0;
+
+ for(x=0; x<w; x++){
+ ret += FFABS((*a++ + *c++) - 2 * *b++);
+ }
+
+ return ret;
+}
+
+static int filter_line_c_16bit(const uint16_t *a, const uint16_t *b, const uint16_t *c, int w)
+{
+ int x;
+ int ret=0;
+
+ for(x=0; x<w; x++){
+ ret += FFABS((*a++ + *c++) - 2 * *b++);
+ }
+
+ return ret;
+}
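+/* Both kernels score how badly line b interpolates between its two
+ * vertical neighbours a and c as sum(|a + c - 2*b|). filter() below
+ * accumulates that score with b taken from the previous frame, the next
+ * frame, and the current frame itself, split by field parity; a strong
+ * asymmetry between the two field accumulators indicates interlaced
+ * content and its field order. */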
+
+static void filter(AVFilterContext *ctx)
+{
+ IDETContext *idet = ctx->priv;
+ int y, i;
+ int64_t alpha[2]={0};
+ int64_t delta=0;
+ Type type, best_type;
+ int match = 0;
+
+ for (i = 0; i < idet->csp->nb_components; i++) {
+ int w = idet->cur->video->w;
+ int h = idet->cur->video->h;
+ int refs = idet->cur->linesize[i];
+
+ if (i && i<3) {
+ w >>= idet->csp->log2_chroma_w;
+ h >>= idet->csp->log2_chroma_h;
+ }
+
+ for (y = 2; y < h - 2; y++) {
+ uint8_t *prev = &idet->prev->data[i][y*refs];
+ uint8_t *cur = &idet->cur ->data[i][y*refs];
+ uint8_t *next = &idet->next->data[i][y*refs];
+ alpha[ y &1] += idet->filter_line(cur-refs, prev, cur+refs, w);
+ alpha[(y^1)&1] += idet->filter_line(cur-refs, next, cur+refs, w);
+ delta += idet->filter_line(cur-refs, cur, cur+refs, w);
+ }
+ }
+#if HAVE_MMX
+ __asm__ volatile("emms \n\t" : : : "memory");
+#endif
+
+ if (alpha[0] / (float)alpha[1] > idet->interlace_threshold){
+ type = TFF;
+ }else if(alpha[1] / (float)alpha[0] > idet->interlace_threshold){
+ type = BFF;
+ }else if(alpha[1] / (float)delta > idet->progressive_threshold){
+ type = PROGRSSIVE;
+ }else{
+ type = UNDETERMINED;
+ }
+
+ memmove(idet->history+1, idet->history, HIST_SIZE-1);
+ idet->history[0] = type;
+ best_type = UNDETERMINED;
+ for(i=0; i<HIST_SIZE; i++){
+ if(idet->history[i] != UNDETERMINED){
+ if(best_type == UNDETERMINED)
+ best_type = idet->history[i];
+
+ if(idet->history[i] == best_type) {
+ match++;
+ }else{
+ match=0;
+ break;
+ }
+ }
+ }
+ if(idet->last_type == UNDETERMINED){
+ if(match ) idet->last_type = best_type;
+ }else{
+ if(match>2) idet->last_type = best_type;
+ }
+
+ if (idet->last_type == TFF){
+ idet->cur->video->top_field_first = 1;
+ idet->cur->video->interlaced = 1;
+ }else if(idet->last_type == BFF){
+ idet->cur->video->top_field_first = 0;
+ idet->cur->video->interlaced = 1;
+ }else if(idet->last_type == PROGRSSIVE){
+ idet->cur->video->interlaced = 0;
+ }
+
+ idet->prestat [ type] ++;
+ idet->poststat[idet->last_type] ++;
+ av_log(ctx, AV_LOG_DEBUG, "Single frame:%s, Multi frame:%s\n", type2str(type), type2str(idet->last_type));
+}
+
- return;
++static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
+{
+ AVFilterContext *ctx = link->dst;
+ IDETContext *idet = ctx->priv;
+
+ if (idet->prev)
+ avfilter_unref_buffer(idet->prev);
+ idet->prev = idet->cur;
+ idet->cur = idet->next;
+ idet->next = picref;
+
+ if (!idet->cur)
- ff_start_frame(ctx->outputs[0], avfilter_ref_buffer(idet->cur, AV_PERM_READ));
++ return 0;
+
+ if (!idet->prev)
+ idet->prev = avfilter_ref_buffer(idet->cur, AV_PERM_READ);
+
- static void end_frame(AVFilterLink *link)
++ return ff_start_frame(ctx->outputs[0], avfilter_ref_buffer(idet->cur, AV_PERM_READ));
+}
+
- return;
++static int end_frame(AVFilterLink *link)
+{
+ AVFilterContext *ctx = link->dst;
+ IDETContext *idet = ctx->priv;
+
+ if (!idet->cur)
- ff_end_frame(ctx->outputs[0]);
++ return 0;
+
+ if (!idet->csp)
+ idet->csp = &av_pix_fmt_descriptors[link->format];
+ if (idet->csp->comp[0].depth_minus1 / 8 == 1)
+ idet->filter_line = (void*)filter_line_c_16bit;
+
+ filter(ctx);
+
+ ff_draw_slice(ctx->outputs[0], 0, link->h, 1);
- static void null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { }
++ return ff_end_frame(ctx->outputs[0]);
+}
+
+static int request_frame(AVFilterLink *link)
+{
+ AVFilterContext *ctx = link->src;
+ IDETContext *idet = ctx->priv;
+
+ do {
+ int ret;
+
+ if ((ret = ff_request_frame(link->src->inputs[0])))
+ return ret;
+ } while (!idet->cur);
+
+ return 0;
+}
+
+static int poll_frame(AVFilterLink *link)
+{
+ IDETContext *idet = link->src->priv;
+ int ret, val;
+
+ val = ff_poll_frame(link->src->inputs[0]);
+
+ if (val >= 1 && !idet->next) { //FIXME change API to not require this red tape
+ if ((ret = ff_request_frame(link->src->inputs[0])) < 0)
+ return ret;
+ val = ff_poll_frame(link->src->inputs[0]);
+ }
+ assert(idet->next || !val);
+
+ return val;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ IDETContext *idet = ctx->priv;
+
+ av_log(ctx, AV_LOG_INFO, "Single frame detection: TFF:%d BFF:%d Progressive:%d Undetermined:%d\n",
+ idet->prestat[TFF],
+ idet->prestat[BFF],
+ idet->prestat[PROGRSSIVE],
+ idet->prestat[UNDETERMINED]
+ );
+ av_log(ctx, AV_LOG_INFO, "Multi frame detection: TFF:%d BFF:%d Progressive:%d Undetermined:%d\n",
+ idet->poststat[TFF],
+ idet->poststat[BFF],
+ idet->poststat[PROGRSSIVE],
+ idet->poststat[UNDETERMINED]
+ );
+
+ if (idet->prev) avfilter_unref_buffer(idet->prev);
+ if (idet->cur ) avfilter_unref_buffer(idet->cur );
+ if (idet->next) avfilter_unref_buffer(idet->next);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+ PIX_FMT_YUV420P,
+ PIX_FMT_YUV422P,
+ PIX_FMT_YUV444P,
+ PIX_FMT_YUV410P,
+ PIX_FMT_YUV411P,
+ PIX_FMT_GRAY8,
+ PIX_FMT_YUVJ420P,
+ PIX_FMT_YUVJ422P,
+ PIX_FMT_YUVJ444P,
+ AV_NE( PIX_FMT_GRAY16BE, PIX_FMT_GRAY16LE ),
+ PIX_FMT_YUV440P,
+ PIX_FMT_YUVJ440P,
+ AV_NE( PIX_FMT_YUV420P10BE, PIX_FMT_YUV420P10LE ),
+ AV_NE( PIX_FMT_YUV422P10BE, PIX_FMT_YUV422P10LE ),
+ AV_NE( PIX_FMT_YUV444P10BE, PIX_FMT_YUV444P10LE ),
+ AV_NE( PIX_FMT_YUV420P16BE, PIX_FMT_YUV420P16LE ),
+ AV_NE( PIX_FMT_YUV422P16BE, PIX_FMT_YUV422P16LE ),
+ AV_NE( PIX_FMT_YUV444P16BE, PIX_FMT_YUV444P16LE ),
+ PIX_FMT_YUVA420P,
+ PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx, const char *args)
+{
+ IDETContext *idet = ctx->priv;
+
+ idet->csp = NULL;
+
+ idet->interlace_threshold = 1.01;
+ idet->progressive_threshold = 2.5;
+
+ if (args) sscanf(args, "%f:%f", &idet->interlace_threshold, &idet->progressive_threshold);
+
+ idet->last_type = UNDETERMINED;
+ memset(idet->history, UNDETERMINED, HIST_SIZE);
+
+ idet->filter_line = filter_line_c;
+
+ return 0;
+}
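+/* Both thresholds are positional; a hypothetical invocation (assuming the
+ * usual -vf option syntax) such as "idet=1.04:1.5" would override the
+ * interlace and progressive thresholds respectively. */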
+
++static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { return 0; }
+
+AVFilter avfilter_vf_idet = {
+ .name = "idet",
+ .description = NULL_IF_CONFIG_SMALL("Interlace detection filter."),
+
+ .priv_size = sizeof(IDETContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+
+ .inputs = (const AVFilterPad[]) {{ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .start_frame = start_frame,
+ .draw_slice = null_draw_slice,
+ .end_frame = end_frame,
+ .rej_perms = AV_PERM_REUSE2, },
+ { .name = NULL}},
+
+ .outputs = (const AVFilterPad[]) {{ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .poll_frame = poll_frame,
+ .request_frame = request_frame, },
+ { .name = NULL}},
+};
--- /dev/null
- static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
+/*
+ * Copyright (c) 2011 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Parts of this file have been stolen from mplayer
+ */
+
+/**
+ * @file
+ */
+
+#include "avfilter.h"
+#include "video.h"
+#include "formats.h"
+#include "internal.h"
+#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/imgutils.h"
+
+#include "libmpcodecs/vf.h"
+#include "libmpcodecs/img_format.h"
+#include "libmpcodecs/cpudetect.h"
+#include "libmpcodecs/vd_ffmpeg.h"
+#include "libmpcodecs/vf_scale.h"
+#include "libmpcodecs/libvo/fastmemcpy.h"
+
+#include "libswscale/swscale.h"
+
+
+//FIXME maybe link the orig in
+//XXX: identical pix_fmt must be following with each others
+static const struct {
+ int fmt;
+ enum PixelFormat pix_fmt;
+} conversion_map[] = {
+ {IMGFMT_ARGB, PIX_FMT_ARGB},
+ {IMGFMT_BGRA, PIX_FMT_BGRA},
+ {IMGFMT_BGR24, PIX_FMT_BGR24},
+ {IMGFMT_BGR16BE, PIX_FMT_RGB565BE},
+ {IMGFMT_BGR16LE, PIX_FMT_RGB565LE},
+ {IMGFMT_BGR15BE, PIX_FMT_RGB555BE},
+ {IMGFMT_BGR15LE, PIX_FMT_RGB555LE},
+ {IMGFMT_BGR12BE, PIX_FMT_RGB444BE},
+ {IMGFMT_BGR12LE, PIX_FMT_RGB444LE},
+ {IMGFMT_BGR8, PIX_FMT_RGB8},
+ {IMGFMT_BGR4, PIX_FMT_RGB4},
+ {IMGFMT_BGR1, PIX_FMT_MONOBLACK},
+ {IMGFMT_RGB1, PIX_FMT_MONOBLACK},
+ {IMGFMT_RG4B, PIX_FMT_BGR4_BYTE},
+ {IMGFMT_BG4B, PIX_FMT_RGB4_BYTE},
+ {IMGFMT_RGB48LE, PIX_FMT_RGB48LE},
+ {IMGFMT_RGB48BE, PIX_FMT_RGB48BE},
+ {IMGFMT_ABGR, PIX_FMT_ABGR},
+ {IMGFMT_RGBA, PIX_FMT_RGBA},
+ {IMGFMT_RGB24, PIX_FMT_RGB24},
+ {IMGFMT_RGB16BE, PIX_FMT_BGR565BE},
+ {IMGFMT_RGB16LE, PIX_FMT_BGR565LE},
+ {IMGFMT_RGB15BE, PIX_FMT_BGR555BE},
+ {IMGFMT_RGB15LE, PIX_FMT_BGR555LE},
+ {IMGFMT_RGB12BE, PIX_FMT_BGR444BE},
+ {IMGFMT_RGB12LE, PIX_FMT_BGR444LE},
+ {IMGFMT_RGB8, PIX_FMT_BGR8},
+ {IMGFMT_RGB4, PIX_FMT_BGR4},
+ {IMGFMT_BGR8, PIX_FMT_PAL8},
+ {IMGFMT_YUY2, PIX_FMT_YUYV422},
+ {IMGFMT_UYVY, PIX_FMT_UYVY422},
+ {IMGFMT_NV12, PIX_FMT_NV12},
+ {IMGFMT_NV21, PIX_FMT_NV21},
+ {IMGFMT_Y800, PIX_FMT_GRAY8},
+ {IMGFMT_Y8, PIX_FMT_GRAY8},
+ {IMGFMT_YVU9, PIX_FMT_YUV410P},
+ {IMGFMT_IF09, PIX_FMT_YUV410P},
+ {IMGFMT_YV12, PIX_FMT_YUV420P},
+ {IMGFMT_I420, PIX_FMT_YUV420P},
+ {IMGFMT_IYUV, PIX_FMT_YUV420P},
+ {IMGFMT_411P, PIX_FMT_YUV411P},
+ {IMGFMT_422P, PIX_FMT_YUV422P},
+ {IMGFMT_444P, PIX_FMT_YUV444P},
+ {IMGFMT_440P, PIX_FMT_YUV440P},
+
+ {IMGFMT_420A, PIX_FMT_YUVA420P},
+
+ {IMGFMT_420P16_LE, PIX_FMT_YUV420P16LE},
+ {IMGFMT_420P16_BE, PIX_FMT_YUV420P16BE},
+ {IMGFMT_422P16_LE, PIX_FMT_YUV422P16LE},
+ {IMGFMT_422P16_BE, PIX_FMT_YUV422P16BE},
+ {IMGFMT_444P16_LE, PIX_FMT_YUV444P16LE},
+ {IMGFMT_444P16_BE, PIX_FMT_YUV444P16BE},
+
+ // YUVJ are YUV formats that use the full Y range and not just
+ // 16 - 235 (see colorspaces.txt).
+ // Currently they are all treated the same way.
+ {IMGFMT_YV12, PIX_FMT_YUVJ420P},
+ {IMGFMT_422P, PIX_FMT_YUVJ422P},
+ {IMGFMT_444P, PIX_FMT_YUVJ444P},
+ {IMGFMT_440P, PIX_FMT_YUVJ440P},
+
+ {IMGFMT_XVMC_MOCO_MPEG2, PIX_FMT_XVMC_MPEG2_MC},
+ {IMGFMT_XVMC_IDCT_MPEG2, PIX_FMT_XVMC_MPEG2_IDCT},
+ {IMGFMT_VDPAU_MPEG1, PIX_FMT_VDPAU_MPEG1},
+ {IMGFMT_VDPAU_MPEG2, PIX_FMT_VDPAU_MPEG2},
+ {IMGFMT_VDPAU_H264, PIX_FMT_VDPAU_H264},
+ {IMGFMT_VDPAU_WMV3, PIX_FMT_VDPAU_WMV3},
+ {IMGFMT_VDPAU_VC1, PIX_FMT_VDPAU_VC1},
+ {IMGFMT_VDPAU_MPEG4, PIX_FMT_VDPAU_MPEG4},
+ {0, PIX_FMT_NONE}
+};
+
+//copied from vf.c
+extern const vf_info_t vf_info_1bpp;
+extern const vf_info_t vf_info_ass;
+extern const vf_info_t vf_info_bmovl;
+extern const vf_info_t vf_info_crop;
+extern const vf_info_t vf_info_decimate;
+extern const vf_info_t vf_info_denoise3d;
+extern const vf_info_t vf_info_detc;
+extern const vf_info_t vf_info_dint;
+extern const vf_info_t vf_info_divtc;
+extern const vf_info_t vf_info_down3dright;
+extern const vf_info_t vf_info_dsize;
+extern const vf_info_t vf_info_dvbscale;
+extern const vf_info_t vf_info_eq2;
+extern const vf_info_t vf_info_eq;
+extern const vf_info_t vf_info_expand;
+extern const vf_info_t vf_info_field;
+extern const vf_info_t vf_info_fil;
+extern const vf_info_t vf_info_filmdint;
+extern const vf_info_t vf_info_fixpts;
+extern const vf_info_t vf_info_flip;
+extern const vf_info_t vf_info_format;
+extern const vf_info_t vf_info_framestep;
+extern const vf_info_t vf_info_fspp;
+extern const vf_info_t vf_info_geq;
+extern const vf_info_t vf_info_halfpack;
+extern const vf_info_t vf_info_harddup;
+extern const vf_info_t vf_info_hqdn3d;
+extern const vf_info_t vf_info_hue;
+extern const vf_info_t vf_info_il;
+extern const vf_info_t vf_info_ilpack;
+extern const vf_info_t vf_info_ivtc;
+extern const vf_info_t vf_info_kerndeint;
+extern const vf_info_t vf_info_lavc;
+extern const vf_info_t vf_info_lavcdeint;
+extern const vf_info_t vf_info_mcdeint;
+extern const vf_info_t vf_info_noformat;
+extern const vf_info_t vf_info_noise;
+extern const vf_info_t vf_info_ow;
+extern const vf_info_t vf_info_palette;
+extern const vf_info_t vf_info_perspective;
+extern const vf_info_t vf_info_phase;
+extern const vf_info_t vf_info_pp7;
+extern const vf_info_t vf_info_pp;
+extern const vf_info_t vf_info_pullup;
+extern const vf_info_t vf_info_qp;
+extern const vf_info_t vf_info_rectangle;
+extern const vf_info_t vf_info_rotate;
+extern const vf_info_t vf_info_sab;
+extern const vf_info_t vf_info_scale;
+extern const vf_info_t vf_info_smartblur;
+extern const vf_info_t vf_info_softpulldown;
+extern const vf_info_t vf_info_softskip;
+extern const vf_info_t vf_info_spp;
+extern const vf_info_t vf_info_stereo3d;
+extern const vf_info_t vf_info_telecine;
+extern const vf_info_t vf_info_test;
+extern const vf_info_t vf_info_tfields;
+extern const vf_info_t vf_info_tile;
+extern const vf_info_t vf_info_tinterlace;
+extern const vf_info_t vf_info_unsharp;
+extern const vf_info_t vf_info_uspp;
+extern const vf_info_t vf_info_vo;
+extern const vf_info_t vf_info_yadif;
+extern const vf_info_t vf_info_yuvcsp;
+extern const vf_info_t vf_info_yvu9;
+extern const vf_info_t vf_info_zrmjpeg;
+
+
+static const vf_info_t* const filters[]={
+ &vf_info_decimate,
+ &vf_info_denoise3d,
+ &vf_info_detc,
+ &vf_info_dint,
+ &vf_info_divtc,
+ &vf_info_down3dright,
+ &vf_info_dsize,
+ &vf_info_eq2,
+ &vf_info_eq,
+ &vf_info_field,
+ &vf_info_fil,
+// &vf_info_filmdint, cmmx.h vd.h ‘opt_screen_size_x’
+ &vf_info_fixpts,
+ &vf_info_framestep,
+ &vf_info_fspp,
+ &vf_info_geq,
+ &vf_info_harddup,
+ &vf_info_hqdn3d,
+ &vf_info_hue,
+ &vf_info_il,
+ &vf_info_ilpack,
+ &vf_info_ivtc,
+ &vf_info_kerndeint,
+ &vf_info_mcdeint,
+ &vf_info_noise,
+ &vf_info_ow,
+ &vf_info_palette,
+ &vf_info_perspective,
+ &vf_info_phase,
+ &vf_info_pp,
+ &vf_info_pp7,
+ &vf_info_pullup,
+ &vf_info_qp,
+ &vf_info_rectangle,
+ &vf_info_rotate,
+ &vf_info_sab,
+ &vf_info_smartblur,
+ &vf_info_softpulldown,
+ &vf_info_softskip,
+ &vf_info_spp,
+ &vf_info_stereo3d,
+ &vf_info_telecine,
+ &vf_info_tile,
+ &vf_info_tinterlace,
+ &vf_info_unsharp,
+ &vf_info_uspp,
+ &vf_info_yuvcsp,
+ &vf_info_yvu9,
+
+ NULL
+};
+
+/*
+Unsupported filters
+1bpp
+ass
+bmovl
+crop
+dvbscale
+flip
+expand
+format
+halfpack
+lavc
+lavcdeint
+noformat
+pp
+scale
+tfields
+vo
+yadif
+zrmjpeg
+*/
+
+CpuCaps gCpuCaps; //FIXME initialize this so optimizations work
+
+
+static void sws_getFlagsAndFilterFromCmdLine(int *flags, SwsFilter **srcFilterParam, SwsFilter **dstFilterParam)
+{
+ static int firstTime=1;
+ *flags=0;
+
+#if ARCH_X86
+ if(gCpuCaps.hasMMX)
+ __asm__ volatile("emms\n\t"::: "memory"); //FIXME this should not be required but it IS (even for non-MMX versions)
+#endif
+ if(firstTime)
+ {
+ firstTime=0;
+ *flags= SWS_PRINT_INFO;
+ }
+ else if( mp_msg_test(MSGT_VFILTER,MSGL_DBG2) ) *flags= SWS_PRINT_INFO;
+
+ switch(SWS_BILINEAR)
+ {
+ case 0: *flags|= SWS_FAST_BILINEAR; break;
+ case 1: *flags|= SWS_BILINEAR; break;
+ case 2: *flags|= SWS_BICUBIC; break;
+ case 3: *flags|= SWS_X; break;
+ case 4: *flags|= SWS_POINT; break;
+ case 5: *flags|= SWS_AREA; break;
+ case 6: *flags|= SWS_BICUBLIN; break;
+ case 7: *flags|= SWS_GAUSS; break;
+ case 8: *flags|= SWS_SINC; break;
+ case 9: *flags|= SWS_LANCZOS; break;
+ case 10:*flags|= SWS_SPLINE; break;
+ default:*flags|= SWS_BILINEAR; break;
+ }
+
+ *srcFilterParam= NULL;
+ *dstFilterParam= NULL;
+}
+
+//exact copy from vf_scale.c
+// will use sws_flags & src_filter (from cmd line)
+struct SwsContext *sws_getContextFromCmdLine(int srcW, int srcH, int srcFormat, int dstW, int dstH, int dstFormat)
+{
+ int flags, i;
+ SwsFilter *dstFilterParam, *srcFilterParam;
+ enum PixelFormat dfmt, sfmt;
+
+ for(i=0; conversion_map[i].fmt && dstFormat != conversion_map[i].fmt; i++);
+ dfmt= conversion_map[i].pix_fmt;
+ for(i=0; conversion_map[i].fmt && srcFormat != conversion_map[i].fmt; i++);
+ sfmt= conversion_map[i].pix_fmt;
+
+ if (srcFormat == IMGFMT_RGB8 || srcFormat == IMGFMT_BGR8) sfmt = PIX_FMT_PAL8;
+ sws_getFlagsAndFilterFromCmdLine(&flags, &srcFilterParam, &dstFilterParam);
+
+ return sws_getContext(srcW, srcH, sfmt, dstW, dstH, dfmt, flags , srcFilterParam, dstFilterParam, NULL);
+}
+
+typedef struct {
+ vf_instance_t vf;
+ vf_instance_t next_vf;
+ AVFilterContext *avfctx;
+ int frame_returned;
+} MPContext;
+
+void mp_msg(int mod, int lev, const char *format, ... ){
+ va_list va;
+ va_start(va, format);
+ //FIXME convert lev/mod
+ av_vlog(NULL, AV_LOG_DEBUG, format, va);
+ va_end(va);
+}
+
+int mp_msg_test(int mod, int lev){
+ return 123;
+}
+
+void init_avcodec(void)
+{
+ //maybe we should init, but it's kinda 1. unneeded 2. a bit impolite to do from here
+}
+
+//Exact copy of vf.c
+void vf_clone_mpi_attributes(mp_image_t* dst, mp_image_t* src){
+ dst->pict_type= src->pict_type;
+ dst->fields = src->fields;
+ dst->qscale_type= src->qscale_type;
+ if(dst->width == src->width && dst->height == src->height){
+ dst->qstride= src->qstride;
+ dst->qscale= src->qscale;
+ }
+}
+
+//Exact copy of vf.c
+void vf_next_draw_slice(struct vf_instance *vf,unsigned char** src, int * stride,int w, int h, int x, int y){
+ if (vf->next->draw_slice) {
+ vf->next->draw_slice(vf->next,src,stride,w,h,x,y);
+ return;
+ }
+ if (!vf->dmpi) {
+ mp_msg(MSGT_VFILTER,MSGL_ERR,"draw_slice: dmpi not stored by vf_%s\n", vf->info->name);
+ return;
+ }
+ if (!(vf->dmpi->flags & MP_IMGFLAG_PLANAR)) {
+ memcpy_pic(vf->dmpi->planes[0]+y*vf->dmpi->stride[0]+vf->dmpi->bpp/8*x,
+ src[0], vf->dmpi->bpp/8*w, h, vf->dmpi->stride[0], stride[0]);
+ return;
+ }
+ memcpy_pic(vf->dmpi->planes[0]+y*vf->dmpi->stride[0]+x, src[0],
+ w, h, vf->dmpi->stride[0], stride[0]);
+ memcpy_pic(vf->dmpi->planes[1]+(y>>vf->dmpi->chroma_y_shift)*vf->dmpi->stride[1]+(x>>vf->dmpi->chroma_x_shift),
+ src[1], w>>vf->dmpi->chroma_x_shift, h>>vf->dmpi->chroma_y_shift, vf->dmpi->stride[1], stride[1]);
+ memcpy_pic(vf->dmpi->planes[2]+(y>>vf->dmpi->chroma_y_shift)*vf->dmpi->stride[2]+(x>>vf->dmpi->chroma_x_shift),
+ src[2], w>>vf->dmpi->chroma_x_shift, h>>vf->dmpi->chroma_y_shift, vf->dmpi->stride[2], stride[2]);
+}
+
+//Exact copy of vf.c
+void vf_mpi_clear(mp_image_t* mpi,int x0,int y0,int w,int h){
+ int y;
+ if(mpi->flags&MP_IMGFLAG_PLANAR){
+ y0&=~1;h+=h&1;
+ if(x0==0 && w==mpi->width){
+ // full width clear:
+ memset(mpi->planes[0]+mpi->stride[0]*y0,0,mpi->stride[0]*h);
+ memset(mpi->planes[1]+mpi->stride[1]*(y0>>mpi->chroma_y_shift),128,mpi->stride[1]*(h>>mpi->chroma_y_shift));
+ memset(mpi->planes[2]+mpi->stride[2]*(y0>>mpi->chroma_y_shift),128,mpi->stride[2]*(h>>mpi->chroma_y_shift));
+ } else
+ for(y=y0;y<y0+h;y+=2){
+ memset(mpi->planes[0]+x0+mpi->stride[0]*y,0,w);
+ memset(mpi->planes[0]+x0+mpi->stride[0]*(y+1),0,w);
+ memset(mpi->planes[1]+(x0>>mpi->chroma_x_shift)+mpi->stride[1]*(y>>mpi->chroma_y_shift),128,(w>>mpi->chroma_x_shift));
+ memset(mpi->planes[2]+(x0>>mpi->chroma_x_shift)+mpi->stride[2]*(y>>mpi->chroma_y_shift),128,(w>>mpi->chroma_x_shift));
+ }
+ return;
+ }
+ // packed:
+ for(y=y0;y<y0+h;y++){
+ unsigned char* dst=mpi->planes[0]+mpi->stride[0]*y+(mpi->bpp>>3)*x0;
+ if(mpi->flags&MP_IMGFLAG_YUV){
+ unsigned int* p=(unsigned int*) dst;
+ int size=(mpi->bpp>>3)*w/4;
+ int i;
+#if HAVE_BIGENDIAN
+#define CLEAR_PACKEDYUV_PATTERN 0x00800080
+#define CLEAR_PACKEDYUV_PATTERN_SWAPPED 0x80008000
+#else
+#define CLEAR_PACKEDYUV_PATTERN 0x80008000
+#define CLEAR_PACKEDYUV_PATTERN_SWAPPED 0x00800080
+#endif
+ if(mpi->flags&MP_IMGFLAG_SWAPPED){
+ for(i=0;i<size-3;i+=4) p[i]=p[i+1]=p[i+2]=p[i+3]=CLEAR_PACKEDYUV_PATTERN_SWAPPED;
+ for(;i<size;i++) p[i]=CLEAR_PACKEDYUV_PATTERN_SWAPPED;
+ } else {
+ for(i=0;i<size-3;i+=4) p[i]=p[i+1]=p[i+2]=p[i+3]=CLEAR_PACKEDYUV_PATTERN;
+ for(;i<size;i++) p[i]=CLEAR_PACKEDYUV_PATTERN;
+ }
+ } else
+ memset(dst,0,(mpi->bpp>>3)*w);
+ }
+}
+
+int vf_next_query_format(struct vf_instance *vf, unsigned int fmt){
+ return 1;
+}
+
+//used by delogo
+unsigned int vf_match_csp(vf_instance_t** vfp,const unsigned int* list,unsigned int preferred){
+ return preferred;
+}
+
+mp_image_t* vf_get_image(vf_instance_t* vf, unsigned int outfmt, int mp_imgtype, int mp_imgflag, int w, int h){
+ MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, next_vf));
+ mp_image_t* mpi=NULL;
+ int w2;
+ int number = mp_imgtype >> 16;
+
+ av_assert0(vf->next == NULL); // all existing filters call this just on next
+
+ //vf_dint needs these as it calls vf_get_image() before configuring the output
+ if(vf->w==0 && w>0) vf->w=w;
+ if(vf->h==0 && h>0) vf->h=h;
+
+ av_assert0(w == -1 || w >= vf->w);
+ av_assert0(h == -1 || h >= vf->h);
+ av_assert0(vf->w > 0);
+ av_assert0(vf->h > 0);
+
+ av_log(m->avfctx, AV_LOG_DEBUG, "get_image: %d:%d, vf: %d:%d\n", w,h,vf->w,vf->h);
+
+ if (w == -1) w = vf->w;
+ if (h == -1) h = vf->h;
+
+ w2=(mp_imgflag&MP_IMGFLAG_ACCEPT_ALIGNED_STRIDE)?((w+15)&(~15)):w;
+
+ // Note: we should call libvo first to check if it supports direct rendering
+ // and if not, then fallback to software buffers:
+ switch(mp_imgtype & 0xff){
+ case MP_IMGTYPE_EXPORT:
+ if(!vf->imgctx.export_images[0]) vf->imgctx.export_images[0]=new_mp_image(w2,h);
+ mpi=vf->imgctx.export_images[0];
+ break;
+ case MP_IMGTYPE_STATIC:
+ if(!vf->imgctx.static_images[0]) vf->imgctx.static_images[0]=new_mp_image(w2,h);
+ mpi=vf->imgctx.static_images[0];
+ break;
+ case MP_IMGTYPE_TEMP:
+ if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=new_mp_image(w2,h);
+ mpi=vf->imgctx.temp_images[0];
+ break;
+ case MP_IMGTYPE_IPB:
+ if(!(mp_imgflag&MP_IMGFLAG_READABLE)){ // B frame:
+ if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=new_mp_image(w2,h);
+ mpi=vf->imgctx.temp_images[0];
+ break;
+ }
+ case MP_IMGTYPE_IP:
+ if(!vf->imgctx.static_images[vf->imgctx.static_idx]) vf->imgctx.static_images[vf->imgctx.static_idx]=new_mp_image(w2,h);
+ mpi=vf->imgctx.static_images[vf->imgctx.static_idx];
+ vf->imgctx.static_idx^=1;
+ break;
+ case MP_IMGTYPE_NUMBERED:
+ if (number == -1) {
+ int i;
+ for (i = 0; i < NUM_NUMBERED_MPI; i++)
+ if (!vf->imgctx.numbered_images[i] || !vf->imgctx.numbered_images[i]->usage_count)
+ break;
+ number = i;
+ }
+ if (number < 0 || number >= NUM_NUMBERED_MPI) return NULL;
+ if (!vf->imgctx.numbered_images[number]) vf->imgctx.numbered_images[number] = new_mp_image(w2,h);
+ mpi = vf->imgctx.numbered_images[number];
+ mpi->number = number;
+ break;
+ }
+ if(mpi){
+ mpi->type=mp_imgtype;
+ mpi->w=vf->w; mpi->h=vf->h;
+ // keep buffer allocation status & color flags only:
+// mpi->flags&=~(MP_IMGFLAG_PRESERVE|MP_IMGFLAG_READABLE|MP_IMGFLAG_DIRECT);
+ mpi->flags&=MP_IMGFLAG_ALLOCATED|MP_IMGFLAG_TYPE_DISPLAYED|MP_IMGFLAGMASK_COLORS;
+ // accept restrictions, draw_slice and palette flags only:
+ mpi->flags|=mp_imgflag&(MP_IMGFLAGMASK_RESTRICTIONS|MP_IMGFLAG_DRAW_CALLBACK|MP_IMGFLAG_RGB_PALETTE);
+ if(!vf->draw_slice) mpi->flags&=~MP_IMGFLAG_DRAW_CALLBACK;
+ if(mpi->width!=w2 || mpi->height!=h){
+// printf("vf.c: MPI parameters changed! %dx%d -> %dx%d \n", mpi->width,mpi->height,w2,h);
+ if(mpi->flags&MP_IMGFLAG_ALLOCATED){
+ if(mpi->width<w2 || mpi->height<h){
+ // need to re-allocate buffer memory:
+ av_free(mpi->planes[0]);
+ mpi->flags&=~MP_IMGFLAG_ALLOCATED;
+ mp_msg(MSGT_VFILTER,MSGL_V,"vf.c: have to REALLOCATE buffer memory :(\n");
+ }
+// } else {
+ } {
+ mpi->width=w2; mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift;
+ mpi->height=h; mpi->chroma_height=(h + (1<<mpi->chroma_y_shift) - 1)>>mpi->chroma_y_shift;
+ }
+ }
+ if(!mpi->bpp) mp_image_setfmt(mpi,outfmt);
+ if(!(mpi->flags&MP_IMGFLAG_ALLOCATED) && mpi->type>MP_IMGTYPE_EXPORT){
+
+ av_assert0(!vf->get_image);
+ // check libvo first!
+ if(vf->get_image) vf->get_image(vf,mpi);
+
+ if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
+ // non-direct and not yet allocated image. allocate it!
+ if (!mpi->bpp) { // no way we can allocate this
+ mp_msg(MSGT_DECVIDEO, MSGL_FATAL,
+ "vf_get_image: Tried to allocate a format that can not be allocated!\n");
+ return NULL;
+ }
+
+ // check if the codec prefers an aligned stride:
+ if(mp_imgflag&MP_IMGFLAG_PREFER_ALIGNED_STRIDE){
+ int align=(mpi->flags&MP_IMGFLAG_PLANAR &&
+ mpi->flags&MP_IMGFLAG_YUV) ?
+ (8<<mpi->chroma_x_shift)-1 : 15; // -- maybe FIXME
+ w2=((w+align)&(~align));
+ if(mpi->width!=w2){
+#if 0
+ // we have to change width... check if we CAN do it:
+ int flags=vf->query_format(vf,outfmt); // should not fail
+ if(!(flags&3)) mp_msg(MSGT_DECVIDEO,MSGL_WARN,"??? vf_get_image{vf->query_format(outfmt)} failed!\n");
+// printf("query -> 0x%X \n",flags);
+ if(flags&VFCAP_ACCEPT_STRIDE){
+#endif
+ mpi->width=w2;
+ mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift;
+// }
+ }
+ }
+
+ mp_image_alloc_planes(mpi);
+// printf("clearing img!\n");
+ vf_mpi_clear(mpi,0,0,mpi->width,mpi->height);
+ }
+ }
+ av_assert0(!vf->start_slice);
+ if(mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)
+ if(vf->start_slice) vf->start_slice(vf,mpi);
+ if(!(mpi->flags&MP_IMGFLAG_TYPE_DISPLAYED)){
+ mp_msg(MSGT_DECVIDEO,MSGL_V,"*** [%s] %s%s mp_image_t, %dx%dx%dbpp %s %s, %d bytes\n",
+ "NULL"/*vf->info->name*/,
+ (mpi->type==MP_IMGTYPE_EXPORT)?"Exporting":
+ ((mpi->flags&MP_IMGFLAG_DIRECT)?"Direct Rendering":"Allocating"),
+ (mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)?" (slices)":"",
+ mpi->width,mpi->height,mpi->bpp,
+ (mpi->flags&MP_IMGFLAG_YUV)?"YUV":((mpi->flags&MP_IMGFLAG_SWAPPED)?"BGR":"RGB"),
+ (mpi->flags&MP_IMGFLAG_PLANAR)?"planar":"packed",
+ mpi->bpp*mpi->width*mpi->height/8);
+ mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"(imgfmt: %x, planes: %p,%p,%p strides: %d,%d,%d, chroma: %dx%d, shift: h:%d,v:%d)\n",
+ mpi->imgfmt, mpi->planes[0], mpi->planes[1], mpi->planes[2],
+ mpi->stride[0], mpi->stride[1], mpi->stride[2],
+ mpi->chroma_width, mpi->chroma_height, mpi->chroma_x_shift, mpi->chroma_y_shift);
+ mpi->flags|=MP_IMGFLAG_TYPE_DISPLAYED;
+ }
+
+ mpi->qscale = NULL;
+ }
+ mpi->usage_count++;
+// printf("\rVF_MPI: %p %p %p %d %d %d \n",
+// mpi->planes[0],mpi->planes[1],mpi->planes[2],
+// mpi->stride[0],mpi->stride[1],mpi->stride[2]);
+ return mpi;
+}
+
+
+int vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts){
+ MPContext *m= (void*)vf;
+ AVFilterLink *outlink = m->avfctx->outputs[0];
+ AVFilterBuffer *pic = av_mallocz(sizeof(AVFilterBuffer));
+ AVFilterBufferRef *picref = av_mallocz(sizeof(AVFilterBufferRef));
+ int i;
+
+ av_assert0(vf->next);
+
+ av_log(m->avfctx, AV_LOG_DEBUG, "vf_next_put_image\n");
+
+ if (!pic || !picref)
+ goto fail;
+
+ picref->buf = pic;
+ picref->buf->please_use_av_free= (void*)av_free;
+ if (!(picref->video = av_mallocz(sizeof(AVFilterBufferRefVideoProps))))
+ goto fail;
+
+ pic->w = picref->video->w = mpi->w;
+ pic->h = picref->video->h = mpi->h;
+
+ /* make sure the buffer gets read permission or it's useless for output */
+ picref->perms = AV_PERM_READ | AV_PERM_REUSE2;
+// av_assert0(mpi->flags&MP_IMGFLAG_READABLE);
+ if(!(mpi->flags&MP_IMGFLAG_PRESERVE))
+ picref->perms |= AV_PERM_WRITE;
+
+ pic->refcount = 1;
+ picref->type = AVMEDIA_TYPE_VIDEO;
+
+ for(i=0; conversion_map[i].fmt && mpi->imgfmt != conversion_map[i].fmt; i++);
+ pic->format = picref->format = conversion_map[i].pix_fmt;
+
+ memcpy(pic->data, mpi->planes, FFMIN(sizeof(pic->data) , sizeof(mpi->planes)));
+ memcpy(pic->linesize, mpi->stride, FFMIN(sizeof(pic->linesize), sizeof(mpi->stride)));
+ memcpy(picref->data, pic->data, sizeof(picref->data));
+ memcpy(picref->linesize, pic->linesize, sizeof(picref->linesize));
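+ /* Only plane pointers and line sizes are copied above; the pixel data
+ * itself stays in the mp_image and is not duplicated. */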
+
+ if(pts != MP_NOPTS_VALUE)
+ picref->pts= pts * av_q2d(outlink->time_base);
+
+ ff_start_frame(outlink, avfilter_ref_buffer(picref, ~0));
+ ff_draw_slice(outlink, 0, picref->video->h, 1);
+ ff_end_frame(outlink);
+ avfilter_unref_buffer(picref);
+ m->frame_returned++;
+
+ return 1;
+fail:
+ if (picref && picref->video)
+ av_free(picref->video);
+ av_free(picref);
+ av_free(pic);
+ return 0;
+}
+
+int vf_next_config(struct vf_instance *vf,
+ int width, int height, int d_width, int d_height,
+ unsigned int voflags, unsigned int outfmt){
+
+ av_assert0(width>0 && height>0);
+ vf->next->w = width; vf->next->h = height;
+
+ return 1;
+#if 0
+ int flags=vf->next->query_format(vf->next,outfmt);
+ if(!flags){
+ // hmm. colorspace mismatch!!!
+ //this is fatal for us ATM
+ return 0;
+ }
+ mp_msg(MSGT_VFILTER,MSGL_V,"REQ: flags=0x%X req=0x%X \n",flags,vf->default_reqs);
+ miss=vf->default_reqs - (flags&vf->default_reqs);
+ if(miss&VFCAP_ACCEPT_STRIDE){
+ // vf requires stride support but vf->next doesn't support it!
+ // let's insert the 'expand' filter, it does the job for us:
+ vf_instance_t* vf2=vf_open_filter(vf->next,"expand",NULL);
+ if(!vf2) return 0; // shouldn't happen!
+ vf->next=vf2;
+ }
+ vf->next->w = width; vf->next->h = height;
+#endif
+ return 1;
+}
+
+int vf_next_control(struct vf_instance *vf, int request, void* data){
+ MPContext *m= (void*)vf;
+ av_log(m->avfctx, AV_LOG_DEBUG, "Received control %d\n", request);
+ return 0;
+}
+
+static int vf_default_query_format(struct vf_instance *vf, unsigned int fmt){
+ MPContext *m= (void*)vf;
+ int i;
+ av_log(m->avfctx, AV_LOG_DEBUG, "query %X\n", fmt);
+
+ for(i=0; conversion_map[i].fmt; i++){
+ if(fmt==conversion_map[i].fmt)
+ return 1; //we support all
+ }
+ return 0;
+}
+
+
+static av_cold int init(AVFilterContext *ctx, const char *args)
+{
+ MPContext *m = ctx->priv;
+ char name[256];
+ int i;
+
+ m->avfctx= ctx;
+
+ if(!args || 1!=sscanf(args, "%255[^:=]", name)){
+ av_log(ctx, AV_LOG_ERROR, "Invalid parameter.\n");
+ return AVERROR(EINVAL);
+ }
+ args+= strlen(name)+1;
+
+ for(i=0; ;i++){
+ if(!filters[i] || !strcmp(name, filters[i]->name))
+ break;
+ }
+
+ if(!filters[i]){
+ av_log(ctx, AV_LOG_ERROR, "Unknown filter %s\n", name);
+ return AVERROR(EINVAL);
+ }
+
+ av_log(ctx, AV_LOG_WARNING,
+ "'%s' is a wrapped MPlayer filter (libmpcodecs). This filter may be removed\n"
+ "once it has been ported to a native libavfilter.\n", name);
+
+ memset(&m->vf,0,sizeof(m->vf));
+ m->vf.info= filters[i];
+
+ m->vf.next = &m->next_vf;
+ m->vf.put_image = vf_next_put_image;
+ m->vf.config = vf_next_config;
+ m->vf.query_format= vf_default_query_format;
+ m->vf.control = vf_next_control;
+ m->vf.default_caps=VFCAP_ACCEPT_STRIDE;
+ m->vf.default_reqs=0;
+ if(m->vf.info->opts)
+ av_log(ctx, AV_LOG_ERROR, "opts / m_struct_set is unsupported\n");
+#if 0
+ if(vf->info->opts) { // vf_vo get some special argument
+ const m_struct_t* st = vf->info->opts;
+ void* vf_priv = m_struct_alloc(st);
+ int n;
+ for(n = 0 ; args && args[2*n] ; n++)
+ m_struct_set(st,vf_priv,args[2*n],args[2*n+1]);
+ vf->priv = vf_priv;
+ args = NULL;
+ } else // Otherwise we should have the '_oldargs_'
+ if(args && !strcmp(args[0],"_oldargs_"))
+ args = (char**)args[1];
+ else
+ args = NULL;
+#endif
+ if(m->vf.info->vf_open(&m->vf, args)<=0){
+ av_log(ctx, AV_LOG_ERROR, "vf_open() of %s with arg=%s failed\n", name, args);
+ return -1;
+ }
+
+ return 0;
+}
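+/* A hypothetical invocation, assuming the usual -vf option syntax:
+ *   -vf mp=hqdn3d
+ * wraps the libmpcodecs "hqdn3d" filter; anything after the first ':' or
+ * '=' is handed to the wrapped filter's vf_open(). */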
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ MPContext *m = ctx->priv;
+ vf_instance_t *vf = &m->vf;
+
+ while(vf){
+ vf_instance_t *next = vf->next;
+ if(vf->uninit)
+ vf->uninit(vf);
+ free_mp_image(vf->imgctx.static_images[0]);
+ free_mp_image(vf->imgctx.static_images[1]);
+ free_mp_image(vf->imgctx.temp_images[0]);
+ free_mp_image(vf->imgctx.export_images[0]);
+ vf = next;
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *avfmts=NULL;
+ MPContext *m = ctx->priv;
+ enum PixelFormat lastpixfmt = PIX_FMT_NONE;
+ int i;
+
+ for(i=0; conversion_map[i].fmt; i++){
+ av_log(ctx, AV_LOG_DEBUG, "query: %X\n", conversion_map[i].fmt);
+ if(m->vf.query_format(&m->vf, conversion_map[i].fmt)){
+ av_log(ctx, AV_LOG_DEBUG, "supported,adding\n");
+ if (conversion_map[i].pix_fmt != lastpixfmt) {
+ ff_add_format(&avfmts, conversion_map[i].pix_fmt);
+ lastpixfmt = conversion_map[i].pix_fmt;
+ }
+ }
+ }
+
+ //We assume all allowed input formats are also allowed output formats
+ ff_set_common_formats(ctx, avfmts);
+ return 0;
+}
+
+static int config_inprops(AVFilterLink *inlink)
+{
+ MPContext *m = inlink->dst->priv;
+ int i;
+ for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++);
+
+ av_assert0(conversion_map[i].fmt && inlink->w && inlink->h);
+
+ m->vf.fmt.have_configured = 1;
+ m->vf.fmt.orig_height = inlink->h;
+ m->vf.fmt.orig_width = inlink->w;
+ m->vf.fmt.orig_fmt = conversion_map[i].fmt;
+
+ if(m->vf.config(&m->vf, inlink->w, inlink->h, inlink->w, inlink->h, 0, conversion_map[i].fmt)<=0)
+ return -1;
+
+ return 0;
+}
+
+static int config_outprops(AVFilterLink *outlink)
+{
+ MPContext *m = outlink->src->priv;
+
+ outlink->w = m->next_vf.w;
+ outlink->h = m->next_vf.h;
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ MPContext *m = outlink->src->priv;
+ int ret;
+
+ av_log(m->avfctx, AV_LOG_DEBUG, "mp request_frame\n");
+
+ for(m->frame_returned=0; !m->frame_returned;){
+ ret=ff_request_frame(outlink->src->inputs[0]);
+ if(ret<0)
+ break;
+ }
+
+ av_log(m->avfctx, AV_LOG_DEBUG, "mp request_frame ret=%d\n", ret);
+ return ret;
+}
+
- static void null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
++static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
+{
++ return 0;
+}
+
- static void end_frame(AVFilterLink *inlink)
++static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
+{
++ return 0;
+}
+
++static int end_frame(AVFilterLink *inlink)
+{
+ MPContext *m = inlink->dst->priv;
+ AVFilterBufferRef *inpic = inlink->cur_buf;
+ int i;
+ double pts= MP_NOPTS_VALUE;
+ mp_image_t* mpi = new_mp_image(inpic->video->w, inpic->video->h);
+
+ if(inpic->pts != AV_NOPTS_VALUE)
+ pts= inpic->pts / av_q2d(inlink->time_base);
+
+ for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++);
+ mp_image_setfmt(mpi,conversion_map[i].fmt);
+
+ memcpy(mpi->planes, inpic->data, FFMIN(sizeof(inpic->data) , sizeof(mpi->planes)));
+ memcpy(mpi->stride, inpic->linesize, FFMIN(sizeof(inpic->linesize), sizeof(mpi->stride)));
+
+ // FIXME: pass interlaced & tff flags around
+
+ // mpi->flags|=MP_IMGFLAG_ALLOCATED; ?
+ mpi->flags |= MP_IMGFLAG_READABLE;
+ if(!(inpic->perms & AV_PERM_WRITE))
+ mpi->flags |= MP_IMGFLAG_PRESERVE;
+ if(m->vf.put_image(&m->vf, mpi, pts) == 0){
+ av_log(m->avfctx, AV_LOG_DEBUG, "put_image() says skip\n");
+ }
+ free_mp_image(mpi);
++ return 0;
+}
+
+AVFilter avfilter_vf_mp = {
+ .name = "mp",
+ .description = NULL_IF_CONFIG_SMALL("Apply a libmpcodecs filter to the input video."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(MPContext),
+ .query_formats = query_formats,
+
+ .inputs = (const AVFilterPad[]) {{ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .start_frame = start_frame,
+ .draw_slice = null_draw_slice,
+ .end_frame = end_frame,
+ .config_props = config_inprops,
+ .min_perms = AV_PERM_READ, },
+ { .name = NULL}},
+ .outputs = (const AVFilterPad[]) {{ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_outprops, },
+ { .name = NULL}},
+};
{
OverlayContext *over = ctx->priv;
- if (over->overpicref)
- avfilter_unref_bufferp(&over->overpicref);
+ av_freep(&over->x_expr);
+ av_freep(&over->y_expr);
+
+ avfilter_unref_bufferp(&over->overpicref);
+ ff_bufqueue_discard_all(&over->queue_main);
+ ff_bufqueue_discard_all(&over->queue_over);
}
static int query_formats(AVFilterContext *ctx)
}
}
-static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
+static int try_start_frame(AVFilterContext *ctx, AVFilterBufferRef *mainpic)
{
- AVFilterContext *ctx = inlink->dst;
+ OverlayContext *over = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
- AVFilterBufferRef *outpicref = inlink->cur_buf;
+ AVFilterBufferRef *next_overpic, *outpicref;
+
+ /* Discard obsolete overlay frames: if there is a next overlay frame whose
+ * pts is not after the main frame's, we can drop the current overlay. */
+ while (1) {
+ next_overpic = ff_bufqueue_peek(&over->queue_over, 0);
+ if (!next_overpic || next_overpic->pts > mainpic->pts)
+ break;
+ ff_bufqueue_get(&over->queue_over);
+ avfilter_unref_buffer(over->overpicref);
+ over->overpicref = next_overpic;
+ }
+ /* If there is no next frame and no EOF and the overlay frame is before
+ * the main frame, we cannot know yet whether it will be superseded. */
+ if (!over->queue_over.available && !over->overlay_eof &&
+ (!over->overpicref || over->overpicref->pts < mainpic->pts))
+ return AVERROR(EAGAIN);
+ /* At this point, we know that the current overlay frame extends to the
+ * time of the main frame. */
+ outlink->out_buf = outpicref = avfilter_ref_buffer(mainpic, ~0);
+
+ av_dlog(ctx, "main_pts:%s main_pts_time:%s",
+ av_ts2str(outpicref->pts), av_ts2timestr(outpicref->pts, &outlink->time_base));
+ if (over->overpicref)
+ av_dlog(ctx, " over_pts:%s over_pts_time:%s",
+ av_ts2str(over->overpicref->pts), av_ts2timestr(over->overpicref->pts, &outlink->time_base));
+ av_dlog(ctx, "\n");
+
+ ff_start_frame(ctx->outputs[0], avfilter_ref_buffer(outpicref, ~0));
+ over->frame_requested = 0;
+ return 0;
+}
+
+static int try_start_next_frame(AVFilterContext *ctx)
+{
OverlayContext *over = ctx->priv;
- static void start_frame_main(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
+ AVFilterBufferRef *next_mainpic = ff_bufqueue_peek(&over->queue_main, 0);
+ if (!next_mainpic || try_start_frame(ctx, next_mainpic) < 0)
+ return AVERROR(EAGAIN);
+ avfilter_unref_buffer(ff_bufqueue_get(&over->queue_main));
+ return 0;
+}
+
+static int try_push_frame(AVFilterContext *ctx)
+{
+ OverlayContext *over = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFilterBufferRef *outpicref = outlink->out_buf;
+
+ if (try_start_next_frame(ctx) < 0)
+ return AVERROR(EAGAIN);
+ outpicref = outlink->out_buf;
+ if (over->overpicref)
+ blend_slice(ctx, outpicref, over->overpicref, over->x, over->y,
+ over->overpicref->video->w, over->overpicref->video->h,
+ 0, outpicref->video->w, outpicref->video->h);
+ ff_draw_slice(outlink, 0, outpicref->video->h, +1);
+ ff_end_frame(outlink);
+ return 0;
+}
+
+static void flush_frames(AVFilterContext *ctx)
+{
+ while (!try_push_frame(ctx));
+}
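To make the synchronization above concrete, here is a sketch of how try_start_frame picks the overlay frame, using hypothetical pts values (all in the output time base):

/*
 *   main frames:    pts 0, 40, 80, 120
 *   overlay frames: pts 0, 100
 *
 * main 0:   overlay 0 is popped from queue_over and becomes overpicref;
 *           the next queued overlay (pts 100) is newer, so the loop stops.
 * main 40:  overlay 0 is reused; the queued frame at pts 100 proves it
 *           is not yet superseded.
 * main 80:  same as main 40.
 * main 120: overlay 100 replaces overlay 0, but with nothing left in
 *           queue_over and no overlay EOF we cannot tell whether pts 100
 *           will itself be superseded, so try_start_frame returns EAGAIN
 *           and the main frame waits in queue_main.
 */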
+
++static int start_frame_main(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ OverlayContext *over = ctx->priv;
+
+ flush_frames(ctx);
+ inpicref->pts = av_rescale_q(inpicref->pts, ctx->inputs[MAIN]->time_base,
+ ctx->outputs[0]->time_base);
+ if (try_start_frame(ctx, inpicref) < 0) {
+ ff_bufqueue_add(ctx, &over->queue_main, inpicref);
+ av_assert1(inpicref == inlink->cur_buf);
+ inlink->cur_buf = NULL;
+ }
++ return 0;
+}
- static void draw_slice_main(AVFilterLink *inlink, int y, int h, int slice_dir)
++static int draw_slice_main(AVFilterLink *inlink, int y, int h, int slice_dir)
+{
+ AVFilterContext *ctx = inlink->dst;
+ OverlayContext *over = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFilterBufferRef *outpicref = outlink->out_buf;
+
+ if (!outpicref)
+ return 0;
if (over->overpicref &&
- !(over->x >= outpicref->video->w || over->y >= outpicref->video->h ||
- y+h < over->y || y >= over->y + over->overpicref->video->h)) {
+ y + h > over->y && y < over->y + over->overpicref->video->h) {
blend_slice(ctx, outpicref, over->overpicref, over->x, over->y,
over->overpicref->video->w, over->overpicref->video->h,
y, outpicref->video->w, h);
}
- ff_draw_slice(outlink, y, h, slice_dir);
+ return ff_draw_slice(outlink, y, h, slice_dir);
}
- static void end_frame_main(AVFilterLink *inlink)
-static int end_frame(AVFilterLink *inlink)
++static int end_frame_main(AVFilterLink *inlink)
{
- return ff_end_frame(inlink->dst->outputs[0]);
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFilterBufferRef *outpicref = outlink->out_buf;
+ flush_frames(ctx);
+
+ if (!outpicref)
- return;
- ff_end_frame(ctx->outputs[0]);
++ return 0;
++ return ff_end_frame(ctx->outputs[0]);
}
- static void start_frame_over(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
-static int null_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
++static int start_frame_over(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
+ return 0;
}
- static void end_frame_over(AVFilterLink *inlink)
-static int null_end_frame(AVFilterLink *inlink)
++static int end_frame_over(AVFilterLink *inlink)
{
- return 0;
+ AVFilterContext *ctx = inlink->dst;
+ OverlayContext *over = ctx->priv;
+ AVFilterBufferRef *inpicref = inlink->cur_buf;
+ inlink->cur_buf = NULL;
+
+ flush_frames(ctx);
+ inpicref->pts = av_rescale_q(inpicref->pts, ctx->inputs[OVERLAY]->time_base,
+ ctx->outputs[0]->time_base);
+ ff_bufqueue_add(ctx, &over->queue_over, inpicref);
- try_push_frame(ctx);
++ return try_push_frame(ctx);
}
-static int poll_frame(AVFilterLink *link)
+static int request_frame(AVFilterLink *outlink)
{
- AVFilterContext *s = link->src;
- OverlayContext *over = s->priv;
- int ret = ff_poll_frame(s->inputs[OVERLAY]);
-
- if (ret == AVERROR_EOF)
- ret = !!over->overpicref;
+ AVFilterContext *ctx = outlink->src;
+ OverlayContext *over = ctx->priv;
+ int input, ret;
+
+ if (!try_push_frame(ctx))
+ return 0;
+ over->frame_requested = 1;
+ while (over->frame_requested) {
+ /* TODO if we had a frame duration, we could guess more accurately */
+ input = !over->overlay_eof && (over->queue_main.available ||
+ over->queue_over.available < 2) ?
+ OVERLAY : MAIN;
+ ret = ff_request_frame(ctx->inputs[input]);
+ /* EOF on main is reported immediately */
+ if (ret == AVERROR_EOF && input == OVERLAY) {
+ over->overlay_eof = 1;
+ if (!try_start_next_frame(ctx))
+ return 0;
+ ret = 0; /* continue requesting frames on main */
+ }
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
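Two hypothetical states illustrate the input-selection heuristic in request_frame:

/*
 * queue_main.available == 1, queue_over.available == 0, no overlay EOF:
 *     request from OVERLAY; a main frame is already waiting and only a
 *     newer overlay frame (or overlay EOF) can release it.
 *
 * queue_main.available == 0, queue_over.available == 2:
 *     request from MAIN; two queued overlay frames already bracket any
 *     upcoming main pts, so more overlay input is not needed yet.
 */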
- static void null_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) { }
- return ret && ff_poll_frame(s->inputs[MAIN]);
++static int null_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
++{
++ return 0;
++}
AVFilter avfilter_vf_overlay = {
.name = "overlay",
PadContext *pad = inlink->dst->priv;
AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0);
AVFilterBufferRef *for_next_filter;
- int plane;
+ int plane, ret = 0;
+
+ if (!outpicref)
+ return AVERROR(ENOMEM);
- for (plane = 0; plane < 4 && outpicref->data[plane]; plane++) {
- int hsub = (plane == 1 || plane == 2) ? pad->hsub : 0;
- int vsub = (plane == 1 || plane == 2) ? pad->vsub : 0;
+ for (plane = 0; plane < 4 && outpicref->data[plane] && pad->draw.pixelstep[plane]; plane++) {
+ int hsub = pad->draw.hsub[plane];
+ int vsub = pad->draw.vsub[plane];
av_assert0(outpicref->buf->w>0 && outpicref->buf->h>0);
outpicref->video->h = pad->h;
for_next_filter = avfilter_ref_buffer(outpicref, ~0);
- ff_start_frame(inlink->dst->outputs[0], for_next_filter);
+ if (!for_next_filter) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ ret = ff_start_frame(inlink->dst->outputs[0], for_next_filter);
+ if (ret < 0)
+ goto fail;
+
+ inlink->dst->outputs[0]->out_buf = outpicref;
+ return 0;
+
+ fail:
+ avfilter_unref_bufferp(&outpicref);
+ return ret;
}
- static void draw_send_bar_slice(AVFilterLink *link, int y, int h, int slice_dir, int before_slice)
-static int end_frame(AVFilterLink *link)
-{
- return ff_end_frame(link->dst->outputs[0]);
-}
-
+ static int draw_send_bar_slice(AVFilterLink *link, int y, int h, int slice_dir, int before_slice)
{
PadContext *pad = link->dst->priv;
- int bar_y, bar_h = 0;
+ int bar_y, bar_h = 0, ret = 0;
if (slice_dir * before_slice == 1 && y == pad->y) {
/* top bar */
}
if (bar_h) {
- ff_draw_rectangle(link->dst->outputs[0]->out_buf->data,
+ ff_fill_rectangle(&pad->draw, &pad->color,
+ link->dst->outputs[0]->out_buf->data,
link->dst->outputs[0]->out_buf->linesize,
- pad->line, pad->line_step, pad->hsub, pad->vsub,
0, bar_y, pad->w, bar_h);
- ff_draw_slice(link->dst->outputs[0], bar_y, bar_h, slice_dir);
+ ret = ff_draw_slice(link->dst->outputs[0], bar_y, bar_h, slice_dir);
}
+ return ret;
}
- static void draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
+ static int draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
{
PadContext *pad = link->dst->priv;
AVFilterBufferRef *outpic = link->dst->outputs[0]->out_buf;
y += pad->y;
- y &= ~((1 << pad->vsub) - 1);
- h &= ~((1 << pad->vsub) - 1);
+ y = ff_draw_round_to_sub(&pad->draw, 1, -1, y);
+ h = ff_draw_round_to_sub(&pad->draw, 1, -1, h);
if (!h)
- return;
+ return 0;
draw_send_bar_slice(link, y, h, slice_dir, 1);
/* left border */
}
/* right border */
- ff_draw_rectangle(outpic->data, outpic->linesize,
- pad->line, pad->line_step, pad->hsub, pad->vsub,
+ ff_fill_rectangle(&pad->draw, &pad->color, outpic->data, outpic->linesize,
pad->x + pad->in_w, y, pad->w - pad->x - pad->in_w, h);
- ff_draw_slice(link->dst->outputs[0], y, h, slice_dir);
+ ret = ff_draw_slice(link->dst->outputs[0], y, h, slice_dir);
+ if (ret < 0)
+ return ret;
- draw_send_bar_slice(link, y, h, slice_dir, -1);
+ return draw_send_bar_slice(link, y, h, slice_dir, -1);
}
AVFilter avfilter_vf_pad = {
/* copy palette */
if (priv->pix_desc->flags & PIX_FMT_PAL ||
priv->pix_desc->flags & PIX_FMT_PSEUDOPAL)
- memcpy(outpicref->data[1], outpicref->data[1], 256*4);
+ memcpy(outpicref->data[1], picref->data[1], AVPALETTE_SIZE);
- ff_start_frame(outlink, avfilter_ref_buffer(outpicref, ~0));
+ for_next_filter = avfilter_ref_buffer(outpicref, ~0);
+ if (for_next_filter)
+ ret = ff_start_frame(outlink, for_next_filter);
+ else
+ ret = AVERROR(ENOMEM);
+
+ if (ret < 0) {
+ avfilter_unref_bufferp(&outpicref);
+ return ret;
+ }
+
+ outlink->out_buf = outpicref;
+ return 0;
}
- static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
+ static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
{
PixdescTestContext *priv = inlink->dst->priv;
AVFilterBufferRef *inpic = inlink->cur_buf;
--- /dev/null
- static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
+/*
+ * Copyright (c) 2005 Robert Edele <yartrebo@earthlink.net>
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Advanced blur-based logo removing filter
+ *
+ * This filter loads an image mask file showing where a logo is and
+ * uses a blur transform to remove the logo.
+ *
+ * Based on the libmpcodecs remove-logo filter by Robert Edele.
+ */
+
+/**
+ * This code implements a filter to remove annoying TV logos and other unwanted
+ * images placed onto a video stream. It works by filling in the pixels that
+ * comprise the logo with neighboring pixels. The transform is very loosely
+ * based on a gaussian blur, but it is different enough to merit its own
+ * paragraph later on. It is a major improvement on the old delogo filter as it
+ * both uses a better blurring algorithm and takes a bitmap describing an
+ * arbitrary, generally much tighter-fitting shape than a rectangle.
+ *
+ * The logo removal algorithm has two key points. The first is that it
+ * distinguishes between pixels in the logo and those not in the logo by using
+ * the passed-in bitmap. Pixels not in the logo are copied over directly without
+ * being modified and they also serve as source pixels for the logo
+ * fill-in. Pixels inside the logo have the mask applied.
+ *
+ * At init-time the bitmap is reprocessed internally, and the distance to the
+ * nearest edge of the logo (Manhattan distance), along with a little extra to
+ * remove rough edges, is stored in each pixel. This is done using an in-place
+ * erosion algorithm, incrementing each pixel that survives any given
+ * erosion. Once every pixel is eroded, the maximum value is recorded, and a
+ * set of masks from size 0 to this size are generated. The masks are circular
+ * binary masks, where each pixel within a radius N (where N is the size of the
+ * mask) is a 1, and all other pixels are a 0. Although a gaussian mask would be
+ * more mathematically accurate, a binary mask works better in practice because
+ * we generally do not use the central pixels in the mask (because they are in
+ * the logo region), and thus a gaussian mask will cause too little blur and
+ * thus a very unstable image.
+ *
+ * The mask is applied in a special way. Namely, only pixels in the mask that
+ * line up to pixels outside the logo are used. The dynamic mask size means that
+ * the mask is just big enough so that the edges touch pixels outside the logo,
+ * so the blurring is kept to a minimum and at least the first boundary
+ * condition is met (that the image function itself is continuous), even if the
+ * second boundary condition (that the derivative of the image function is
+ * continuous) is not met. A masking algorithm that does preserve the second
+ * boundary condition (perhaps something based on a highly-modified bi-cubic
+ * algorithm) should offer even better results on paper, but the noise in a
+ * typical TV signal should make anything based on derivatives hopelessly noisy.
+ */
+
+#include "libavutil/imgutils.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "video.h"
+#include "bbox.h"
+#include "lavfutils.h"
+#include "lswsutils.h"
+
+typedef struct {
+ /* Stores our collection of masks. The first index selects the mask
+ size, the second the y axis, and the third the x axis. */
+ int ***mask;
+ int max_mask_size;
+ int mask_w, mask_h;
+
+ uint8_t *full_mask_data;
+ FFBoundingBox full_mask_bbox;
+ uint8_t *half_mask_data;
+ FFBoundingBox half_mask_bbox;
+} RemovelogoContext;
+
+/**
+ * Choose a slightly larger mask size to improve performance.
+ *
+ * This function maps the absolute minimum mask size needed to the
+ * mask size we'll actually use. f(x) = x (the smallest that will
+ * work) will produce the sharpest results, but will be quite
+ * jittery. f(x) = 1.25x (what I'm using) is a good tradeoff in my
+ * opinion. This is evaluated only at init time, so you can put a
+ * long expression here without affecting performance.
+ */
+#define apply_mask_fudge_factor(x) (((x) >> 2) + x)
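For illustration, the fudge factor is plain integer arithmetic, floor(x/4) + x, i.e. roughly 1.25x:

/* apply_mask_fudge_factor(8)  == (8  >> 2) + 8  == 10 */
/* apply_mask_fudge_factor(13) == (13 >> 2) + 13 == 16 */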
+
+/**
+ * Pre-process an image to give distance information.
+ *
+ * This function takes a bitmap image and converts it in place into a
+ * distance image. A distance image is zero for pixels outside of the
+ * logo and is the Manhattan distance (|dx| + |dy|) from the logo edge
+ * for pixels inside of the logo. This will overestimate the distance,
+ * but that is safe, and is far easier to implement than a proper
+ * pythagorean distance since I'm using a modified erosion algorithm
+ * to compute the distances.
+ *
+ * @param data mask image which will be converted in place from a
+ * greyscale image into a distance image.
+ */
+static void convert_mask_to_strength_mask(uint8_t *data, int linesize,
+ int w, int h, int min_val,
+ int *max_mask_size)
+{
+ int x, y;
+
+ /* How many times we've gone through the loop. Used in the
+ in-place erosion algorithm and to get us max_mask_size later on. */
+ int current_pass = 0;
+
+ /* set all non-zero values to 1 */
+ for (y = 0; y < h; y++)
+ for (x = 0; x < w; x++)
+ data[y*linesize + x] = data[y*linesize + x] > min_val;
+
+ /* For each pass, if a pixel holds at least the value of the
+ current pass, and its four neighbors do too, then it is
+ incremented. If any pixel was incremented during the pass, we go
+ again; otherwise we are done. Edge pixels are always counted as
+ excluded (this should be true anyway for any sane mask, but if
+ it isn't this ensures that we eventually exit). */
+ while (1) {
+ /* If this doesn't get set by the end of this pass, then we're done. */
+ int has_anything_changed = 0;
+ uint8_t *current_pixel0 = data, *current_pixel;
+ current_pass++;
+
+ for (y = 1; y < h-1; y++) {
+ current_pixel = current_pixel0;
+ for (x = 1; x < w-1; x++) {
+ /* Apply the in-place erosion transform. It is based
+ on the following two premises:
+ 1 - Any pixel that fails 1 erosion will fail all
+ future erosions.
+
+ 2 - Only pixels having survived all erosions up to
+ the present will be >= to current_pass.
+ It doesn't matter if it survived the current pass,
+ failed it, or hasn't been tested yet. By using >=
+ instead of ==, we allow the algorithm to work in
+ place. */
+ if ( *current_pixel >= current_pass &&
+ *(current_pixel + 1) >= current_pass &&
+ *(current_pixel - 1) >= current_pass &&
+ *(current_pixel + w) >= current_pass &&
+ *(current_pixel - w) >= current_pass) {
+ /* Increment the value since it still has not been
+ * eroded, as evidenced by the if statement that
+ * just evaluated to true. */
+ (*current_pixel)++;
+ has_anything_changed = 1;
+ }
+ current_pixel++;
+ }
+ current_pixel0 += linesize;
+ }
+ if (!has_anything_changed)
+ break;
+ }
+
+ /* Apply the fudge factor, which will increase the size of the
+ * mask a little to reduce jitter at the cost of more blur. */
+ for (y = 1; y < h - 1; y++)
+ for (x = 1; x < w - 1; x++)
+ data[(y * linesize) + x] = apply_mask_fudge_factor(data[(y * linesize) + x]);
+
+ /* As a side-effect, we now know the maximum mask size, which
+ * we'll use to generate our masks. */
+ /* Apply the fudge factor to this number too, since we must ensure
+ * that enough masks are generated. */
+ *max_mask_size = apply_mask_fudge_factor(current_pass + 1);
+}
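As a sanity check, here is the same transform on a one-dimensional analogue (two neighbors instead of four); the >= trick yields the Manhattan distance to the nearest zero:

/*
 * thresholded row:  0 1 1 1 1 1 0
 * after pass 1:     0 1 2 2 2 1 0
 * after pass 2:     0 1 2 3 2 1 0   (pass 3 changes nothing, so we stop)
 *
 * Each pixel now holds its distance to the nearest edge, before the
 * fudge factor is applied.
 */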
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int load_mask(uint8_t **mask, int *w, int *h,
+ const char *filename, void *log_ctx)
+{
+ int ret;
+ enum PixelFormat pix_fmt;
+ uint8_t *src_data[4], *gray_data[4];
+ int src_linesize[4], gray_linesize[4];
+
+ /* load image from file */
+ if ((ret = ff_load_image(src_data, src_linesize, w, h, &pix_fmt, filename, log_ctx)) < 0)
+ return ret;
+
+ /* convert the image to GRAY8 */
+ if ((ret = ff_scale_image(gray_data, gray_linesize, *w, *h, PIX_FMT_GRAY8,
+ src_data, src_linesize, *w, *h, pix_fmt,
+ log_ctx)) < 0)
+ goto end;
+
+ /* copy mask to a newly allocated array */
+ *mask = av_malloc(*w * *h);
+ if (!*mask) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ av_image_copy_plane(*mask, *w, gray_data[0], gray_linesize[0], *w, *h);
+
+end:
+ av_free(src_data[0]);
+ av_free(gray_data[0]);
+ return ret;
+}
+
+/**
+ * Generate a scaled down image with half width, height, and intensity.
+ *
+ * This function not only scales down an image, but halves the value
+ * in each pixel too. The purpose of this is to produce a chroma
+ * filter image out of a luma filter image. The pixel values store the
+ * distance to the edge of the logo and halving the dimensions halves
+ * the distance. This function rounds up, because a downwards rounding
+ * error could cause the filter to fail, but an upwards rounding error
+ * will only cause a minor amount of excess blur in the chroma planes.
+ */
+static void generate_half_size_image(const uint8_t *src_data, int src_linesize,
+ uint8_t *dst_data, int dst_linesize,
+ int src_w, int src_h,
+ int *max_mask_size)
+{
+ int x, y;
+
+ /* Copy over the image data, marking each downsampled pixel if any
+ * of its 4 source pixels is non-zero. */
+ for (y = 0; y < src_h/2; y++) {
+ for (x = 0; x < src_w/2; x++) {
+ /* Set the pixel if there exists a non-zero value in the
+ * source pixels, else clear it. */
+ dst_data[(y * dst_linesize) + x] =
+ src_data[((y << 1) * src_linesize) + (x << 1)] ||
+ src_data[((y << 1) * src_linesize) + (x << 1) + 1] ||
+ src_data[(((y << 1) + 1) * src_linesize) + (x << 1)] ||
+ src_data[(((y << 1) + 1) * src_linesize) + (x << 1) + 1];
+ dst_data[(y * dst_linesize) + x] = FFMIN(1, dst_data[(y * dst_linesize) + x]);
+ }
+ }
+
+ convert_mask_to_strength_mask(dst_data, dst_linesize,
+ src_w/2, src_h/2, 0, max_mask_size);
+}
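A small worked example of the downsampling rule, with hypothetical values:

/*
 * source 2x2 block:   0 0      ->  destination pixel: 1
 *                     3 0
 *
 * Any non-zero source pixel marks the downsampled pixel as inside the
 * logo; convert_mask_to_strength_mask() then rebuilds the distances at
 * half resolution.
 */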
+
+static av_cold int init(AVFilterContext *ctx, const char *args)
+{
+ RemovelogoContext *removelogo = ctx->priv;
+ int ***mask;
+ int ret = 0;
+ int a, b, c, w, h;
+ int full_max_mask_size, half_max_mask_size;
+
+ if (!args) {
+ av_log(ctx, AV_LOG_ERROR, "An image file must be specified as argument\n");
+ return AVERROR(EINVAL);
+ }
+
+ /* Load our mask image. */
+ if ((ret = load_mask(&removelogo->full_mask_data, &w, &h, args, ctx)) < 0)
+ return ret;
+ removelogo->mask_w = w;
+ removelogo->mask_h = h;
+
+ convert_mask_to_strength_mask(removelogo->full_mask_data, w, w, h,
+ 16, &full_max_mask_size);
+
+ /* Create the scaled down mask image for the chroma planes. */
+ if (!(removelogo->half_mask_data = av_mallocz(w/2 * h/2)))
+ return AVERROR(ENOMEM);
+ generate_half_size_image(removelogo->full_mask_data, w,
+ removelogo->half_mask_data, w/2,
+ w, h, &half_max_mask_size);
+
+ removelogo->max_mask_size = FFMAX(full_max_mask_size, half_max_mask_size);
+
+ /* Create a circular mask for each size up to max_mask_size. When
+ the filter is applied, the mask size is determined on a pixel
+ by pixel basis, with pixels nearer the edge of the logo getting
+ smaller mask sizes. */
+ mask = (int ***)av_malloc(sizeof(int **) * (removelogo->max_mask_size + 1));
+ if (!mask)
+ return AVERROR(ENOMEM);
+
+ for (a = 0; a <= removelogo->max_mask_size; a++) {
+ mask[a] = (int **)av_malloc(sizeof(int *) * ((a * 2) + 1));
+ if (!mask[a])
+ return AVERROR(ENOMEM);
+ for (b = -a; b <= a; b++) {
+ mask[a][b + a] = (int *)av_malloc(sizeof(int) * ((a * 2) + 1));
+ if (!mask[a][b + a])
+ return AVERROR(ENOMEM);
+ for (c = -a; c <= a; c++) {
+ if ((b * b) + (c * c) <= (a * a)) /* Circular 0/1 mask. */
+ mask[a][b + a][c + a] = 1;
+ else
+ mask[a][b + a][c + a] = 0;
+ }
+ }
+ }
+ removelogo->mask = mask;
+
+ /* Calculate our bounding rectangles, which determine in what
+ * region the logo resides for faster processing. */
+ ff_calculate_bounding_box(&removelogo->full_mask_bbox, removelogo->full_mask_data, w, w, h, 0);
+ ff_calculate_bounding_box(&removelogo->half_mask_bbox, removelogo->half_mask_data, w/2, w/2, h/2, 0);
+
+#define SHOW_LOGO_INFO(mask_type) \
+ av_log(ctx, AV_LOG_VERBOSE, #mask_type " x1:%d x2:%d y1:%d y2:%d max_mask_size:%d\n", \
+ removelogo->mask_type##_mask_bbox.x1, removelogo->mask_type##_mask_bbox.x2, \
+ removelogo->mask_type##_mask_bbox.y1, removelogo->mask_type##_mask_bbox.y2, \
+ mask_type##_max_mask_size);
+ SHOW_LOGO_INFO(full);
+ SHOW_LOGO_INFO(half);
+
+ return 0;
+}
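The circular 0/1 masks generated in init look like this for the two smallest sizes; mask[a] is a (2a+1) x (2a+1) grid whose entry [b+a][c+a] is 1 iff b*b + c*c <= a*a:

/*
 * a = 0:   1        a = 1:   0 1 0
 *                            1 1 1
 *                            0 1 0
 */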
+
+static int config_props_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ RemovelogoContext *removelogo = ctx->priv;
+
+ if (inlink->w != removelogo->mask_w || inlink->h != removelogo->mask_h) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Mask image size %dx%d does not match the input video size %dx%d\n",
+ removelogo->mask_w, removelogo->mask_h, inlink->w, inlink->h);
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+/**
+ * Blur image.
+ *
+ * It takes a pixel that is inside the mask and blurs it. It does so
+ * by finding the average of all the pixels within the mask and
+ * outside of the mask.
+ *
+ * @param mask_data the mask plane to use for averaging
+ * @param image_data the image plane to blur
+ * @param w width of the image
+ * @param h height of the image
+ * @param x x-coordinate of the pixel to blur
+ * @param y y-coordinate of the pixel to blur
+ */
+static unsigned int blur_pixel(int ***mask,
+ const uint8_t *mask_data, int mask_linesize,
+ uint8_t *image_data, int image_linesize,
+ int w, int h, int x, int y)
+{
+ /* Mask size tells how large a circle to use. The radius is about
+ * (slightly larger than) mask size. */
+ int mask_size;
+ int start_posx, start_posy, end_posx, end_posy;
+ int i, j;
+ unsigned int accumulator = 0, divisor = 0;
+ /* What pixel we are reading out of the source image. */
+ const uint8_t *image_read_position;
+ /* What pixel we are reading out of the mask (filter) image. */
+ const uint8_t *mask_read_position;
+
+ /* Prepare our bounding rectangle and clip it if need be. */
+ mask_size = mask_data[y * mask_linesize + x];
+ start_posx = FFMAX(0, x - mask_size);
+ start_posy = FFMAX(0, y - mask_size);
+ end_posx = FFMIN(w - 1, x + mask_size);
+ end_posy = FFMIN(h - 1, y + mask_size);
+
+ image_read_position = image_data + image_linesize * start_posy + start_posx;
+ mask_read_position = mask_data + mask_linesize * start_posy + start_posx;
+
+ for (j = start_posy; j <= end_posy; j++) {
+ for (i = start_posx; i <= end_posx; i++) {
+ /* Check if this pixel is in the mask or not. Only use the
+ * pixel if it is not. */
+ if (!(*mask_read_position) && mask[mask_size][i - start_posx][j - start_posy]) {
+ accumulator += *image_read_position;
+ divisor++;
+ }
+
+ image_read_position++;
+ mask_read_position++;
+ }
+
+ image_read_position += (image_linesize - ((end_posx + 1) - start_posx));
+ mask_read_position += (mask_linesize - ((end_posx + 1) - start_posx));
+ }
+
+ /* If divisor is 0, it means that not a single pixel is outside of
+ the logo, so we have no data. Else we need to normalise the
+ data using the divisor. */
+ return divisor == 0 ? 255:
+ (accumulator + (divisor / 2)) / divisor; /* divide, taking into account average rounding error */
+}
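The final division rounds to nearest instead of truncating; for example:

/* accumulator == 10, divisor == 4: (10 + 4/2) / 4 == 3, so the average
 * 2.5 rounds up to 3 rather than truncating to 2. */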
+
+/**
+ * Blur image plane using a mask.
+ *
+ * @param source The image to have its logo removed.
+ * @param destination Where the output image will be stored.
+ * @param source_stride How far apart (in memory) two consecutive lines are.
+ * @param destination_stride Same as source_stride, but for the destination image.
+ * @param width Width of the image. This is the same for source and destination.
+ * @param height Height of the image. This is the same for source and destination.
+ * @param is_image_direct If the image is direct, then source and destination are
+ * the same and we can save a lot of time by not copying pixels that
+ * haven't changed.
+ * @param filter The image that stores the distance to the edge of the logo for
+ * each pixel.
+ * @param logo_start_x smallest x-coordinate that contains at least 1 logo pixel.
+ * @param logo_start_y smallest y-coordinate that contains at least 1 logo pixel.
+ * @param logo_end_x largest x-coordinate that contains at least 1 logo pixel.
+ * @param logo_end_y largest y-coordinate that contains at least 1 logo pixel.
+ *
+ * This function processes an entire plane. Pixels outside of the logo are copied
+ * to the output without change, and pixels inside the logo have the blurring
+ * function applied.
+ */
+static void blur_image(int ***mask,
+ const uint8_t *src_data, int src_linesize,
+ uint8_t *dst_data, int dst_linesize,
+ const uint8_t *mask_data, int mask_linesize,
+ int w, int h, int direct,
+ FFBoundingBox *bbox)
+{
+ int x, y;
+ uint8_t *dst_line;
+ const uint8_t *src_line;
+
+ if (!direct)
+ av_image_copy_plane(dst_data, dst_linesize, src_data, src_linesize, w, h);
+
+ for (y = bbox->y1; y <= bbox->y2; y++) {
+ src_line = src_data + src_linesize * y;
+ dst_line = dst_data + dst_linesize * y;
+
+ for (x = bbox->x1; x <= bbox->x2; x++) {
+ if (mask_data[y * mask_linesize + x]) {
+ /* Only process if we are in the mask. */
+ dst_line[x] = blur_pixel(mask,
+ mask_data, mask_linesize,
+ dst_data, dst_linesize,
+ w, h, x, y);
+ } else {
+ /* Else just copy the data. */
+ if (!direct)
+ dst_line[x] = src_line[x];
+ }
+ }
+ }
+}
+
- ff_start_frame(outlink, avfilter_ref_buffer(outpicref, ~0));
++static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
+{
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFilterBufferRef *outpicref;
+
+ if (inpicref->perms & AV_PERM_PRESERVE) {
+ outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE,
+ outlink->w, outlink->h);
+ if (!outpicref)
+ return AVERROR(ENOMEM);
+ avfilter_copy_buffer_ref_props(outpicref, inpicref);
+ outpicref->video->w = outlink->w;
+ outpicref->video->h = outlink->h;
+ } else
+ outpicref = inpicref;
+
+ outlink->out_buf = outpicref;
- static void end_frame(AVFilterLink *inlink)
++ return ff_start_frame(outlink, avfilter_ref_buffer(outpicref, ~0));
+}
+
- ff_end_frame(outlink);
++static int end_frame(AVFilterLink *inlink)
+{
+ RemovelogoContext *removelogo = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFilterBufferRef *inpicref = inlink ->cur_buf;
+ AVFilterBufferRef *outpicref = outlink->out_buf;
+ int direct = inpicref == outpicref;
+
+ blur_image(removelogo->mask,
+ inpicref ->data[0], inpicref ->linesize[0],
+ outpicref->data[0], outpicref->linesize[0],
+ removelogo->full_mask_data, inlink->w,
+ inlink->w, inlink->h, direct, &removelogo->full_mask_bbox);
+ blur_image(removelogo->mask,
+ inpicref ->data[1], inpicref ->linesize[1],
+ outpicref->data[1], outpicref->linesize[1],
+ removelogo->half_mask_data, inlink->w/2,
+ inlink->w/2, inlink->h/2, direct, &removelogo->half_mask_bbox);
+ blur_image(removelogo->mask,
+ inpicref ->data[2], inpicref ->linesize[2],
+ outpicref->data[2], outpicref->linesize[2],
+ removelogo->half_mask_data, inlink->w/2,
+ inlink->w/2, inlink->h/2, direct, &removelogo->half_mask_bbox);
+
+ ff_draw_slice(outlink, 0, inlink->h, 1);
- static void null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { }
++ return ff_end_frame(outlink);
+}
+
+static void uninit(AVFilterContext *ctx)
+{
+ RemovelogoContext *removelogo = ctx->priv;
+ int a, b;
+
+ av_freep(&removelogo->full_mask_data);
+ av_freep(&removelogo->half_mask_data);
+
+ if (removelogo->mask) {
+ /* Loop through each mask. */
+ for (a = 0; a <= removelogo->max_mask_size; a++) {
+ /* Loop through each scanline in a mask. */
+ for (b = -a; b <= a; b++) {
+ av_free(removelogo->mask[a][b + a]); /* Free a scanline. */
+ }
+ av_free(removelogo->mask[a]);
+ }
+ /* Free the array of pointers pointing to the masks. */
+ av_freep(&removelogo->mask);
+ }
+}
+
++static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { return 0; }
+
+AVFilter avfilter_vf_removelogo = {
+ .name = "removelogo",
+ .description = NULL_IF_CONFIG_SMALL("Remove a TV logo based on a mask image."),
+ .priv_size = sizeof(RemovelogoContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+
+ .inputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .get_video_buffer = ff_null_get_video_buffer,
+ .config_props = config_props_input,
+ .draw_slice = null_draw_slice,
+ .start_frame = start_frame,
+ .end_frame = end_frame,
+ .min_perms = AV_PERM_WRITE | AV_PERM_READ,
+ .rej_perms = AV_PERM_PRESERVE },
+ { .name = NULL }
+ },
+ .outputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO, },
+ { .name = NULL }
+ },
+};
{
ScaleContext *scale = link->dst->priv;
AVFilterLink *outlink = link->dst->outputs[0];
- AVFilterBufferRef *outpicref;
+ AVFilterBufferRef *outpicref, *for_next_filter;
+ int ret = 0;
+ if( picref->video->w != link->w
+ || picref->video->h != link->h
+ || picref->format != link->format) {
+ snprintf(scale->w_expr, sizeof(scale->w_expr)-1, "%d", outlink->w);
+ snprintf(scale->h_expr, sizeof(scale->h_expr)-1, "%d", outlink->h);
+
+ link->dst->inputs[0]->format = picref->format;
+ link->dst->inputs[0]->w = picref->video->w;
+ link->dst->inputs[0]->h = picref->video->h;
+
+ if ((ret = config_props(outlink)) < 0)
+ av_assert0(0); // FIXME: no way to report a config failure to the caller here
+ }
+
if (!scale->sws) {
- ff_start_frame(outlink, avfilter_ref_buffer(picref, ~0));
- return;
+ outpicref = avfilter_ref_buffer(picref, ~0);
+ if (!outpicref)
+ return AVERROR(ENOMEM);
+ return ff_start_frame(outlink, outpicref);
}
scale->hsub = av_pix_fmt_descriptors[link->format].log2_chroma_w;
scale->vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h;
- outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+ outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN, outlink->w, outlink->h);
+ if (!outpicref)
+ return AVERROR(ENOMEM);
+
avfilter_copy_buffer_ref_props(outpicref, picref);
outpicref->video->w = outlink->w;
outpicref->video->h = outlink->h;
- outlink->out_buf = outpicref;
+ if(scale->output_is_pal)
+ ff_set_systematic_pal2(outpicref->data[1], outlink->format == PIX_FMT_PAL8 ? PIX_FMT_BGR8 : outlink->format);
- av_reduce(&outpicref->video->pixel_aspect.num, &outpicref->video->pixel_aspect.den,
- (int64_t)picref->video->pixel_aspect.num * outlink->h * link->w,
- (int64_t)picref->video->pixel_aspect.den * outlink->w * link->h,
+ av_reduce(&outpicref->video->sample_aspect_ratio.num, &outpicref->video->sample_aspect_ratio.den,
+ (int64_t)picref->video->sample_aspect_ratio.num * outlink->h * link->w,
+ (int64_t)picref->video->sample_aspect_ratio.den * outlink->w * link->h,
INT_MAX);
scale->slice_y = 0;
- ff_start_frame(outlink, avfilter_ref_buffer(outpicref, ~0));
+ for_next_filter = avfilter_ref_buffer(outpicref, ~0);
+ if (for_next_filter)
+ ret = ff_start_frame(outlink, for_next_filter);
+ else
+ ret = AVERROR(ENOMEM);
+
+ if (ret < 0) {
+ avfilter_unref_bufferp(&outpicref);
+ return ret;
+ }
+
+ outlink->out_buf = outpicref;
+ return 0;
}
- static void draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
+static int scale_slice(AVFilterLink *link, struct SwsContext *sws, int y, int h, int mul, int field)
+{
+ ScaleContext *scale = link->dst->priv;
+ AVFilterBufferRef *cur_pic = link->cur_buf;
+ AVFilterBufferRef *out_buf = link->dst->outputs[0]->out_buf;
+ const uint8_t *in[4];
+ uint8_t *out[4];
+ int in_stride[4],out_stride[4];
+ int i;
+
+ for(i=0; i<4; i++){
+ int vsub= ((i+1)&2) ? scale->vsub : 0;
+ in_stride[i] = cur_pic->linesize[i] * mul;
+ out_stride[i] = out_buf->linesize[i] * mul;
+ in[i] = cur_pic->data[i] + ((y>>vsub)+field) * cur_pic->linesize[i];
+ out[i] = out_buf->data[i] + field * out_buf->linesize[i];
+ }
+ if(scale->input_is_pal)
+ in[1] = cur_pic->data[1];
+ if(scale->output_is_pal)
+ out[1] = out_buf->data[1];
+
+ return sws_scale(sws, in, in_stride, y/mul, h,
+ out,out_stride);
+}
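The mul and field parameters allow per-field scaling of interlaced frames. A hypothetical call pattern (the isws contexts are an assumption, not shown in this hunk):

/* scale_slice(link, scale->isws[0], y, (h + 1) / 2, 2, 0);   top field
 * scale_slice(link, scale->isws[1], y,  h      / 2, 2, 1);   bottom field
 *
 * mul == 2 doubles the strides so each call sees every other line, and
 * field selects whether to start on the first or the second line.
 */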
+
+ static int draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
{
ScaleContext *scale = link->dst->priv;
- int out_h;
+ int out_h, ret;
- AVFilterBufferRef *cur_pic = link->cur_buf;
- const uint8_t *data[4];
if (!scale->sws) {
- ff_draw_slice(link->dst->outputs[0], y, h, slice_dir);
- return;
+ return ff_draw_slice(link->dst->outputs[0], y, h, slice_dir);
}
if (scale->slice_y == 0 && slice_dir == -1)
--- /dev/null
- static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * set field order
+ */
+
+#include "avfilter.h"
+#include "video.h"
+
+enum SetFieldMode {
+ MODE_AUTO = -1,
+ MODE_BFF,
+ MODE_TFF,
+ MODE_PROG,
+};
+
+typedef struct {
+ enum SetFieldMode mode;
+} SetFieldContext;
+
+static av_cold int init(AVFilterContext *ctx, const char *args)
+{
+ SetFieldContext *setfield = ctx->priv;
+
+ setfield->mode = MODE_AUTO;
+
+ if (args) {
+ char c;
+ if (sscanf(args, "%d%c", &setfield->mode, &c) != 1) {
+ if (!strcmp("tff", args)) setfield->mode = MODE_TFF;
+ else if (!strcmp("bff", args)) setfield->mode = MODE_BFF;
+ else if (!strcmp("prog", args)) setfield->mode = MODE_PROG;
+ else if (!strcmp("auto", args)) setfield->mode = MODE_AUTO;
+ else {
+ av_log(ctx, AV_LOG_ERROR, "Invalid argument '%s'\n", args);
+ return AVERROR(EINVAL);
+ }
+ } else {
+ if (setfield->mode < -1 || setfield->mode > 1) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Provided integer value %d must be included between -1 and +1\n",
+ setfield->mode);
+ return AVERROR(EINVAL);
+ }
+ av_log(ctx, AV_LOG_WARNING,
+ "Using -1/0/1 is deprecated, use auto/tff/bff/prog\n");
+ }
+ }
+
+ return 0;
+}
+
- ff_start_frame(inlink->dst->outputs[0], outpicref);
++static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
+{
+ SetFieldContext *setfield = inlink->dst->priv;
+ AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0);
+
+ if (!outpicref)
+ return AVERROR(ENOMEM);
+
+ if (setfield->mode == MODE_PROG) {
+ outpicref->video->interlaced = 0;
+ } else if (setfield->mode != MODE_AUTO) {
+ outpicref->video->interlaced = 1;
+ outpicref->video->top_field_first = setfield->mode;
+ }
++ return ff_start_frame(inlink->dst->outputs[0], outpicref);
+}
+
+AVFilter avfilter_vf_setfield = {
+ .name = "setfield",
+ .description = NULL_IF_CONFIG_SMALL("Force field for the output video frame."),
+ .init = init,
+
+ .priv_size = sizeof(SetFieldContext),
+
+ .inputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .get_video_buffer = ff_null_get_video_buffer,
+ .start_frame = start_frame, },
+ { .name = NULL }
+ },
+ .outputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO, },
+ { .name = NULL }
+ },
+};
picref->video->top_field_first ? 'T' : 'B', /* Top / Bottom */
picref->video->key_frame,
av_get_picture_type_char(picref->video->pict_type),
- checksum, plane_checksum[0], plane_checksum[1], plane_checksum[2], plane_checksum[3]);
+ checksum, plane_checksum[0]);
+
+ for (plane = 1; plane < 4 && picref->data[plane]; plane++)
+ av_log(ctx, AV_LOG_INFO, " %08X", plane_checksum[plane]);
+ av_log(ctx, AV_LOG_INFO, "]\n");
showinfo->frame++;
- ff_end_frame(inlink->dst->outputs[0]);
+ return ff_end_frame(inlink->dst->outputs[0]);
}
AVFilter avfilter_vf_showinfo = {
--- /dev/null
- static void null_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) { }
+/*
+ * Copyright (c) 2010 Niel van der Westhuizen <nielkie@gmail.com>
+ * Copyright (c) 2002 A'rpi
+ * Copyright (c) 1997-2001 ZSNES Team ( zsknight@zsnes.com / _demo_@zsnes.com )
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Super 2xSaI video filter
+ * Ported from MPlayer libmpcodecs/vf_2xsai.c.
+ */
+
+#include "libavutil/pixdesc.h"
+#include "libavutil/intreadwrite.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "video.h"
+
+typedef struct {
+ /* masks used for two pixels interpolation */
+ uint32_t hi_pixel_mask;
+ uint32_t lo_pixel_mask;
+
+ /* masks used for four pixels interpolation */
+ uint32_t q_hi_pixel_mask;
+ uint32_t q_lo_pixel_mask;
+
+ int bpp; ///< bytes per pixel, pixel stride for each (packed) pixel
+ int is_be;
+} Super2xSaIContext;
+
+#define GET_RESULT(A, B, C, D) ((A != C || A != D) - (B != C || B != D))
+
+#define INTERPOLATE(A, B) (((A & hi_pixel_mask) >> 1) + ((B & hi_pixel_mask) >> 1) + (A & B & lo_pixel_mask))
+
+#define Q_INTERPOLATE(A, B, C, D) ((A & q_hi_pixel_mask) >> 2) + ((B & q_hi_pixel_mask) >> 2) + ((C & q_hi_pixel_mask) >> 2) + ((D & q_hi_pixel_mask) >> 2) \
+ + ((((A & q_lo_pixel_mask) + (B & q_lo_pixel_mask) + (C & q_lo_pixel_mask) + (D & q_lo_pixel_mask)) >> 2) & q_lo_pixel_mask)
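The interpolation macros average packed pixels without unpacking the channels. A one-byte worked example of INTERPOLATE, taking hi_pixel_mask = 0xFE and lo_pixel_mask = 0x01 for a single 8-bit channel:

/* A = 0x7F, B = 0x81:
 *   (A & 0xFE) >> 1 == 0x3F
 *   (B & 0xFE) >> 1 == 0x40
 *    A & B & 0x01   == 0x01
 *   sum             == 0x80, the exact average of 127 and 129.
 *
 * The lo-mask term restores the carry lost by shifting each operand
 * before adding; Q_INTERPOLATE applies the same idea to a 4-way
 * average with 2-bit shifts.
 */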
+
+static void super2xsai(AVFilterContext *ctx,
+ uint8_t *src, int src_linesize,
+ uint8_t *dst, int dst_linesize,
+ int width, int height)
+{
+ Super2xSaIContext *sai = ctx->priv;
+ unsigned int x, y;
+ uint32_t color[4][4];
+ unsigned char *src_line[4];
+ const int bpp = sai->bpp;
+ const uint32_t hi_pixel_mask = sai->hi_pixel_mask;
+ const uint32_t lo_pixel_mask = sai->lo_pixel_mask;
+ const uint32_t q_hi_pixel_mask = sai->q_hi_pixel_mask;
+ const uint32_t q_lo_pixel_mask = sai->q_lo_pixel_mask;
+
+ /* Point to the first 4 lines, first line is duplicated */
+ src_line[0] = src;
+ src_line[1] = src;
+ src_line[2] = src + src_linesize*FFMIN(1, height-1);
+ src_line[3] = src + src_linesize*FFMIN(2, height-1);
+
+#define READ_COLOR4(dst, src_line, off) dst = *((const uint32_t *)src_line + off)
+#define READ_COLOR3(dst, src_line, off) dst = AV_RL24 (src_line + 3*off)
+#define READ_COLOR2(dst, src_line, off) dst = sai->is_be ? AV_RB16(src_line + 2 * off) : AV_RL16(src_line + 2 * off)
+
+ for (y = 0; y < height; y++) {
+ uint8_t *dst_line[2];
+
+ dst_line[0] = dst + dst_linesize*2*y;
+ dst_line[1] = dst + dst_linesize*(2*y+1);
+
+ switch (bpp) {
+ case 4:
+ READ_COLOR4(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR4(color[0][2], src_line[0], 1); READ_COLOR4(color[0][3], src_line[0], 2);
+ READ_COLOR4(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR4(color[1][2], src_line[1], 1); READ_COLOR4(color[1][3], src_line[1], 2);
+ READ_COLOR4(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR4(color[2][2], src_line[2], 1); READ_COLOR4(color[2][3], src_line[2], 2);
+ READ_COLOR4(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR4(color[3][2], src_line[3], 1); READ_COLOR4(color[3][3], src_line[3], 2);
+ break;
+ case 3:
+ READ_COLOR3(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR3(color[0][2], src_line[0], 1); READ_COLOR3(color[0][3], src_line[0], 2);
+ READ_COLOR3(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR3(color[1][2], src_line[1], 1); READ_COLOR3(color[1][3], src_line[1], 2);
+ READ_COLOR3(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR3(color[2][2], src_line[2], 1); READ_COLOR3(color[2][3], src_line[2], 2);
+ READ_COLOR3(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR3(color[3][2], src_line[3], 1); READ_COLOR3(color[3][3], src_line[3], 2);
+ break;
+ default:
+ READ_COLOR2(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR2(color[0][2], src_line[0], 1); READ_COLOR2(color[0][3], src_line[0], 2);
+ READ_COLOR2(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR2(color[1][2], src_line[1], 1); READ_COLOR2(color[1][3], src_line[1], 2);
+ READ_COLOR2(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR2(color[2][2], src_line[2], 1); READ_COLOR2(color[2][3], src_line[2], 2);
+ READ_COLOR2(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR2(color[3][2], src_line[3], 1); READ_COLOR2(color[3][3], src_line[3], 2);
+ }
+
+ for (x = 0; x < width; x++) {
+ uint32_t product1a, product1b, product2a, product2b;
+
+//--------------------------------------- B0 B1 B2 B3 0 1 2 3
+// 4 5* 6 S2 -> 4 5* 6 7
+// 1 2 3 S1 8 9 10 11
+// A0 A1 A2 A3 12 13 14 15
+//--------------------------------------
+ if (color[2][1] == color[1][2] && color[1][1] != color[2][2]) {
+ product2b = color[2][1];
+ product1b = product2b;
+ } else if (color[1][1] == color[2][2] && color[2][1] != color[1][2]) {
+ product2b = color[1][1];
+ product1b = product2b;
+ } else if (color[1][1] == color[2][2] && color[2][1] == color[1][2]) {
+ int r = 0;
+
+ r += GET_RESULT(color[1][2], color[1][1], color[1][0], color[3][1]);
+ r += GET_RESULT(color[1][2], color[1][1], color[2][0], color[0][1]);
+ r += GET_RESULT(color[1][2], color[1][1], color[3][2], color[2][3]);
+ r += GET_RESULT(color[1][2], color[1][1], color[0][2], color[1][3]);
+
+ if (r > 0)
+ product1b = color[1][2];
+ else if (r < 0)
+ product1b = color[1][1];
+ else
+ product1b = INTERPOLATE(color[1][1], color[1][2]);
+
+ product2b = product1b;
+ } else {
+ if (color[1][2] == color[2][2] && color[2][2] == color[3][1] && color[2][1] != color[3][2] && color[2][2] != color[3][0])
+ product2b = Q_INTERPOLATE(color[2][2], color[2][2], color[2][2], color[2][1]);
+ else if (color[1][1] == color[2][1] && color[2][1] == color[3][2] && color[3][1] != color[2][2] && color[2][1] != color[3][3])
+ product2b = Q_INTERPOLATE(color[2][1], color[2][1], color[2][1], color[2][2]);
+ else
+ product2b = INTERPOLATE(color[2][1], color[2][2]);
+
+ if (color[1][2] == color[2][2] && color[1][2] == color[0][1] && color[1][1] != color[0][2] && color[1][2] != color[0][0])
+ product1b = Q_INTERPOLATE(color[1][2], color[1][2], color[1][2], color[1][1]);
+ else if (color[1][1] == color[2][1] && color[1][1] == color[0][2] && color[0][1] != color[1][2] && color[1][1] != color[0][3])
+ product1b = Q_INTERPOLATE(color[1][2], color[1][1], color[1][1], color[1][1]);
+ else
+ product1b = INTERPOLATE(color[1][1], color[1][2]);
+ }
+
+ if (color[1][1] == color[2][2] && color[2][1] != color[1][2] && color[1][0] == color[1][1] && color[1][1] != color[3][2])
+ product2a = INTERPOLATE(color[2][1], color[1][1]);
+ else if (color[1][1] == color[2][0] && color[1][2] == color[1][1] && color[1][0] != color[2][1] && color[1][1] != color[3][0])
+ product2a = INTERPOLATE(color[2][1], color[1][1]);
+ else
+ product2a = color[2][1];
+
+ if (color[2][1] == color[1][2] && color[1][1] != color[2][2] && color[2][0] == color[2][1] && color[2][1] != color[0][2])
+ product1a = INTERPOLATE(color[2][1], color[1][1]);
+ else if (color[1][0] == color[2][1] && color[2][2] == color[2][1] && color[2][0] != color[1][1] && color[2][1] != color[0][0])
+ product1a = INTERPOLATE(color[2][1], color[1][1]);
+ else
+ product1a = color[1][1];
+
+ /* Set the calculated pixels */
+ switch (bpp) {
+ case 4:
+ AV_WN32A(dst_line[0] + x * 8, product1a);
+ AV_WN32A(dst_line[0] + x * 8 + 4, product1b);
+ AV_WN32A(dst_line[1] + x * 8, product2a);
+ AV_WN32A(dst_line[1] + x * 8 + 4, product2b);
+ break;
+ case 3:
+ AV_WL24(dst_line[0] + x * 6, product1a);
+ AV_WL24(dst_line[0] + x * 6 + 3, product1b);
+ AV_WL24(dst_line[1] + x * 6, product2a);
+ AV_WL24(dst_line[1] + x * 6 + 3, product2b);
+ break;
+ default: // bpp = 2
+ if (sai->is_be) {
+ AV_WB32(dst_line[0] + x * 4, product1a | (product1b << 16));
+ AV_WB32(dst_line[1] + x * 4, product2a | (product2b << 16));
+ } else {
+ AV_WL32(dst_line[0] + x * 4, product1a | (product1b << 16));
+ AV_WL32(dst_line[1] + x * 4, product2a | (product2b << 16));
+ }
+ }
+
+ /* Move color matrix forward */
+ color[0][0] = color[0][1]; color[0][1] = color[0][2]; color[0][2] = color[0][3];
+ color[1][0] = color[1][1]; color[1][1] = color[1][2]; color[1][2] = color[1][3];
+ color[2][0] = color[2][1]; color[2][1] = color[2][2]; color[2][2] = color[2][3];
+ color[3][0] = color[3][1]; color[3][1] = color[3][2]; color[3][2] = color[3][3];
+
+ if (x < width - 3) {
+ x += 3;
+ switch (bpp) {
+ case 4:
+ READ_COLOR4(color[0][3], src_line[0], x);
+ READ_COLOR4(color[1][3], src_line[1], x);
+ READ_COLOR4(color[2][3], src_line[2], x);
+ READ_COLOR4(color[3][3], src_line[3], x);
+ break;
+ case 3:
+ READ_COLOR3(color[0][3], src_line[0], x);
+ READ_COLOR3(color[1][3], src_line[1], x);
+ READ_COLOR3(color[2][3], src_line[2], x);
+ READ_COLOR3(color[3][3], src_line[3], x);
+ break;
+ default: /* case 2 */
+ READ_COLOR2(color[0][3], src_line[0], x);
+ READ_COLOR2(color[1][3], src_line[1], x);
+ READ_COLOR2(color[2][3], src_line[2], x);
+ READ_COLOR2(color[3][3], src_line[3], x);
+ }
+ x -= 3;
+ }
+ }
+
+ /* We're done with one line, so we shift the source lines up */
+ src_line[0] = src_line[1];
+ src_line[1] = src_line[2];
+ src_line[2] = src_line[3];
+
+ /* Read next line */
+ src_line[3] = src_line[2];
+ if (y < height - 3)
+ src_line[3] += src_linesize;
+ } // y loop
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+ PIX_FMT_RGBA, PIX_FMT_BGRA, PIX_FMT_ARGB, PIX_FMT_ABGR,
+ PIX_FMT_RGB24, PIX_FMT_BGR24,
+ PIX_FMT_RGB565BE, PIX_FMT_BGR565BE, PIX_FMT_RGB555BE, PIX_FMT_BGR555BE,
+ PIX_FMT_RGB565LE, PIX_FMT_BGR565LE, PIX_FMT_RGB555LE, PIX_FMT_BGR555LE,
+ PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ Super2xSaIContext *sai = inlink->dst->priv;
+
+ sai->hi_pixel_mask = 0xFEFEFEFE;
+ sai->lo_pixel_mask = 0x01010101;
+ sai->q_hi_pixel_mask = 0xFCFCFCFC;
+ sai->q_lo_pixel_mask = 0x03030303;
+ sai->bpp = 4;
+
+ switch (inlink->format) {
+ case PIX_FMT_RGB24:
+ case PIX_FMT_BGR24:
+ sai->bpp = 3;
+ break;
+
+ case PIX_FMT_RGB565BE:
+ case PIX_FMT_BGR565BE:
+ sai->is_be = 1;
+ case PIX_FMT_RGB565LE:
+ case PIX_FMT_BGR565LE:
+ sai->hi_pixel_mask = 0xF7DEF7DE;
+ sai->lo_pixel_mask = 0x08210821;
+ sai->q_hi_pixel_mask = 0xE79CE79C;
+ sai->q_lo_pixel_mask = 0x18631863;
+ sai->bpp = 2;
+ break;
+
+ case PIX_FMT_BGR555BE:
+ case PIX_FMT_RGB555BE:
+ sai->is_be = 1;
+ case PIX_FMT_BGR555LE:
+ case PIX_FMT_RGB555LE:
+ sai->hi_pixel_mask = 0x7BDE7BDE;
+ sai->lo_pixel_mask = 0x04210421;
+ sai->q_hi_pixel_mask = 0x739C739C;
+ sai->q_lo_pixel_mask = 0x0C630C63;
+ sai->bpp = 2;
+ break;
+ }
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterLink *inlink = outlink->src->inputs[0];
+
+ outlink->w = inlink->w*2;
+ outlink->h = inlink->h*2;
+
+ av_log(inlink->dst, AV_LOG_VERBOSE, "fmt:%s size:%dx%d -> size:%dx%d\n",
+ av_get_pix_fmt_name(inlink->format),
+ inlink->w, inlink->h, outlink->w, outlink->h);
+
+ return 0;
+}
+
- static void end_frame(AVFilterLink *inlink)
++static int null_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) { return 0; }
+
- ff_end_frame(outlink);
++static int end_frame(AVFilterLink *inlink)
+{
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFilterBufferRef *inpicref = inlink->cur_buf;
+ AVFilterBufferRef *outpicref = outlink->out_buf;
+
+ super2xsai(inlink->dst, inpicref->data[0], inpicref->linesize[0],
+ outpicref->data[0], outpicref->linesize[0],
+ inlink->w, inlink->h);
+
+ ff_draw_slice(outlink, 0, outlink->h, 1);
++ return ff_end_frame(outlink);
+}
+
+AVFilter avfilter_vf_super2xsai = {
+ .name = "super2xsai",
+ .description = NULL_IF_CONFIG_SMALL("Scale the input by 2x using the Super2xSaI pixel art algorithm."),
+ .priv_size = sizeof(Super2xSaIContext),
+ .query_formats = query_formats,
+
+ .inputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .draw_slice = null_draw_slice,
+ .end_frame = end_frame,
+ .min_perms = AV_PERM_READ },
+ { .name = NULL }
+ },
+ .outputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output },
+ { .name = NULL }
+ },
+};
--- /dev/null
- static void start_frame(AVFilterLink *link, AVFilterBufferRef *inpicref)
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * swap UV filter
+ */
+
+#include "avfilter.h"
+#include "formats.h"
+#include "video.h"
+
+static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms,
+ int w, int h)
+{
+ AVFilterBufferRef *picref =
+ ff_default_get_video_buffer(link, perms, w, h);
+
+ /* swap the U and V plane pointers and strides */
+ FFSWAP(uint8_t *, picref->data[1], picref->data[2]);
+ FFSWAP(int, picref->linesize[1], picref->linesize[2]);
+
+ return picref;
+}
+
- ff_start_frame(link->dst->outputs[0], outpicref);
++static int start_frame(AVFilterLink *link, AVFilterBufferRef *inpicref)
+{
+ AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0);
+
+ if (!outpicref)
+ return AVERROR(ENOMEM);
+
+ outpicref->data[1] = inpicref->data[2];
+ outpicref->data[2] = inpicref->data[1];
+
+ outpicref->linesize[1] = inpicref->linesize[2];
+ outpicref->linesize[2] = inpicref->linesize[1];
+
++ return ff_start_frame(link->dst->outputs[0], outpicref);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+ PIX_FMT_YUV420P, PIX_FMT_YUVJ420P, PIX_FMT_YUVA420P,
+ PIX_FMT_YUV444P, PIX_FMT_YUVJ444P, PIX_FMT_YUVA444P,
+ PIX_FMT_YUV440P, PIX_FMT_YUVJ440P,
+ PIX_FMT_YUV422P, PIX_FMT_YUVJ422P,
+ PIX_FMT_YUV411P,
+ PIX_FMT_NONE,
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+AVFilter avfilter_vf_swapuv = {
+ .name = "swapuv",
+ .description = NULL_IF_CONFIG_SMALL("Swap U and V components."),
+ .priv_size = 0,
+ .query_formats = query_formats,
+
+ .inputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .get_video_buffer = get_video_buffer,
+ .start_frame = start_frame, },
+ { .name = NULL }
+ },
+ .outputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO, },
+ { .name = NULL }
+ },
+};
--- /dev/null
- static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
+/*
+ * Copyright (c) 2011 Smartjog S.A.S, Clément Bœsch <clement.boesch@smartjog.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Thumbnail lookup filter: reduces the risk of an inappropriate selection
+ * (such as a black frame) that an absolute seek could yield.
+ *
+ * Simplified version of algorithm by Vadim Zaliva <lord@crocodile.org>.
+ * @see http://notbrainsurgery.livejournal.com/29773.html
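+ *
+ * The filter buffers N frames, computes an RGB histogram for each, averages
+ * the histograms, and keeps the frame whose histogram is closest to that
+ * average (smallest sum of squared differences).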
+ */
+
+#include "avfilter.h"
+#include "internal.h"
+
+#define HIST_SIZE (3*256)
+
+struct thumb_frame {
+ AVFilterBufferRef *buf; ///< cached frame
+ int histogram[HIST_SIZE]; ///< RGB color distribution histogram of the frame
+};
+
+typedef struct {
+ int n; ///< current frame
+ int n_frames; ///< number of frames for analysis
+ struct thumb_frame *frames; ///< the n_frames frames
+} ThumbContext;
+
+static av_cold int init(AVFilterContext *ctx, const char *args)
+{
+ ThumbContext *thumb = ctx->priv;
+
+ if (!args) {
+ thumb->n_frames = 100;
+ } else {
+ int n = sscanf(args, "%d", &thumb->n_frames);
+ if (n != 1 || thumb->n_frames < 2) {
+ thumb->n_frames = 0;
+ av_log(ctx, AV_LOG_ERROR,
+ "Invalid number of frames specified (minimum is 2).\n");
+ return AVERROR(EINVAL);
+ }
+ }
+ thumb->frames = av_calloc(thumb->n_frames, sizeof(*thumb->frames));
+ if (!thumb->frames) {
+        av_log(ctx, AV_LOG_ERROR,
+               "Allocation failure; try lowering the number of frames\n");
+ return AVERROR(ENOMEM);
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "batch size: %d frames\n", thumb->n_frames);
+ return 0;
+}
+
- static void end_frame(AVFilterLink *inlink)
++static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
+{
+ int i, j;
+ AVFilterContext *ctx = inlink->dst;
+ ThumbContext *thumb = ctx->priv;
+ int *hist = thumb->frames[thumb->n].histogram;
+ AVFilterBufferRef *picref = inlink->cur_buf;
+ const uint8_t *p = picref->data[0] + y * picref->linesize[0];
+
+ // update current frame RGB histogram
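+    // hist[] holds three concatenated 256-bin histograms, one per RGB component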
+ for (j = 0; j < h; j++) {
+ for (i = 0; i < inlink->w; i++) {
+ hist[0*256 + p[i*3 ]]++;
+ hist[1*256 + p[i*3 + 1]]++;
+ hist[2*256 + p[i*3 + 2]]++;
+ }
+ p += picref->linesize[0];
+ }
++ return 0;
+}
+
+/**
+ * @brief Compute the sum of squared deviations to estimate "closeness".
+ * @param hist color distribution histogram
+ * @param median average color distribution histogram
+ * @return sum of squared errors
+ */
+static double frame_sum_square_err(const int *hist, const double *median)
+{
+ int i;
+ double err, sum_sq_err = 0;
+
+ for (i = 0; i < HIST_SIZE; i++) {
+ err = median[i] - (double)hist[i];
+ sum_sq_err += err*err;
+ }
+ return sum_sq_err;
+}
+
- return;
++static int end_frame(AVFilterLink *inlink)
+{
+ int i, j, best_frame_idx = 0;
+ double avg_hist[HIST_SIZE] = {0}, sq_err, min_sq_err = -1;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ ThumbContext *thumb = inlink->dst->priv;
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterBufferRef *picref;
+
+ // keep a reference of each frame
+ thumb->frames[thumb->n].buf = inlink->cur_buf;
+ inlink->cur_buf = NULL;
+
+ // no selection until the buffer of N frames is filled up
+ if (thumb->n < thumb->n_frames - 1) {
+ thumb->n++;
- ff_end_frame(outlink);
++ return 0;
+ }
+
+ // average histogram of the N frames
+ for (j = 0; j < FF_ARRAY_ELEMS(avg_hist); j++) {
+ for (i = 0; i < thumb->n_frames; i++)
+ avg_hist[j] += (double)thumb->frames[i].histogram[j];
+ avg_hist[j] /= thumb->n_frames;
+ }
+
+    // find the frame closest to the average using the sum of squared errors
+ for (i = 0; i < thumb->n_frames; i++) {
+ sq_err = frame_sum_square_err(thumb->frames[i].histogram, avg_hist);
+ if (i == 0 || sq_err < min_sq_err)
+ best_frame_idx = i, min_sq_err = sq_err;
+ }
+
+ // free and reset everything (except the best frame buffer)
+ for (i = 0; i < thumb->n_frames; i++) {
+ memset(thumb->frames[i].histogram, 0, sizeof(thumb->frames[i].histogram));
+ if (i == best_frame_idx)
+ continue;
+ avfilter_unref_buffer(thumb->frames[i].buf);
+ thumb->frames[i].buf = NULL;
+ }
+ thumb->n = 0;
+
+ // raise the chosen one
+ picref = thumb->frames[best_frame_idx].buf;
+ av_log(ctx, AV_LOG_INFO, "frame id #%d (pts_time=%f) selected\n",
+ best_frame_idx, picref->pts * av_q2d(inlink->time_base));
+ ff_start_frame(outlink, picref);
+ thumb->frames[best_frame_idx].buf = NULL;
+ ff_draw_slice(outlink, 0, inlink->h, 1);
- static void null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref) { }
++ return ff_end_frame(outlink);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ int i;
+ ThumbContext *thumb = ctx->priv;
+ for (i = 0; i < thumb->n_frames && thumb->frames[i].buf; i++) {
+ avfilter_unref_buffer(thumb->frames[i].buf);
+ thumb->frames[i].buf = NULL;
+ }
+ av_freep(&thumb->frames);
+}
+
++static int null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref) { return 0; }
+
+static int request_frame(AVFilterLink *link)
+{
+ ThumbContext *thumb = link->src->priv;
+
+ /* loop until a frame thumbnail is available (when a frame is queued,
+ * thumb->n is reset to zero) */
+ do {
+ int ret = ff_request_frame(link->src->inputs[0]);
+ if (ret < 0)
+ return ret;
+ } while (thumb->n);
+ return 0;
+}
+
+static int poll_frame(AVFilterLink *link)
+{
+ ThumbContext *thumb = link->src->priv;
+ AVFilterLink *inlink = link->src->inputs[0];
+ int ret, available_frames = ff_poll_frame(inlink);
+
+    /* If the input link cannot provide any frame, we can't do anything at the
+     * moment and thus have no thumbnail available. */
+ if (!available_frames)
+ return 0;
+
+ /* Since at least one frame is available and the next frame will allow us
+ * to compute a thumbnail, we can return 1 frame. */
+ if (thumb->n == thumb->n_frames - 1)
+ return 1;
+
+ /* we have some frame(s) available in the input link, but not yet enough to
+ * output a thumbnail, so we request more */
+ ret = ff_request_frame(inlink);
+ return ret < 0 ? ret : 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+ PIX_FMT_RGB24, PIX_FMT_BGR24,
+ PIX_FMT_NONE
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+AVFilter avfilter_vf_thumbnail = {
+ .name = "thumbnail",
+ .description = NULL_IF_CONFIG_SMALL("Select the most representative frame in a given sequence of consecutive frames."),
+ .priv_size = sizeof(ThumbContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .get_video_buffer = ff_null_get_video_buffer,
+ .start_frame = null_start_frame,
+ .draw_slice = draw_slice,
+ .end_frame = end_frame,
+ },{ .name = NULL }
+ },
+ .outputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .poll_frame = poll_frame,
+ .rej_perms = AV_PERM_REUSE2,
+ },{ .name = NULL }
+ },
+};
--- /dev/null
- static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
+/*
+ * Copyright (c) 2012 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * tile video filter
+ */
+
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "video.h"
+#include "internal.h"
+
+typedef struct {
+ unsigned w, h;
+ unsigned current;
+ FFDrawContext draw;
+ FFDrawColor blank;
+} TileContext;
+
+#define REASONABLE_SIZE 1024
+
+static av_cold int init(AVFilterContext *ctx, const char *args)
+{
+ TileContext *tile = ctx->priv;
+ int r;
+ char dummy;
+
+ if (!args)
+ args = "6x5";
+ r = sscanf(args, "%ux%u%c", &tile->w, &tile->h, &dummy);
+ if (r != 2 || !tile->w || !tile->h)
+ return AVERROR(EINVAL);
+ if (tile->w > REASONABLE_SIZE || tile->h > REASONABLE_SIZE) {
+ av_log(ctx, AV_LOG_ERROR, "Tile size %ux%u is insane.\n",
+ tile->w, tile->h);
+ return AVERROR(EINVAL);
+ }
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
+ return 0;
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ TileContext *tile = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ if (inlink->w > INT_MAX / tile->w) {
+ av_log(ctx, AV_LOG_ERROR, "Total width %ux%u is too much.\n",
+ tile->w, inlink->w);
+ return AVERROR(EINVAL);
+ }
+ if (inlink->h > INT_MAX / tile->h) {
+ av_log(ctx, AV_LOG_ERROR, "Total height %ux%u is too much.\n",
+ tile->h, inlink->h);
+ return AVERROR(EINVAL);
+ }
+ outlink->w = tile->w * inlink->w;
+ outlink->h = tile->h * inlink->h;
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
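+    /* one output frame aggregates w*h input frames, so the output frame rate
+     * is divided accordingly */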
+ outlink->frame_rate = av_mul_q(inlink->frame_rate,
+ (AVRational){ 1, tile->w * tile->h });
+ ff_draw_init(&tile->draw, inlink->format, 0);
+    /* TODO make the color an option, or find a unified way of choosing it */
+ ff_draw_color(&tile->draw, &tile->blank, (uint8_t[]){ 0, 0, 0, -1 });
+
+ return 0;
+}
+
+/* Note: direct rendering is not possible since there is no guarantee that
+ * buffers are fed to start_frame in the order they were obtained from
+ * get_buffer (think B-frames). */
+
- return;
++static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ TileContext *tile = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ if (tile->current)
- ff_start_frame(outlink, outlink->out_buf);
++ return 0;
+    outlink->out_buf = ff_get_video_buffer(outlink, AV_PERM_WRITE,
+                                           outlink->w, outlink->h);
+    if (!outlink->out_buf)
+        return AVERROR(ENOMEM);
+ avfilter_copy_buffer_ref_props(outlink->out_buf, picref);
+ outlink->out_buf->video->w = outlink->w;
+ outlink->out_buf->video->h = outlink->h;
- static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
++ return ff_start_frame(outlink, outlink->out_buf);
+}
+
- static void end_frame(AVFilterLink *inlink)
++static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
+{
+ AVFilterContext *ctx = inlink->dst;
+ TileContext *tile = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ unsigned x0 = inlink->w * (tile->current % tile->w);
+ unsigned y0 = inlink->h * (tile->current / tile->w);
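+    /* x0/y0: top-left corner of the current tile; tiles are filled in
+     * raster-scan order */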
+
+ ff_copy_rectangle2(&tile->draw,
+ outlink->out_buf->data, outlink->out_buf->linesize,
+ inlink ->cur_buf->data, inlink ->cur_buf->linesize,
+ x0, y0 + y, 0, y, inlink->cur_buf->video->w, h);
+    /* TODO: if tile->w == 1 and slice_dir is always 1, we could forward
+     * draw_slice immediately. */
++ return 0;
+}
+
+static void draw_blank_frame(AVFilterContext *ctx)
+{
+ TileContext *tile = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ unsigned x0 = inlink->w * (tile->current % tile->w);
+ unsigned y0 = inlink->h * (tile->current / tile->w);
+
+ ff_fill_rectangle(&tile->draw, &tile->blank,
+ outlink->out_buf->data, outlink->out_buf->linesize,
+ x0, y0, inlink->w, inlink->h);
+ tile->current++;
+}
+static void end_last_frame(AVFilterContext *ctx)
+{
+ TileContext *tile = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ while (tile->current < tile->w * tile->h)
+ draw_blank_frame(ctx);
+ ff_draw_slice(outlink, 0, outlink->out_buf->video->h, 1);
+ ff_end_frame(outlink);
+ tile->current = 0;
+}
+
- avfilter_unref_buffer(inlink->cur_buf);
++static int end_frame(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ TileContext *tile = ctx->priv;
+
++ avfilter_unref_bufferp(&inlink->cur_buf);
+ if (++tile->current == tile->w * tile->h)
+ end_last_frame(ctx);
++ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ TileContext *tile = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ int r;
+
+ while (1) {
+ r = ff_request_frame(inlink);
+ if (r < 0) {
+ if (r == AVERROR_EOF && tile->current)
+ end_last_frame(ctx);
+ else
+ return r;
+ break;
+ }
+ if (!tile->current) /* done */
+ break;
+ }
+ return 0;
+}
+
+
+AVFilter avfilter_vf_tile = {
+ .name = "tile",
+ .description = NULL_IF_CONFIG_SMALL("Tile several successive frames together."),
+ .init = init,
+ .query_formats = query_formats,
+ .priv_size = sizeof(TileContext),
+ .inputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .start_frame = start_frame,
+ .draw_slice = draw_slice,
+ .end_frame = end_frame,
+ .min_perms = AV_PERM_READ, },
+ { .name = NULL }
+ },
+ .outputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ .request_frame = request_frame },
+ { .name = NULL }
+ },
+};
--- /dev/null
- static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2010 Baptiste Coudurier
+ * Copyright (c) 2003 Michael Zucchi <notzed@ximian.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * temporal field interlace filter, ported from MPlayer/libmpcodecs
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/avassert.h"
+#include "avfilter.h"
+#include "internal.h"
+
+enum TInterlaceMode {
+ MODE_MERGE = 0,
+ MODE_DROP_EVEN,
+ MODE_DROP_ODD,
+ MODE_PAD,
+ MODE_INTERLEAVE_TOP,
+ MODE_INTERLEAVE_BOTTOM,
+ MODE_INTERLACEX2,
+};
+
+static const char *tinterlace_mode_str[] = {
+ "merge",
+ "drop_even",
+ "drop_odd",
+ "pad",
+ "interleave_top",
+ "interleave_bottom",
+ "interlacex2",
+ NULL
+};
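+/* this table is indexed by enum TInterlaceMode and must be kept in sync with it */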
+
+typedef struct {
+ enum TInterlaceMode mode; ///< interlace mode selected
+ int frame; ///< number of the output frame
+ int vsub; ///< chroma vertical subsampling
+ AVFilterBufferRef *cur;
+ AVFilterBufferRef *next;
+ uint8_t *black_data[4]; ///< buffer used to fill padded lines
+ int black_linesize[4];
+} TInterlaceContext;
+
+#define FULL_SCALE_YUVJ_FORMATS \
+ PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUVJ440P
+
+static enum PixelFormat full_scale_yuvj_pix_fmts[] = {
+ FULL_SCALE_YUVJ_FORMATS, PIX_FMT_NONE
+};
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+        PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P,
+        PIX_FMT_YUV410P, PIX_FMT_YUVA420P,
+ PIX_FMT_GRAY8, FULL_SCALE_YUVJ_FORMATS,
+ PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx, const char *args)
+{
+ TInterlaceContext *tinterlace = ctx->priv;
+ int i;
+ char c;
+
+ tinterlace->mode = MODE_MERGE;
+
+ if (args) {
+ if (sscanf(args, "%d%c", (int *)&tinterlace->mode, &c) == 1) {
+ if (tinterlace->mode > 6) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Invalid mode '%s', use an integer between 0 and 6\n", args);
+ return AVERROR(EINVAL);
+ }
+
+            av_log(ctx, AV_LOG_WARNING,
+                   "Using a numeric constant is deprecated, use symbolic values\n");
+ } else {
+ for (i = 0; tinterlace_mode_str[i]; i++) {
+ if (!strcmp(tinterlace_mode_str[i], args)) {
+ tinterlace->mode = i;
+ break;
+ }
+ }
+ if (!tinterlace_mode_str[i]) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid argument '%s'\n", args);
+ return AVERROR(EINVAL);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ TInterlaceContext *tinterlace = ctx->priv;
+
+    avfilter_unref_bufferp(&tinterlace->cur);
+    avfilter_unref_bufferp(&tinterlace->next);
+
+ av_freep(&tinterlace->black_data[0]);
+}
+
+static int config_out_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[outlink->format];
+ TInterlaceContext *tinterlace = ctx->priv;
+
+ tinterlace->vsub = desc->log2_chroma_h;
+ outlink->w = inlink->w;
+ outlink->h = tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD ?
+ inlink->h*2 : inlink->h;
+
+ if (tinterlace->mode == MODE_PAD) {
+ uint8_t black[4] = { 16, 128, 128, 16 };
+ int i, ret;
+ if (ff_fmt_is_in(outlink->format, full_scale_yuvj_pix_fmts))
+ black[0] = black[3] = 0;
+ ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesize,
+ outlink->w, outlink->h, outlink->format, 1);
+ if (ret < 0)
+ return ret;
+
+ /* fill black picture with black */
+ for (i = 0; i < 4 && tinterlace->black_data[i]; i++) {
+ int h = i == 1 || i == 2 ? outlink->h >> desc->log2_chroma_h : outlink->h;
+ memset(tinterlace->black_data[i], black[i],
+ tinterlace->black_linesize[i] * h);
+ }
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "mode:%s h:%d -> h:%d\n",
+ tinterlace_mode_str[tinterlace->mode], inlink->h, outlink->h);
+
+ return 0;
+}
+
+#define FIELD_UPPER 0
+#define FIELD_LOWER 1
+#define FIELD_UPPER_AND_LOWER 2
+
+/**
+ * Copy picture field from src to dst.
+ *
+ * @param src_field copy from upper, lower field or both
+ * @param interleave leave a padding line between each copied line
+ * @param dst_field copy to upper or lower field,
+ * only meaningful when interleave is selected
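+ *
+ * Illustrative example: with src_field == FIELD_UPPER and interleave set,
+ * source lines 0, 2, 4, ... are copied to destination lines 0, 2, 4, ...
+ * (dst_field == FIELD_UPPER) or 1, 3, 5, ... (dst_field == FIELD_LOWER).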
+ */
+static inline
+void copy_picture_field(uint8_t *dst[4], int dst_linesize[4],
+ uint8_t *src[4], int src_linesize[4],
+ enum PixelFormat format, int w, int src_h,
+ int src_field, int interleave, int dst_field)
+{
+ const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[format];
+ int plane, vsub = desc->log2_chroma_h;
+ int k = src_field == FIELD_UPPER_AND_LOWER ? 1 : 2;
+
+ for (plane = 0; plane < desc->nb_components; plane++) {
+ int lines = plane == 1 || plane == 2 ? src_h >> vsub : src_h;
+ int linesize = av_image_get_linesize(format, w, plane);
+ uint8_t *dstp = dst[plane];
+ uint8_t *srcp = src[plane];
+ lines /= k;
+ if (src_field == FIELD_LOWER)
+ srcp += src_linesize[plane];
+ if (interleave && dst_field == FIELD_LOWER)
+ dstp += dst_linesize[plane];
+ av_image_copy_plane(dstp, dst_linesize[plane] * (interleave ? 2 : 1),
+ srcp, src_linesize[plane]*k, linesize, lines);
+ }
+}
+
- static void end_frame(AVFilterLink *inlink)
++static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ TInterlaceContext *tinterlace = ctx->priv;
+
+ avfilter_unref_buffer(tinterlace->cur);
+ tinterlace->cur = tinterlace->next;
+ tinterlace->next = picref;
++ return 0;
+}
+
- return;
++static int end_frame(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ TInterlaceContext *tinterlace = ctx->priv;
+ AVFilterBufferRef *cur = tinterlace->cur;
+ AVFilterBufferRef *next = tinterlace->next;
+ AVFilterBufferRef *out = NULL;
+ int field, tff;
+
+ /* we need at least two frames */
+ if (!tinterlace->cur)
- static void null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { }
++ return 0;
+
+ switch (tinterlace->mode) {
+ case MODE_MERGE: /* move the odd frame into the upper field of the new image, even into
+ * the lower field, generating a double-height video at half framerate */
+ out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+ avfilter_copy_buffer_ref_props(out, cur);
+ out->video->h = outlink->h;
+ out->video->interlaced = 1;
+ out->video->top_field_first = 1;
+
+ /* write odd frame lines into the upper field of the new frame */
+ copy_picture_field(out->data, out->linesize,
+ cur->data, cur->linesize,
+ inlink->format, inlink->w, inlink->h,
+ FIELD_UPPER_AND_LOWER, 1, FIELD_UPPER);
+ /* write even frame lines into the lower field of the new frame */
+ copy_picture_field(out->data, out->linesize,
+ next->data, next->linesize,
+ inlink->format, inlink->w, inlink->h,
+ FIELD_UPPER_AND_LOWER, 1, FIELD_LOWER);
+ avfilter_unref_bufferp(&tinterlace->next);
+ break;
+
+ case MODE_DROP_ODD: /* only output even frames, odd frames are dropped; height unchanged, half framerate */
+ case MODE_DROP_EVEN: /* only output odd frames, even frames are dropped; height unchanged, half framerate */
+ out = avfilter_ref_buffer(tinterlace->mode == MODE_DROP_EVEN ? cur : next, AV_PERM_READ);
+ avfilter_unref_bufferp(&tinterlace->next);
+ break;
+
+ case MODE_PAD: /* expand each frame to double height, but pad alternate
+ * lines with black; framerate unchanged */
+ out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+ avfilter_copy_buffer_ref_props(out, cur);
+ out->video->h = outlink->h;
+
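+        /* alternate on each output frame which field carries the picture */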
+ field = (1 + tinterlace->frame) & 1 ? FIELD_UPPER : FIELD_LOWER;
+ /* copy upper and lower fields */
+ copy_picture_field(out->data, out->linesize,
+ cur->data, cur->linesize,
+ inlink->format, inlink->w, inlink->h,
+ FIELD_UPPER_AND_LOWER, 1, field);
+ /* pad with black the other field */
+ copy_picture_field(out->data, out->linesize,
+ tinterlace->black_data, tinterlace->black_linesize,
+ inlink->format, inlink->w, inlink->h,
+ FIELD_UPPER_AND_LOWER, 1, !field);
+ break;
+
+ /* interleave upper/lower lines from odd frames with lower/upper lines from even frames,
+ * halving the frame rate and preserving image height */
+ case MODE_INTERLEAVE_TOP: /* top field first */
+ case MODE_INTERLEAVE_BOTTOM: /* bottom field first */
+ tff = tinterlace->mode == MODE_INTERLEAVE_TOP;
+ out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+ avfilter_copy_buffer_ref_props(out, cur);
+ out->video->interlaced = 1;
+ out->video->top_field_first = tff;
+
+ /* copy upper/lower field from cur */
+ copy_picture_field(out->data, out->linesize,
+ cur->data, cur->linesize,
+ inlink->format, inlink->w, inlink->h,
+ tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER);
+ /* copy lower/upper field from next */
+ copy_picture_field(out->data, out->linesize,
+ next->data, next->linesize,
+ inlink->format, inlink->w, inlink->h,
+ tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER);
+ avfilter_unref_bufferp(&tinterlace->next);
+ break;
+ case MODE_INTERLACEX2: /* re-interlace preserving image height, double frame rate */
+ /* output current frame first */
+ out = avfilter_ref_buffer(cur, AV_PERM_READ);
+ out->video->interlaced = 1;
+
+ ff_start_frame(outlink, out);
+ ff_draw_slice(outlink, 0, outlink->h, 1);
+ ff_end_frame(outlink);
+
+ /* output mix of current and next frame */
+ tff = next->video->top_field_first;
+ out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+ avfilter_copy_buffer_ref_props(out, next);
+ out->video->interlaced = 1;
+
+ /* write current frame second field lines into the second field of the new frame */
+ copy_picture_field(out->data, out->linesize,
+ cur->data, cur->linesize,
+ inlink->format, inlink->w, inlink->h,
+ tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER);
+ /* write next frame first field lines into the first field of the new frame */
+ copy_picture_field(out->data, out->linesize,
+ next->data, next->linesize,
+ inlink->format, inlink->w, inlink->h,
+ tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER);
+ break;
+ }
+
+ ff_start_frame(outlink, out);
+ ff_draw_slice(outlink, 0, outlink->h, 1);
+ ff_end_frame(outlink);
+
+ tinterlace->frame++;
++
++ return 0;
+}
+
+static int poll_frame(AVFilterLink *outlink)
+{
+ TInterlaceContext *tinterlace = outlink->src->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ int ret, val;
+
+ val = ff_poll_frame(inlink);
+
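+    /* the input can produce a frame but none is buffered yet: fetch one so
+     * that cur/next are primed before we report availability */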
+ if (val == 1 && !tinterlace->next) {
+ if ((ret = ff_request_frame(inlink)) < 0)
+ return ret;
+ val = ff_poll_frame(inlink);
+ }
+ av_assert0(tinterlace->next);
+
+ return val;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ TInterlaceContext *tinterlace = outlink->src->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+
+ do {
+ int ret;
+
+ if ((ret = ff_request_frame(inlink)) < 0)
+ return ret;
+ } while (!tinterlace->cur);
+
+ return 0;
+}
+
++static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { return 0; }
+
+AVFilter avfilter_vf_tinterlace = {
+ .name = "tinterlace",
+ .description = NULL_IF_CONFIG_SMALL("Perform temporal field interlacing."),
+ .priv_size = sizeof(TInterlaceContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+
+ .inputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .start_frame = start_frame,
+ .draw_slice = null_draw_slice,
+ .end_frame = end_frame, },
+ { .name = NULL}
+ },
+ .outputs = (const AVFilterPad[]) {
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_out_props,
+ .poll_frame = poll_frame,
+ .request_frame = request_frame },
+ { .name = NULL}
+ },
+};
outlink->out_buf = ff_get_video_buffer(outlink, AV_PERM_WRITE,
outlink->w, outlink->h);
+ if (!outlink->out_buf)
+ return AVERROR(ENOMEM);
+
outlink->out_buf->pts = picref->pts;
- if (picref->video->pixel_aspect.num == 0) {
- outlink->out_buf->video->pixel_aspect = picref->video->pixel_aspect;
+ if (picref->video->sample_aspect_ratio.num == 0) {
+ outlink->out_buf->video->sample_aspect_ratio = picref->video->sample_aspect_ratio;
} else {
- outlink->out_buf->video->pixel_aspect.num = picref->video->pixel_aspect.den;
- outlink->out_buf->video->pixel_aspect.den = picref->video->pixel_aspect.num;
+ outlink->out_buf->video->sample_aspect_ratio.num = picref->video->sample_aspect_ratio.den;
+ outlink->out_buf->video->sample_aspect_ratio.den = picref->video->sample_aspect_ratio.num;
}
- ff_start_frame(outlink, avfilter_ref_buffer(outlink->out_buf, ~0));
+ buf_out = avfilter_ref_buffer(outlink->out_buf, ~0);
+ if (!buf_out)
+ return AVERROR(ENOMEM);
+ return ff_start_frame(outlink, buf_out);
}
- static void end_frame(AVFilterLink *inlink)
+ static int end_frame(AVFilterLink *inlink)
{
TransContext *trans = inlink->dst->priv;
AVFilterBufferRef *inpic = inlink->cur_buf;
}
}
- ff_draw_slice(outlink, 0, outpic->video->h, 1);
- ff_end_frame(outlink);
+ if ((ret = ff_draw_slice(outlink, 0, outpic->video->h, 1)) < 0 ||
+ (ret = ff_end_frame(outlink)) < 0)
+ return ret;
+ return 0;
}
- static void null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { }
++static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { return 0; }
+
AVFilter avfilter_vf_transpose = {
.name = "transpose",
.description = NULL_IF_CONFIG_SMALL("Transpose input video."),
ret = ff_request_frame(link->src->inputs[0]);
- if (ret == AVERROR_EOF && yadif->next) {
+ if (ret == AVERROR_EOF && yadif->cur) {
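+ /* input hit EOF with a frame still buffered: re-feed the last frame with a
+ * linearly extrapolated pts so the remaining output can be flushed */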
AVFilterBufferRef *next = avfilter_ref_buffer(yadif->next, AV_PERM_READ);
+ if (!next)
+ return AVERROR(ENOMEM);
+
next->pts = yadif->next->pts * 2 - yadif->cur->pts;
start_frame(link->src->inputs[0], next);
outlink = inlink->dst->outputs[0];
if (outlink) {
+ AVFilterBufferRef *buf_out;
outlink->out_buf = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+ if (!outlink->out_buf)
+ return AVERROR(ENOMEM);
+
avfilter_copy_buffer_ref_props(outlink->out_buf, picref);
- ff_start_frame(outlink, avfilter_ref_buffer(outlink->out_buf, ~0));
+ outlink->out_buf->video->w = outlink->w;
+ outlink->out_buf->video->h = outlink->h;
+ buf_out = avfilter_ref_buffer(outlink->out_buf, ~0);
+ if (!buf_out)
+ return AVERROR(ENOMEM);
+
+ return ff_start_frame(outlink, buf_out);
}
+ return 0;
+ }
+
+ static void clear_link(AVFilterLink *link)
+ {
+ avfilter_unref_bufferp(&link->cur_buf);
+ avfilter_unref_bufferp(&link->src_buf);
+ avfilter_unref_bufferp(&link->out_buf);
}
/* XXX: should we do the duplicating of the picture ref here, instead of
* forcing the source filter to do it? */
- void ff_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
+ int ff_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
- void (*start_frame)(AVFilterLink *, AVFilterBufferRef *);
+ int (*start_frame)(AVFilterLink *, AVFilterBufferRef *);
AVFilterPad *dst = link->dstpad;
- int perms = picref->perms;
+ int ret, perms = picref->perms;
+ AVFilterCommand *cmd= link->dst->command_queue;
+ int64_t pts;
- FF_DPRINTF_START(NULL, start_frame); ff_dlog_link(NULL, link, 0); av_dlog(NULL, " "); ff_dlog_ref(NULL, picref, 1);
+ FF_TPRINTF_START(NULL, start_frame); ff_tlog_link(NULL, link, 0); ff_tlog(NULL, " "); ff_tlog_ref(NULL, picref, 1);
if (!(start_frame = dst->start_frame))
start_frame = default_start_frame;
link->dstpad->min_perms, link->dstpad->rej_perms);
link->cur_buf = ff_get_video_buffer(link, dst->min_perms, link->w, link->h);
+ if (!link->cur_buf) {
+ avfilter_unref_bufferp(&picref);
+ return AVERROR(ENOMEM);
+ }
+
link->src_buf = picref;
avfilter_copy_buffer_ref_props(link->cur_buf, link->src_buf);
+
+ /* copy palette if required */
+ if (av_pix_fmt_descriptors[link->format].flags & PIX_FMT_PAL)
+ memcpy(link->cur_buf->data[1], link->src_buf->data[1], AVPALETTE_SIZE);
}
else
link->cur_buf = picref;
- start_frame(link, link->cur_buf);
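+ /* run any queued filter commands scheduled at or before this frame's time */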
+ while (cmd && cmd->time <= picref->pts * av_q2d(link->time_base)) {
+ av_log(link->dst, AV_LOG_DEBUG,
+ "Processing command time:%f command:%s arg:%s\n",
+ cmd->time, cmd->command, cmd->arg);
+ avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
+ ff_command_queue_pop(link->dst);
+ cmd = link->dst->command_queue;
+ }
+ pts = link->cur_buf->pts;
+ ret = start_frame(link, link->cur_buf);
+ ff_update_link_current_pts(link, link->cur_buf ? link->cur_buf->pts : pts);
+ if (ret < 0)
+ clear_link(link);
+
+ return ret;
}
- void ff_null_start_frame_keep_ref(AVFilterLink *inlink,
++int ff_null_start_frame_keep_ref(AVFilterLink *inlink,
+ AVFilterBufferRef *picref)
+{
- ff_start_frame(inlink->dst->outputs[0], avfilter_ref_buffer(picref, ~0));
++ return ff_start_frame(inlink->dst->outputs[0], avfilter_ref_buffer(picref, ~0));
+}
+
- void ff_null_end_frame(AVFilterLink *link)
+ int ff_null_end_frame(AVFilterLink *link)
{
- ff_end_frame(link->dst->outputs[0]);
+ return ff_end_frame(link->dst->outputs[0]);
}
- static void default_end_frame(AVFilterLink *inlink)
+ static int default_end_frame(AVFilterLink *inlink)
{
AVFilterLink *outlink = NULL;
outlink = inlink->dst->outputs[0];
if (outlink)
- ff_draw_slice(outlink, y, h, slice_dir);
+ return ff_draw_slice(outlink, y, h, slice_dir);
+ return 0;
}
- void ff_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
+ int ff_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
{
uint8_t *src[4], *dst[4];
- int i, j, vsub;
- void (*draw_slice)(AVFilterLink *, int, int, int);
+ int i, j, vsub, ret;
+ int (*draw_slice)(AVFilterLink *, int, int, int);
- FF_DPRINTF_START(NULL, draw_slice); ff_dlog_link(NULL, link, 0); av_dlog(NULL, " y:%d h:%d dir:%d\n", y, h, slice_dir);
+ FF_TPRINTF_START(NULL, draw_slice); ff_tlog_link(NULL, link, 0); ff_tlog(NULL, " y:%d h:%d dir:%d\n", y, h, slice_dir);
/* copy the slice if needed for permission reasons */
if (link->src_buf) {
if (!(draw_slice = link->dstpad->draw_slice))
draw_slice = default_draw_slice;
- draw_slice(link, y, h, slice_dir);
+ ret = draw_slice(link, y, h, slice_dir);
+ if (ret < 0)
+ clear_link(link);
+ return ret;
}
- void avfilter_default_end_frame(AVFilterLink *inlink)
+
- default_end_frame(inlink);
++int avfilter_default_end_frame(AVFilterLink *inlink)
+{
++ return default_end_frame(inlink);
+}
+
* frame need only be valid once draw_slice() is called for that
* portion. The receiving filter will free this reference when
* it no longer needs it.
+ *
+ * @return >= 0 on success, a negative AVERROR on error. This function will
+ * unreference picref in case of error.
*/
- void ff_start_frame(AVFilterLink *link, AVFilterBufferRef *picref);
+ int ff_start_frame(AVFilterLink *link, AVFilterBufferRef *picref);
/**
- void ff_null_start_frame_keep_ref(AVFilterLink *inlink, AVFilterBufferRef *picref);
+ * Pass video frame along and keep an internal reference for later use.
+ */
++int ff_null_start_frame_keep_ref(AVFilterLink *inlink, AVFilterBufferRef *picref);
+
+/**
* Notify the next filter that the current frame has finished.
*
* @param link the output link the frame was sent over
{
ColorContext *color = link->src->priv;
AVFilterBufferRef *picref = ff_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h);
- picref->video->pixel_aspect = (AVRational) {1, 1};
- picref->pts = color->pts++;
- picref->pos = -1;
+ AVFilterBufferRef *buf_out;
+ int ret;
+
+ if (!picref)
+ return AVERROR(ENOMEM);
+
+ picref->video->sample_aspect_ratio = (AVRational) {1, 1};
+ picref->pts = color->pts++;
+ picref->pos = -1;
- ff_start_frame(link, avfilter_ref_buffer(picref, ~0));
+ buf_out = avfilter_ref_buffer(picref, ~0);
+ if (!buf_out) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ ret = ff_start_frame(link, buf_out);
+ if (ret < 0)
+ goto fail;
+
- ff_draw_rectangle(picref->data, picref->linesize,
- color->line, color->line_step, color->hsub, color->vsub,
+ ff_fill_rectangle(&color->draw, &color->color, picref->data, picref->linesize,
0, 0, color->w, color->h);
- ff_draw_slice(link, 0, color->h, 1);
- ff_end_frame(link);
+ ret = ff_draw_slice(link, 0, color->h, 1);
+ if (ret < 0)
+ goto fail;
+
+ ret = ff_end_frame(link);
+
+ fail:
avfilter_unref_buffer(picref);
- return 0;
+ return ret;
}
AVFilter avfilter_vsrc_color = {
{
TestSourceContext *test = outlink->src->priv;
AVFilterBufferRef *picref;
+ int ret;
- if (test->max_pts >= 0 && test->pts > test->max_pts)
+ if (test->max_pts >= 0 && test->pts >= test->max_pts)
return AVERROR_EOF;
picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, test->w, test->h);
+ if (!picref)
+ return AVERROR(ENOMEM);
+
picref->pts = test->pts++;
picref->pos = -1;
picref->video->key_frame = 1;
picref->video->interlaced = 0;
picref->video->pict_type = AV_PICTURE_TYPE_I;
- picref->video->pixel_aspect = test->sar;
- test->nb_frame++;
+ picref->video->sample_aspect_ratio = test->sar;
test->fill_picture_fn(outlink->src, picref);
+ test->nb_frame++;
- ff_start_frame(outlink, picref);
- ff_draw_slice(outlink, 0, test->h, 1);
- ff_end_frame(outlink);
+ if ((ret = ff_start_frame(outlink, picref)) < 0 ||
+ (ret = ff_draw_slice(outlink, 0, test->h, 1)) < 0 ||
+ (ret = ff_end_frame(outlink)) < 0)
+ return ret;
return 0;
}
--- /dev/null
-fate-filter-asyncts: CMD = pcm -i $(SRC) -af asyncts
+ FATE_ASYNCTS += fate-filter-asyncts
+ fate-filter-asyncts: SRC = $(SAMPLES)/nellymoser/nellymoser-discont.flv
++fate-filter-asyncts: CMD = pcm -i $(SRC) -af aresample=min_comp=0.001:min_hard_comp=0.1
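+# min_comp/min_hard_comp set the drift thresholds at which aresample starts
+# soft and hard (sample add/drop) compensation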
+ fate-filter-asyncts: CMP = oneoff
+ fate-filter-asyncts: REF = $(SAMPLES)/nellymoser/nellymoser-discont.pcm
+
+ FATE_FILTER += $(FATE_ASYNCTS)
+ FATE_SAMPLES_AVCONV += $(FATE_ASYNCTS)
+
+ fate-filter: $(FATE_FILTER)