Merge commit '9df889a5f116c1ee78c2f239e0ba599c492431aa'
author Clément Bœsch <u@pkh.me>
Fri, 29 Jul 2016 09:01:36 +0000 (11:01 +0200)
committer Clément Bœsch <u@pkh.me>
Fri, 29 Jul 2016 09:01:36 +0000 (11:01 +0200)
* commit '9df889a5f116c1ee78c2f239e0ba599c492431aa':
  h264: rename h264.[ch] to h264dec.[ch]

Merged-by: Clément Bœsch <u@pkh.me>
39 files changed:
libavcodec/Makefile
libavcodec/crystalhd.c
libavcodec/dxva2_h264.c
libavcodec/h264_cabac.c
libavcodec/h264_cavlc.c
libavcodec/h264_direct.c
libavcodec/h264_loopfilter.c
libavcodec/h264_mb.c
libavcodec/h264_mc_template.c
libavcodec/h264_mvpred.h
libavcodec/h264_parse.c
libavcodec/h264_parser.c
libavcodec/h264_picture.c
libavcodec/h264_ps.c
libavcodec/h264_refs.c
libavcodec/h264_sei.c
libavcodec/h264_slice.c
libavcodec/h264data.c
libavcodec/h264data.h
libavcodec/h264dec.c
libavcodec/h264dec.h
libavcodec/h264idct_template.c
libavcodec/mediacodecdec_h264.c
libavcodec/mips/h264chroma_mips.h
libavcodec/mips/h264dsp_mips.h
libavcodec/omx.c
libavcodec/ppc/h264dsp.c
libavcodec/qsvenc_h264.c
libavcodec/svq3.c
libavcodec/vaapi_encode_h264.c
libavcodec/vaapi_h264.c
libavcodec/vda_h264.c
libavcodec/vda_h264_dec.c
libavcodec/vdpau.c
libavcodec/vdpau_compat.h
libavcodec/vdpau_h264.c
libavcodec/videotoolbox.c
libavcodec/x86/h264_qpel.c
libavformat/mxfenc.c

Simple merge
index 3cb32a8,0000000..d6ebcee
mode 100644,000000..100644
--- /dev/null
@@@ -1,1226 -1,0 +1,1226 @@@
- #include "h264.h"
 +/*
 + * - CrystalHD decoder module -
 + *
 + * Copyright(C) 2010,2011 Philip Langdale <ffmpeg.philipl@overt.org>
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/*
 + * - Principles of Operation -
 + *
 + * The CrystalHD decoder operates at the bitstream level - which is an even
 + * higher level than the decoding hardware you typically see in modern GPUs.
 + * This means it has a very simple interface, in principle. You feed demuxed
 + * packets in one end and get decoded picture (fields/frames) out the other.
 + *
 + * Of course, nothing is ever that simple. Due, at the very least, to b-frame
 + * dependencies in the supported formats, the hardware has a delay between
 + * when a packet goes in, and when a picture comes out. Furthermore, this delay
 + * is not just a function of time, but also of how many additional frames must
 + * be fed into the decoder to satisfy the b-frame dependencies.
 + *
 + * As such, a pipeline will build up that is roughly equivalent to the required
 + * DPB for the file being played. If that was all it took, things would still
 + * be simple - so, of course, it isn't.
 + *
 + * The hardware has a way of indicating that a picture is ready to be copied out,
 + * but this is unreliable - sometimes the attempt to copy out will still fail.
 + * Based on testing, the code therefore waits until 3 pictures are ready before
 + * starting to copy out, which has the effect of extending the pipeline.
 + *
 + * Finally, while it is tempting to say that once the decoder starts outputting
 + * frames, the software should never fail to return a frame from a decode(),
 + * this is a hard assertion to make, because the stream may switch between
 + * differently encoded content (number of b-frames, interlacing, etc) which
 + * might require a longer pipeline than before. If that happened, you could
 + * deadlock trying to retrieve a frame that can't be decoded without feeding
 + * in additional packets.
 + *
 + * As such, the code will return in the event that a picture cannot be copied
 + * out, leading to an increase in the length of the pipeline. This in turn,
 + * means we have to be sensitive to the time it takes to decode a picture;
 + * we do not want to give up just because the hardware needed a little more
 + * time to prepare the picture! For this reason, there are delays included
 + * in the decode() path that ensure that, under normal conditions, the hardware
 + * will only fail to return a frame if it really needs additional packets to
 + * complete the decoding.
 + *
 + * Finally, to be explicit, we do not want the pipeline to grow without bound
 + * for two reasons: 1) The hardware can only buffer a finite number of packets,
 + * and 2) The client application may not be able to cope with arbitrarily long
 + * delays in the video path relative to the audio path. For example, MPlayer
 + * can only handle a 20 picture delay (although this is arbitrary, and needs
 + * to be extended to fully support the CrystalHD where the delay could be up
 + * to 32 pictures - consider PAFF H.264 content with 16 b-frames).
 + */
 +
 +/*****************************************************************************
 + * Includes
 + ****************************************************************************/
 +
 +#define _XOPEN_SOURCE 600
 +#include <inttypes.h>
 +#include <stdio.h>
 +#include <stdlib.h>
 +
 +#include <libcrystalhd/bc_dts_types.h>
 +#include <libcrystalhd/bc_dts_defs.h>
 +#include <libcrystalhd/libcrystalhd_if.h>
 +
 +#include "avcodec.h"
++#include "h264dec.h"
 +#include "internal.h"
 +#include "libavutil/imgutils.h"
 +#include "libavutil/intreadwrite.h"
 +#include "libavutil/opt.h"
 +
 +#if HAVE_UNISTD_H
 +#include <unistd.h>
 +#endif
 +
 +/** Timeout parameter passed to DtsProcOutput() in us */
 +#define OUTPUT_PROC_TIMEOUT 50
 +/** Step between fake timestamps passed to hardware in units of 100ns */
 +#define TIMESTAMP_UNIT 100000
 +/** Initial value in us of the wait in decode() */
 +#define BASE_WAIT 10000
 +/** Increment in us to adjust wait in decode() */
 +#define WAIT_UNIT 1000
 +
 +
 +/*****************************************************************************
 + * Module private data
 + ****************************************************************************/
 +
 +typedef enum {
 +    RET_ERROR           = -1,
 +    RET_OK              = 0,
 +    RET_COPY_AGAIN      = 1,
 +    RET_SKIP_NEXT_COPY  = 2,
 +    RET_COPY_NEXT_FIELD = 3,
 +} CopyRet;
 +
 +typedef struct OpaqueList {
 +    struct OpaqueList *next;
 +    uint64_t fake_timestamp;
 +    uint64_t reordered_opaque;
 +    uint8_t pic_type;
 +} OpaqueList;
 +
 +typedef struct {
 +    AVClass *av_class;
 +    AVCodecContext *avctx;
 +    AVFrame *pic;
 +    HANDLE dev;
 +
 +    uint8_t *orig_extradata;
 +    uint32_t orig_extradata_size;
 +
 +    AVBitStreamFilterContext *bsfc;
 +    AVCodecParserContext *parser;
 +
 +    uint8_t is_70012;
 +    uint8_t *sps_pps_buf;
 +    uint32_t sps_pps_size;
 +    uint8_t is_nal;
 +    uint8_t output_ready;
 +    uint8_t need_second_field;
 +    uint8_t skip_next_output;
 +    uint64_t decode_wait;
 +
 +    uint64_t last_picture;
 +
 +    OpaqueList *head;
 +    OpaqueList *tail;
 +
 +    /* Options */
 +    uint32_t sWidth;
 +    uint8_t bframe_bug;
 +} CHDContext;
 +
 +static const AVOption options[] = {
 +    { "crystalhd_downscale_width",
 +      "Turn on downscaling to the specified width",
 +      offsetof(CHDContext, sWidth),
 +      AV_OPT_TYPE_INT, {.i64 = 0}, 0, UINT32_MAX,
 +      AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM, },
 +    { NULL, },
 +};
 +
 +
 +/*****************************************************************************
 + * Helper functions
 + ****************************************************************************/
 +
 +static inline BC_MEDIA_SUBTYPE id2subtype(CHDContext *priv, enum AVCodecID id)
 +{
 +    switch (id) {
 +    case AV_CODEC_ID_MPEG4:
 +        return BC_MSUBTYPE_DIVX;
 +    case AV_CODEC_ID_MSMPEG4V3:
 +        return BC_MSUBTYPE_DIVX311;
 +    case AV_CODEC_ID_MPEG2VIDEO:
 +        return BC_MSUBTYPE_MPEG2VIDEO;
 +    case AV_CODEC_ID_VC1:
 +        return BC_MSUBTYPE_VC1;
 +    case AV_CODEC_ID_WMV3:
 +        return BC_MSUBTYPE_WMV3;
 +    case AV_CODEC_ID_H264:
 +        return priv->is_nal ? BC_MSUBTYPE_AVC1 : BC_MSUBTYPE_H264;
 +    default:
 +        return BC_MSUBTYPE_INVALID;
 +    }
 +}
 +
 +static inline void print_frame_info(CHDContext *priv, BC_DTS_PROC_OUT *output)
 +{
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tYBuffSz: %u\n", output->YbuffSz);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tYBuffDoneSz: %u\n",
 +           output->YBuffDoneSz);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tUVBuffDoneSz: %u\n",
 +           output->UVBuffDoneSz);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tTimestamp: %"PRIu64"\n",
 +           output->PicInfo.timeStamp);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tPicture Number: %u\n",
 +           output->PicInfo.picture_number);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tWidth: %u\n",
 +           output->PicInfo.width);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tHeight: %u\n",
 +           output->PicInfo.height);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tChroma: 0x%03x\n",
 +           output->PicInfo.chroma_format);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tPulldown: %u\n",
 +           output->PicInfo.pulldown);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tFlags: 0x%08x\n",
 +           output->PicInfo.flags);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tFrame Rate/Res: %u\n",
 +           output->PicInfo.frame_rate);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tAspect Ratio: %u\n",
 +           output->PicInfo.aspect_ratio);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tColor Primaries: %u\n",
 +           output->PicInfo.colour_primaries);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tMetaData: %u\n",
 +           output->PicInfo.picture_meta_payload);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tSession Number: %u\n",
 +           output->PicInfo.sess_num);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tycom: %u\n",
 +           output->PicInfo.ycom);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tCustom Aspect: %u\n",
 +           output->PicInfo.custom_aspect_ratio_width_height);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tFrames to Drop: %u\n",
 +           output->PicInfo.n_drop);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tH264 Valid Fields: 0x%08x\n",
 +           output->PicInfo.other.h264.valid);
 +}
 +
 +
 +/*****************************************************************************
 + * OpaqueList functions
 + ****************************************************************************/
 +
 +static uint64_t opaque_list_push(CHDContext *priv, uint64_t reordered_opaque,
 +                                 uint8_t pic_type)
 +{
 +    OpaqueList *newNode = av_mallocz(sizeof (OpaqueList));
 +    if (!newNode) {
 +        av_log(priv->avctx, AV_LOG_ERROR,
 +               "Unable to allocate new node in OpaqueList.\n");
 +        return 0;
 +    }
 +    if (!priv->head) {
 +        newNode->fake_timestamp = TIMESTAMP_UNIT;
 +        priv->head              = newNode;
 +    } else {
 +        newNode->fake_timestamp = priv->tail->fake_timestamp + TIMESTAMP_UNIT;
 +        priv->tail->next        = newNode;
 +    }
 +    priv->tail = newNode;
 +    newNode->reordered_opaque = reordered_opaque;
 +    newNode->pic_type = pic_type;
 +
 +    return newNode->fake_timestamp;
 +}
 +
 +/*
 + * The OpaqueList is built in decode order, while elements will be removed
 + * in presentation order. If frames are reordered, this means we must be
 + * able to remove elements that are not the first element.
 + *
 + * Returned node must be freed by caller.
 + */
 +static OpaqueList *opaque_list_pop(CHDContext *priv, uint64_t fake_timestamp)
 +{
 +    OpaqueList *node = priv->head;
 +
 +    if (!priv->head) {
 +        av_log(priv->avctx, AV_LOG_ERROR,
 +               "CrystalHD: Attempted to query non-existent timestamps.\n");
 +        return NULL;
 +    }
 +
 +    /*
 +     * The first element is special-cased because we have to manipulate
 +     * the head pointer rather than the previous element in the list.
 +     */
 +    if (priv->head->fake_timestamp == fake_timestamp) {
 +        priv->head = node->next;
 +
 +        if (!priv->head || !priv->head->next)
 +            priv->tail = priv->head;
 +
 +        node->next = NULL;
 +        return node;
 +    }
 +
 +    /*
 +     * The list is processed at arm's length so that we have the
 +     * previous element available to rewrite its next pointer.
 +     */
 +    while (node->next) {
 +        OpaqueList *current = node->next;
 +        if (current->fake_timestamp == fake_timestamp) {
 +            node->next = current->next;
 +
 +            if (!node->next)
 +                priv->tail = node;
 +
 +            current->next = NULL;
 +            return current;
 +        } else {
 +            node = current;
 +        }
 +    }
 +
 +    av_log(priv->avctx, AV_LOG_VERBOSE,
 +           "CrystalHD: Couldn't match fake_timestamp.\n");
 +    return NULL;
 +}
 +
 +
 +/*****************************************************************************
 + * Video decoder API function definitions
 + ****************************************************************************/
 +
 +static void flush(AVCodecContext *avctx)
 +{
 +    CHDContext *priv = avctx->priv_data;
 +
 +    avctx->has_b_frames     = 0;
 +    priv->last_picture      = -1;
 +    priv->output_ready      = 0;
 +    priv->need_second_field = 0;
 +    priv->skip_next_output  = 0;
 +    priv->decode_wait       = BASE_WAIT;
 +
 +    av_frame_unref(priv->pic);
 +
 +    /* Flush mode 4 flushes all software and hardware buffers. */
 +    DtsFlushInput(priv->dev, 4);
 +}
 +
 +
 +static av_cold int uninit(AVCodecContext *avctx)
 +{
 +    CHDContext *priv = avctx->priv_data;
 +    HANDLE device;
 +
 +    device = priv->dev;
 +    DtsStopDecoder(device);
 +    DtsCloseDecoder(device);
 +    DtsDeviceClose(device);
 +
 +    /*
 +     * Restore original extradata, so that if the decoder is
 +     * reinitialised, the bitstream detection and filtering
 +     * will work as expected.
 +     */
 +    if (priv->orig_extradata) {
 +        av_free(avctx->extradata);
 +        avctx->extradata = priv->orig_extradata;
 +        avctx->extradata_size = priv->orig_extradata_size;
 +        priv->orig_extradata = NULL;
 +        priv->orig_extradata_size = 0;
 +    }
 +
 +    av_parser_close(priv->parser);
 +    if (priv->bsfc) {
 +        av_bitstream_filter_close(priv->bsfc);
 +    }
 +
 +    av_freep(&priv->sps_pps_buf);
 +
 +    av_frame_free(&priv->pic);
 +
 +    if (priv->head) {
 +       OpaqueList *node = priv->head;
 +       while (node) {
 +          OpaqueList *next = node->next;
 +          av_free(node);
 +          node = next;
 +       }
 +    }
 +
 +    return 0;
 +}
 +
 +
 +static av_cold int init(AVCodecContext *avctx)
 +{
 +    CHDContext* priv;
 +    BC_STATUS ret;
 +    BC_INFO_CRYSTAL version;
 +    BC_INPUT_FORMAT format = {
 +        .FGTEnable   = FALSE,
 +        .Progressive = TRUE,
 +        .OptFlags    = 0x80000000 | vdecFrameRate59_94 | 0x40,
 +        .width       = avctx->width,
 +        .height      = avctx->height,
 +    };
 +
 +    BC_MEDIA_SUBTYPE subtype;
 +
 +    uint32_t mode = DTS_PLAYBACK_MODE |
 +                    DTS_LOAD_FILE_PLAY_FW |
 +                    DTS_SKIP_TX_CHK_CPB |
 +                    DTS_PLAYBACK_DROP_RPT_MODE |
 +                    DTS_SINGLE_THREADED_MODE |
 +                    DTS_DFLT_RESOLUTION(vdecRESOLUTION_1080p23_976);
 +
 +    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD Init for %s\n",
 +           avctx->codec->name);
 +
 +    avctx->pix_fmt = AV_PIX_FMT_YUYV422;
 +
 +    /* Initialize the library */
 +    priv               = avctx->priv_data;
 +    priv->avctx        = avctx;
 +    priv->is_nal       = avctx->extradata_size > 0 && *(avctx->extradata) == 1;
 +    priv->last_picture = -1;
 +    priv->decode_wait  = BASE_WAIT;
 +    priv->pic          = av_frame_alloc();
 +
 +    subtype = id2subtype(priv, avctx->codec->id);
 +    switch (subtype) {
 +    case BC_MSUBTYPE_AVC1:
 +        {
 +            uint8_t *dummy_p;
 +            int dummy_int;
 +
 +            /* Back up the extradata so it can be restored at close time. */
 +            priv->orig_extradata = av_malloc(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
 +            if (!priv->orig_extradata) {
 +                av_log(avctx, AV_LOG_ERROR,
 +                       "Failed to allocate copy of extradata\n");
 +                return AVERROR(ENOMEM);
 +            }
 +            priv->orig_extradata_size = avctx->extradata_size;
 +            memcpy(priv->orig_extradata, avctx->extradata, avctx->extradata_size);
 +
 +            priv->bsfc = av_bitstream_filter_init("h264_mp4toannexb");
 +            if (!priv->bsfc) {
 +                av_log(avctx, AV_LOG_ERROR,
 +                       "Cannot open the h264_mp4toannexb BSF!\n");
 +                return AVERROR_BSF_NOT_FOUND;
 +            }
 +            av_bitstream_filter_filter(priv->bsfc, avctx, NULL, &dummy_p,
 +                                       &dummy_int, NULL, 0, 0);
 +        }
 +        subtype = BC_MSUBTYPE_H264;
 +        // Fall-through
 +    case BC_MSUBTYPE_H264:
 +        format.startCodeSz = 4;
 +        // Fall-through
 +    case BC_MSUBTYPE_VC1:
 +    case BC_MSUBTYPE_WVC1:
 +    case BC_MSUBTYPE_WMV3:
 +    case BC_MSUBTYPE_WMVA:
 +    case BC_MSUBTYPE_MPEG2VIDEO:
 +    case BC_MSUBTYPE_DIVX:
 +    case BC_MSUBTYPE_DIVX311:
 +        format.pMetaData  = avctx->extradata;
 +        format.metaDataSz = avctx->extradata_size;
 +        break;
 +    default:
 +        av_log(avctx, AV_LOG_ERROR, "CrystalHD: Unknown codec name\n");
 +        return AVERROR(EINVAL);
 +    }
 +    format.mSubtype = subtype;
 +
 +    if (priv->sWidth) {
 +        format.bEnableScaling = 1;
 +        format.ScalingParams.sWidth = priv->sWidth;
 +    }
 +
 +    /* Get a decoder instance */
 +    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: starting up\n");
 +    // Initialize the Link and Decoder devices
 +    ret = DtsDeviceOpen(&priv->dev, mode);
 +    if (ret != BC_STS_SUCCESS) {
 +        av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: DtsDeviceOpen failed\n");
 +        goto fail;
 +    }
 +
 +    ret = DtsCrystalHDVersion(priv->dev, &version);
 +    if (ret != BC_STS_SUCCESS) {
 +        av_log(avctx, AV_LOG_VERBOSE,
 +               "CrystalHD: DtsCrystalHDVersion failed\n");
 +        goto fail;
 +    }
 +    priv->is_70012 = version.device == 0;
 +
 +    if (priv->is_70012 &&
 +        (subtype == BC_MSUBTYPE_DIVX || subtype == BC_MSUBTYPE_DIVX311)) {
 +        av_log(avctx, AV_LOG_VERBOSE,
 +               "CrystalHD: BCM70012 doesn't support MPEG4-ASP/DivX/Xvid\n");
 +        goto fail;
 +    }
 +
 +    ret = DtsSetInputFormat(priv->dev, &format);
 +    if (ret != BC_STS_SUCCESS) {
 +        av_log(avctx, AV_LOG_ERROR, "CrystalHD: SetInputFormat failed\n");
 +        goto fail;
 +    }
 +
 +    ret = DtsOpenDecoder(priv->dev, BC_STREAM_TYPE_ES);
 +    if (ret != BC_STS_SUCCESS) {
 +        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsOpenDecoder failed\n");
 +        goto fail;
 +    }
 +
 +    ret = DtsSetColorSpace(priv->dev, OUTPUT_MODE422_YUY2);
 +    if (ret != BC_STS_SUCCESS) {
 +        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsSetColorSpace failed\n");
 +        goto fail;
 +    }
 +    ret = DtsStartDecoder(priv->dev);
 +    if (ret != BC_STS_SUCCESS) {
 +        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartDecoder failed\n");
 +        goto fail;
 +    }
 +    ret = DtsStartCapture(priv->dev);
 +    if (ret != BC_STS_SUCCESS) {
 +        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartCapture failed\n");
 +        goto fail;
 +    }
 +
 +    if (avctx->codec->id == AV_CODEC_ID_H264) {
 +        priv->parser = av_parser_init(avctx->codec->id);
 +        if (!priv->parser)
 +            av_log(avctx, AV_LOG_WARNING,
 +                   "Cannot open the h.264 parser! Interlaced h.264 content "
 +                   "will not be detected reliably.\n");
 +        else
 +            priv->parser->flags = PARSER_FLAG_COMPLETE_FRAMES;
 +    }
 +    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Init complete.\n");
 +
 +    return 0;
 +
 + fail:
 +    uninit(avctx);
 +    return -1;
 +}
 +
 +
 +static inline CopyRet copy_frame(AVCodecContext *avctx,
 +                                 BC_DTS_PROC_OUT *output,
 +                                 void *data, int *got_frame)
 +{
 +    BC_STATUS ret;
 +    BC_DTS_STATUS decoder_status = { 0, };
 +    uint8_t trust_interlaced;
 +    uint8_t interlaced;
 +
 +    CHDContext *priv = avctx->priv_data;
 +    int64_t pkt_pts  = AV_NOPTS_VALUE;
 +    uint8_t pic_type = 0;
 +
 +    uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) ==
 +                           VDEC_FLAG_BOTTOMFIELD;
 +    uint8_t bottom_first = !!(output->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST);
 +
 +    int width    = output->PicInfo.width;
 +    int height   = output->PicInfo.height;
 +    int bwidth;
 +    uint8_t *src = output->Ybuff;
 +    int sStride;
 +    uint8_t *dst;
 +    int dStride;
 +
 +    if (output->PicInfo.timeStamp != 0) {
 +        OpaqueList *node = opaque_list_pop(priv, output->PicInfo.timeStamp);
 +        if (node) {
 +            pkt_pts = node->reordered_opaque;
 +            pic_type = node->pic_type;
 +            av_free(node);
 +        } else {
 +            /*
 +             * We will encounter a situation where a timestamp cannot be
 +             * popped if a second field is being returned. In this case,
 +             * each field has the same timestamp and the first one will
 +             * cause it to be popped. To keep subsequent calculations
 + * simple, pic_type should be set to a FIELD value - doesn't
 +             * matter which, but I chose BOTTOM.
 +             */
 +            pic_type = PICT_BOTTOM_FIELD;
 +        }
 +        av_log(avctx, AV_LOG_VERBOSE, "output \"pts\": %"PRIu64"\n",
 +               output->PicInfo.timeStamp);
 +        av_log(avctx, AV_LOG_VERBOSE, "output picture type %d\n",
 +               pic_type);
 +    }
 +
 +    ret = DtsGetDriverStatus(priv->dev, &decoder_status);
 +    if (ret != BC_STS_SUCCESS) {
 +        av_log(avctx, AV_LOG_ERROR,
 +               "CrystalHD: GetDriverStatus failed: %u\n", ret);
 +        return RET_ERROR;
 +    }
 +
 +    /*
 +     * For most content, we can trust the interlaced flag returned
 +     * by the hardware, but sometimes we can't. These are the
 +     * conditions under which we can trust the flag:
 +     *
 +     * 1) It's not h.264 content
 +     * 2) The UNKNOWN_SRC flag is not set
 +     * 3) We know we're expecting a second field
 +     * 4) The hardware reports this picture and the next picture
 +     *    have the same picture number.
 +     *
 +     * Note that there can still be interlaced content that will
 +     * fail this check, if the hardware hasn't decoded the next
 +     * picture or if there is a corruption in the stream. (In either
 +     * case a 0 will be returned for the next picture number)
 +     */
 +    trust_interlaced = avctx->codec->id != AV_CODEC_ID_H264 ||
 +                       !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ||
 +                       priv->need_second_field ||
 +                       (decoder_status.picNumFlags & ~0x40000000) ==
 +                       output->PicInfo.picture_number;
 +
 +    /*
 +     * If we got a false negative for trust_interlaced on the first field,
 +     * we will realise our mistake here when we see that the picture number is that
 +     * of the previous picture. We cannot recover the frame and should discard the
 +     * second field to keep the correct number of output frames.
 +     */
 +    if (output->PicInfo.picture_number == priv->last_picture && !priv->need_second_field) {
 +        av_log(avctx, AV_LOG_WARNING,
 +               "Incorrectly guessed progressive frame. Discarding second field\n");
 +        /* Returning without providing a picture. */
 +        return RET_OK;
 +    }
 +
 +    interlaced = (output->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC) &&
 +                 trust_interlaced;
 +
 +    if (!trust_interlaced && (decoder_status.picNumFlags & ~0x40000000) == 0) {
 +        av_log(avctx, AV_LOG_VERBOSE,
 +               "Next picture number unknown. Assuming progressive frame.\n");
 +    }
 +
 +    av_log(avctx, AV_LOG_VERBOSE, "Interlaced state: %d | trust_interlaced %d\n",
 +           interlaced, trust_interlaced);
 +
 +    if (priv->pic->data[0] && !priv->need_second_field)
 +        av_frame_unref(priv->pic);
 +
 +    priv->need_second_field = interlaced && !priv->need_second_field;
 +
 +    if (!priv->pic->data[0]) {
 +        if (ff_get_buffer(avctx, priv->pic, AV_GET_BUFFER_FLAG_REF) < 0)
 +            return RET_ERROR;
 +    }
 +
 +    bwidth = av_image_get_linesize(avctx->pix_fmt, width, 0);
 +    if (priv->is_70012) {
 +        int pStride;
 +
 +        if (width <= 720)
 +            pStride = 720;
 +        else if (width <= 1280)
 +            pStride = 1280;
 +        else pStride = 1920;
 +        sStride = av_image_get_linesize(avctx->pix_fmt, pStride, 0);
 +    } else {
 +        sStride = bwidth;
 +    }
 +
 +    dStride = priv->pic->linesize[0];
 +    dst     = priv->pic->data[0];
 +
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");
 +
 +    if (interlaced) {
 +        int dY = 0;
 +        int sY = 0;
 +
 +        height /= 2;
 +        if (bottom_field) {
 +            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: bottom field\n");
 +            dY = 1;
 +        } else {
 +            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: top field\n");
 +            dY = 0;
 +        }
 +
 +        /* Copy one source field line into every other destination line. */
 +        for (sY = 0; sY < height; dY += 2, sY++) {
 +            memcpy(&(dst[dY * dStride]), &(src[sY * sStride]), bwidth);
 +        }
 +    } else {
 +        av_image_copy_plane(dst, dStride, src, sStride, bwidth, height);
 +    }
 +
 +    priv->pic->interlaced_frame = interlaced;
 +    if (interlaced)
 +        priv->pic->top_field_first = !bottom_first;
 +
 +    priv->pic->pkt_pts = pkt_pts;
 +
 +    if (!priv->need_second_field) {
 +        *got_frame       = 1;
 +        if ((ret = av_frame_ref(data, priv->pic)) < 0) {
 +            return ret;
 +        }
 +    }
 +
 +    /*
 +     * Two types of PAFF content have been observed. One form causes the
 +     * hardware to return a field pair and the other individual fields,
 +     * even though the input is always individual fields. We must skip
 +     * copying on the next decode() call to maintain pipeline length in
 +     * the first case.
 +     */
 +    if (!interlaced && (output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) &&
 +        (pic_type == PICT_TOP_FIELD || pic_type == PICT_BOTTOM_FIELD)) {
 +        av_log(priv->avctx, AV_LOG_VERBOSE, "Fieldpair from two packets.\n");
 +        return RET_SKIP_NEXT_COPY;
 +    }
 +
 +    /*
 +     * The logic here is purely based on empirical testing with samples.
 +     * If we need a second field, it could come from a second input packet,
 +     * or it could come from the same field-pair input packet at the current
 +     * field. In the first case, we should return and wait for the next time
 +     * round to get the second field, while in the second case, we should
 +     * ask the decoder for it immediately.
 +     *
 +     * Testing has shown that we are dealing with the fieldpair -> two fields
 +     * case if the VDEC_FLAG_UNKNOWN_SRC is not set or if the input picture
 +     * type was PICT_FRAME (in this second case, the flag might still be set)
 +     */
 +    return priv->need_second_field &&
 +           (!(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ||
 +            pic_type == PICT_FRAME) ?
 +           RET_COPY_NEXT_FIELD : RET_OK;
 +}
 +
 +
 +static inline CopyRet receive_frame(AVCodecContext *avctx,
 +                                    void *data, int *got_frame)
 +{
 +    BC_STATUS ret;
 +    BC_DTS_PROC_OUT output = {
 +        .PicInfo.width  = avctx->width,
 +        .PicInfo.height = avctx->height,
 +    };
 +    CHDContext *priv = avctx->priv_data;
 +    HANDLE dev       = priv->dev;
 +
 +    *got_frame = 0;
 +
 +    // Request decoded data from the driver
 +    ret = DtsProcOutputNoCopy(dev, OUTPUT_PROC_TIMEOUT, &output);
 +    if (ret == BC_STS_FMT_CHANGE) {
 +        av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Initial format change\n");
 +        avctx->width  = output.PicInfo.width;
 +        avctx->height = output.PicInfo.height;
 +        switch ( output.PicInfo.aspect_ratio ) {
 +        case vdecAspectRatioSquare:
 +            avctx->sample_aspect_ratio = (AVRational) {  1,  1};
 +            break;
 +        case vdecAspectRatio12_11:
 +            avctx->sample_aspect_ratio = (AVRational) { 12, 11};
 +            break;
 +        case vdecAspectRatio10_11:
 +            avctx->sample_aspect_ratio = (AVRational) { 10, 11};
 +            break;
 +        case vdecAspectRatio16_11:
 +            avctx->sample_aspect_ratio = (AVRational) { 16, 11};
 +            break;
 +        case vdecAspectRatio40_33:
 +            avctx->sample_aspect_ratio = (AVRational) { 40, 33};
 +            break;
 +        case vdecAspectRatio24_11:
 +            avctx->sample_aspect_ratio = (AVRational) { 24, 11};
 +            break;
 +        case vdecAspectRatio20_11:
 +            avctx->sample_aspect_ratio = (AVRational) { 20, 11};
 +            break;
 +        case vdecAspectRatio32_11:
 +            avctx->sample_aspect_ratio = (AVRational) { 32, 11};
 +            break;
 +        case vdecAspectRatio80_33:
 +            avctx->sample_aspect_ratio = (AVRational) { 80, 33};
 +            break;
 +        case vdecAspectRatio18_11:
 +            avctx->sample_aspect_ratio = (AVRational) { 18, 11};
 +            break;
 +        case vdecAspectRatio15_11:
 +            avctx->sample_aspect_ratio = (AVRational) { 15, 11};
 +            break;
 +        case vdecAspectRatio64_33:
 +            avctx->sample_aspect_ratio = (AVRational) { 64, 33};
 +            break;
 +        case vdecAspectRatio160_99:
 +            avctx->sample_aspect_ratio = (AVRational) {160, 99};
 +            break;
 +        case vdecAspectRatio4_3:
 +            avctx->sample_aspect_ratio = (AVRational) {  4,  3};
 +            break;
 +        case vdecAspectRatio16_9:
 +            avctx->sample_aspect_ratio = (AVRational) { 16,  9};
 +            break;
 +        case vdecAspectRatio221_1:
 +            avctx->sample_aspect_ratio = (AVRational) {221,  1};
 +            break;
 +        }
 +        return RET_COPY_AGAIN;
 +    } else if (ret == BC_STS_SUCCESS) {
 +        int copy_ret = -1;
 +        if (output.PoutFlags & BC_POUT_FLAGS_PIB_VALID) {
 +            if (priv->last_picture == -1) {
 +                /*
 +                 * Init to one less, so that the incrementing code doesn't
 +                 * need to be special-cased.
 +                 */
 +                priv->last_picture = output.PicInfo.picture_number - 1;
 +            }
 +
 +            if (avctx->codec->id == AV_CODEC_ID_MPEG4 &&
 +                output.PicInfo.timeStamp == 0 && priv->bframe_bug) {
 +                av_log(avctx, AV_LOG_VERBOSE,
 +                       "CrystalHD: Not returning packed frame twice.\n");
 +                priv->last_picture++;
 +                DtsReleaseOutputBuffs(dev, NULL, FALSE);
 +                return RET_COPY_AGAIN;
 +            }
 +
 +            print_frame_info(priv, &output);
 +
 +            if (priv->last_picture + 1 < output.PicInfo.picture_number) {
 +                av_log(avctx, AV_LOG_WARNING,
 +                       "CrystalHD: Picture Number discontinuity\n");
 +                /*
 +                 * Have we lost frames? If so, we need to shrink the
 +                 * pipeline length appropriately.
 +                 *
 +                 * XXX: I have no idea what the semantics of this situation
 +                 * are so I don't even know if we've lost frames or which
 +                 * ones.
 +                 *
 +                 * In any case, only warn the first time.
 +                 */
 +                priv->last_picture = output.PicInfo.picture_number - 1;
 +            }
 +
 +            copy_ret = copy_frame(avctx, &output, data, got_frame);
 +            if (*got_frame > 0) {
 +                avctx->has_b_frames--;
 +                priv->last_picture++;
 +                av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Pipeline length: %u\n",
 +                       avctx->has_b_frames);
 +            }
 +        } else {
 +            /*
 +             * An invalid frame has been consumed.
 +             */
 +            av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput succeeded with "
 +                                        "invalid PIB\n");
 +            avctx->has_b_frames--;
 +            copy_ret = RET_OK;
 +        }
 +        DtsReleaseOutputBuffs(dev, NULL, FALSE);
 +
 +        return copy_ret;
 +    } else if (ret == BC_STS_BUSY) {
 +        return RET_COPY_AGAIN;
 +    } else {
 +        av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput failed %d\n", ret);
 +        return RET_ERROR;
 +    }
 +}
 +
 +
 +static int decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
 +{
 +    BC_STATUS ret;
 +    BC_DTS_STATUS decoder_status = { 0, };
 +    CopyRet rec_ret;
 +    CHDContext *priv   = avctx->priv_data;
 +    HANDLE dev         = priv->dev;
 +    uint8_t *in_data   = avpkt->data;
 +    int len            = avpkt->size;
 +    int free_data      = 0;
 +    uint8_t pic_type   = 0;
 +
 +    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: decode_frame\n");
 +
 +    if (avpkt->size == 7 && !priv->bframe_bug) {
 +        /*
 +         * The use of a drop frame triggers the bug
 +         */
 +        av_log(avctx, AV_LOG_INFO,
 +               "CrystalHD: Enabling work-around for packed b-frame bug\n");
 +        priv->bframe_bug = 1;
 +    } else if (avpkt->size == 8 && priv->bframe_bug) {
 +        /*
 +         * Delay frames don't trigger the bug
 +         */
 +        av_log(avctx, AV_LOG_INFO,
 +               "CrystalHD: Disabling work-around for packed b-frame bug\n");
 +        priv->bframe_bug = 0;
 +    }
 +
 +    if (len) {
 +        int32_t tx_free = (int32_t)DtsTxFreeSize(dev);
 +
 +        if (priv->parser) {
 +            int ret = 0;
 +
 +            if (priv->bsfc) {
 +                ret = av_bitstream_filter_filter(priv->bsfc, avctx, NULL,
 +                                                 &in_data, &len,
 +                                                 avpkt->data, len, 0);
 +            }
 +            free_data = ret > 0;
 +
 +            if (ret >= 0) {
 +                uint8_t *pout;
 +                int psize;
 +                int index;
 +                H264Context *h = priv->parser->priv_data;
 +
 +                index = av_parser_parse2(priv->parser, avctx, &pout, &psize,
 +                                         in_data, len, avctx->internal->pkt->pts,
 +                                         avctx->internal->pkt->dts, 0);
 +                if (index < 0) {
 +                    av_log(avctx, AV_LOG_WARNING,
 +                           "CrystalHD: Failed to parse h.264 packet to "
 +                           "detect interlacing.\n");
 +                } else if (index != len) {
 +                    av_log(avctx, AV_LOG_WARNING,
 +                           "CrystalHD: Failed to parse h.264 packet "
 +                           "completely. Interlaced frames may be "
 +                           "incorrectly detected.\n");
 +                } else {
 +                    av_log(avctx, AV_LOG_VERBOSE,
 +                           "CrystalHD: parser picture type %d\n",
 +                           h->picture_structure);
 +                    pic_type = h->picture_structure;
 +                }
 +            } else {
 +                av_log(avctx, AV_LOG_WARNING,
 +                       "CrystalHD: mp4toannexb filter failed to filter "
 +                       "packet. Interlaced frames may be incorrectly "
 +                       "detected.\n");
 +            }
 +        }
 +
 +        if (len < tx_free - 1024) {
 +            /*
 +             * Despite being notionally opaque, either libcrystalhd or
 +             * the hardware itself will mangle pts values that are too
 +             * small or too large. The docs claim it should be in units
 +             * of 100ns. Given that we're nominally dealing with a black
 +             * box on both sides, any transform we do has no guarantee of
 +             * avoiding mangling so we need to build a mapping to values
 +             * we know will not be mangled.
 +             */
 +            uint64_t pts = opaque_list_push(priv, avctx->internal->pkt->pts, pic_type);
 +            if (!pts) {
 +                if (free_data) {
 +                    av_freep(&in_data);
 +                }
 +                return AVERROR(ENOMEM);
 +            }
 +            av_log(priv->avctx, AV_LOG_VERBOSE,
 +                   "input \"pts\": %"PRIu64"\n", pts);
 +            ret = DtsProcInput(dev, in_data, len, pts, 0);
 +            if (free_data) {
 +                av_freep(&in_data);
 +            }
 +            if (ret == BC_STS_BUSY) {
 +                av_log(avctx, AV_LOG_WARNING,
 +                       "CrystalHD: ProcInput returned busy\n");
 +                usleep(BASE_WAIT);
 +                return AVERROR(EBUSY);
 +            } else if (ret != BC_STS_SUCCESS) {
 +                av_log(avctx, AV_LOG_ERROR,
 +                       "CrystalHD: ProcInput failed: %u\n", ret);
 +                return -1;
 +            }
 +            avctx->has_b_frames++;
 +        } else {
 +            av_log(avctx, AV_LOG_WARNING, "CrystalHD: Input buffer full\n");
 +            len = 0; // We didn't consume any bytes.
 +        }
 +    } else {
 +        av_log(avctx, AV_LOG_INFO, "CrystalHD: No more input data\n");
 +    }
 +
 +    if (priv->skip_next_output) {
 +        av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Skipping next output.\n");
 +        priv->skip_next_output = 0;
 +        avctx->has_b_frames--;
 +        return len;
 +    }
 +
 +    ret = DtsGetDriverStatus(dev, &decoder_status);
 +    if (ret != BC_STS_SUCCESS) {
 +        av_log(avctx, AV_LOG_ERROR, "CrystalHD: GetDriverStatus failed\n");
 +        return -1;
 +    }
 +
 +    /*
 +     * No frames ready. Don't try to extract.
 +     *
 +     * Empirical testing shows that ReadyListCount can be a damn lie,
 +     * and ProcOut still fails when count > 0. The same testing showed
 +     * that two more iterations were needed before ProcOutput would
 +     * succeed.
 +     */
 +    if (priv->output_ready < 2) {
 +        if (decoder_status.ReadyListCount != 0)
 +            priv->output_ready++;
 +        usleep(BASE_WAIT);
 +        av_log(avctx, AV_LOG_INFO, "CrystalHD: Filling pipeline.\n");
 +        return len;
 +    } else if (decoder_status.ReadyListCount == 0) {
 +        /*
 +         * After the pipeline is established, if we encounter a lack of frames
 +         * that probably means we're not giving the hardware enough time to
 +         * decode them, so start increasing the wait time at the end of a
 +         * decode call.
 +         */
 +        usleep(BASE_WAIT);
 +        priv->decode_wait += WAIT_UNIT;
 +        av_log(avctx, AV_LOG_INFO, "CrystalHD: No frames ready. Returning\n");
 +        return len;
 +    }
 +
 +    do {
 +        rec_ret = receive_frame(avctx, data, got_frame);
 +        if (rec_ret == RET_OK && *got_frame == 0) {
 +            /*
 +             * This case is for when the encoded fields are stored
 +             * separately and we get a separate avpkt for each one. To keep
 +             * the pipeline stable, we should return nothing and wait for
 +             * the next time round to grab the second field.
 +             * H.264 PAFF is an example of this.
 +             */
 +            av_log(avctx, AV_LOG_VERBOSE, "Returning after first field.\n");
 +            avctx->has_b_frames--;
 +        } else if (rec_ret == RET_COPY_NEXT_FIELD) {
 +            /*
 +             * This case is for when the encoded fields are stored in a
 + * single avpkt but the hardware returns them separately. Unless
 +             * we grab the second field before returning, we'll slip another
 +             * frame in the pipeline and if that happens a lot, we're sunk.
 +             * So we have to get that second field now.
 +             * Interlaced mpeg2 and vc1 are examples of this.
 +             */
 +            av_log(avctx, AV_LOG_VERBOSE, "Trying to get second field.\n");
 +            while (1) {
 +                usleep(priv->decode_wait);
 +                ret = DtsGetDriverStatus(dev, &decoder_status);
 +                if (ret == BC_STS_SUCCESS &&
 +                    decoder_status.ReadyListCount > 0) {
 +                    rec_ret = receive_frame(avctx, data, got_frame);
 +                    if ((rec_ret == RET_OK && *got_frame > 0) ||
 +                        rec_ret == RET_ERROR)
 +                        break;
 +                }
 +            }
 +            av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Got second field.\n");
 +        } else if (rec_ret == RET_SKIP_NEXT_COPY) {
 +            /*
 +             * Two input packets got turned into a field pair. Gawd.
 +             */
 +            av_log(avctx, AV_LOG_VERBOSE,
 +                   "Don't output on next decode call.\n");
 +            priv->skip_next_output = 1;
 +        }
 +        /*
 +         * If rec_ret == RET_COPY_AGAIN, that means that either we just handled
 +         * a FMT_CHANGE event and need to go around again for the actual frame,
 +         * we got a busy status and need to try again, or we're dealing with
 +         * packed b-frames, where the hardware strangely returns the packed
 +         * p-frame twice. We choose to keep the second copy as it carries the
 +         * valid pts.
 +         */
 +    } while (rec_ret == RET_COPY_AGAIN);
 +    usleep(priv->decode_wait);
 +    return len;
 +}
 +
 +
 +#if CONFIG_H264_CRYSTALHD_DECODER
 +static AVClass h264_class = {
 +    "h264_crystalhd",
 +    av_default_item_name,
 +    options,
 +    LIBAVUTIL_VERSION_INT,
 +};
 +
 +AVCodec ff_h264_crystalhd_decoder = {
 +    .name           = "h264_crystalhd",
 +    .long_name      = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (CrystalHD acceleration)"),
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_H264,
 +    .priv_data_size = sizeof(CHDContext),
 +    .init           = init,
 +    .close          = uninit,
 +    .decode         = decode,
 +    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
 +    .flush          = flush,
 +    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
 +    .priv_class     = &h264_class,
 +};
 +#endif
 +
 +#if CONFIG_MPEG2_CRYSTALHD_DECODER
 +static AVClass mpeg2_class = {
 +    "mpeg2_crystalhd",
 +    av_default_item_name,
 +    options,
 +    LIBAVUTIL_VERSION_INT,
 +};
 +
 +AVCodec ff_mpeg2_crystalhd_decoder = {
 +    .name           = "mpeg2_crystalhd",
 +    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-2 Video (CrystalHD acceleration)"),
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_MPEG2VIDEO,
 +    .priv_data_size = sizeof(CHDContext),
 +    .init           = init,
 +    .close          = uninit,
 +    .decode         = decode,
 +    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
 +    .flush          = flush,
 +    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
 +    .priv_class     = &mpeg2_class,
 +};
 +#endif
 +
 +#if CONFIG_MPEG4_CRYSTALHD_DECODER
 +static AVClass mpeg4_class = {
 +    "mpeg4_crystalhd",
 +    av_default_item_name,
 +    options,
 +    LIBAVUTIL_VERSION_INT,
 +};
 +
 +AVCodec ff_mpeg4_crystalhd_decoder = {
 +    .name           = "mpeg4_crystalhd",
 +    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 Part 2 (CrystalHD acceleration)"),
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_MPEG4,
 +    .priv_data_size = sizeof(CHDContext),
 +    .init           = init,
 +    .close          = uninit,
 +    .decode         = decode,
 +    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
 +    .flush          = flush,
 +    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
 +    .priv_class     = &mpeg4_class,
 +};
 +#endif
 +
 +#if CONFIG_MSMPEG4_CRYSTALHD_DECODER
 +static AVClass msmpeg4_class = {
 +    "msmpeg4_crystalhd",
 +    av_default_item_name,
 +    options,
 +    LIBAVUTIL_VERSION_INT,
 +};
 +
 +AVCodec ff_msmpeg4_crystalhd_decoder = {
 +    .name           = "msmpeg4_crystalhd",
 +    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 Part 2 Microsoft variant version 3 (CrystalHD acceleration)"),
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_MSMPEG4V3,
 +    .priv_data_size = sizeof(CHDContext),
 +    .init           = init,
 +    .close          = uninit,
 +    .decode         = decode,
 +    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_EXPERIMENTAL,
 +    .flush          = flush,
 +    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
 +    .priv_class     = &msmpeg4_class,
 +};
 +#endif
 +
 +#if CONFIG_VC1_CRYSTALHD_DECODER
 +static AVClass vc1_class = {
 +    "vc1_crystalhd",
 +    av_default_item_name,
 +    options,
 +    LIBAVUTIL_VERSION_INT,
 +};
 +
 +AVCodec ff_vc1_crystalhd_decoder = {
 +    .name           = "vc1_crystalhd",
 +    .long_name      = NULL_IF_CONFIG_SMALL("SMPTE VC-1 (CrystalHD acceleration)"),
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_VC1,
 +    .priv_data_size = sizeof(CHDContext),
 +    .init           = init,
 +    .close          = uninit,
 +    .decode         = decode,
 +    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
 +    .flush          = flush,
 +    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
 +    .priv_class     = &vc1_class,
 +};
 +#endif
 +
 +#if CONFIG_WMV3_CRYSTALHD_DECODER
 +static AVClass wmv3_class = {
 +    "wmv3_crystalhd",
 +    av_default_item_name,
 +    options,
 +    LIBAVUTIL_VERSION_INT,
 +};
 +
 +AVCodec ff_wmv3_crystalhd_decoder = {
 +    .name           = "wmv3_crystalhd",
 +    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9 (CrystalHD acceleration)"),
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_WMV3,
 +    .priv_data_size = sizeof(CHDContext),
 +    .init           = init,
 +    .close          = uninit,
 +    .decode         = decode,
 +    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
 +    .flush          = flush,
 +    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
 +    .priv_class     = &wmv3_class,
 +};
 +#endif
@@@ -20,9 -20,7 +20,9 @@@
   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
   */
  
- #include "h264.h"
 +#include "libavutil/avassert.h"
 +
+ #include "h264dec.h"
  #include "h264data.h"
  #include "mpegutils.h"
  
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
  
  #include "internal.h"
  #include "avcodec.h"
- #include "h264.h"
+ #include "h264dec.h"
  #include "mpegutils.h"
 +#include "libavutil/avassert.h"
  
 -#include <assert.h>
  
  static av_always_inline int fetch_diagonal_mv(const H264Context *h, H264SliceContext *sl,
                                                const int16_t **C,
Simple merge
Simple merge
Simple merge
Simple merge
  
  #include <inttypes.h>
  
 +#include "libavutil/avassert.h"
  #include "internal.h"
  #include "avcodec.h"
- #include "h264.h"
+ #include "h264dec.h"
  #include "golomb.h"
  #include "mpegutils.h"
  
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -1003,6 -935,4 +1003,6 @@@ void ff_h264_flush_change(H264Context *
  
  void ff_h264_free_tables(H264Context *h);
  
- #endif /* AVCODEC_H264_H */
 +void ff_h264_set_erpic(ERPicture *dst, H264Picture *src);
 +
+ #endif /* AVCODEC_H264DEC_H */
Simple merge
index caeb64b,0000000..18f186b
mode 100644,000000..100644
--- /dev/null
@@@ -1,371 -1,0 +1,371 @@@
- #include "h264.h"
 +/*
 + * Android MediaCodec H.264 decoder
 + *
 + * Copyright (c) 2015-2016 Matthieu Bouron <matthieu.bouron stupeflix.com>
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +#include <stdint.h>
 +#include <string.h>
 +
 +#include "libavutil/avassert.h"
 +#include "libavutil/common.h"
 +#include "libavutil/fifo.h"
 +#include "libavutil/opt.h"
 +#include "libavutil/intreadwrite.h"
 +#include "libavutil/pixfmt.h"
 +#include "libavutil/atomic.h"
 +
 +#include "avcodec.h"
++#include "h264dec.h"
 +#include "internal.h"
 +#include "mediacodecdec.h"
 +#include "mediacodec_wrapper.h"
 +
 +#define CODEC_MIME "video/avc"
 +
 +typedef struct MediaCodecH264DecContext {
 +
 +    MediaCodecDecContext *ctx;
 +
 +    AVBSFContext *bsf;
 +
 +    AVFifoBuffer *fifo;
 +
 +    AVPacket filtered_pkt;
 +
 +} MediaCodecH264DecContext;
 +
 +static av_cold int mediacodec_decode_close(AVCodecContext *avctx)
 +{
 +    MediaCodecH264DecContext *s = avctx->priv_data;
 +
 +    ff_mediacodec_dec_close(avctx, s->ctx);
 +    s->ctx = NULL;
 +
 +    av_fifo_free(s->fifo);
 +
 +    av_bsf_free(&s->bsf);
 +    av_packet_unref(&s->filtered_pkt);
 +
 +    return 0;
 +}
 +
 +static int h264_ps_to_nalu(const uint8_t *src, int src_size, uint8_t **out, int *out_size)
 +{
 +    int i;
 +    int ret = 0;
 +    uint8_t *p = NULL;
 +    static const uint8_t nalu_header[] = { 0x00, 0x00, 0x00, 0x01 };
 +
 +    if (!out || !out_size) {
 +        return AVERROR(EINVAL);
 +    }
 +
 +    p = av_malloc(sizeof(nalu_header) + src_size);
 +    if (!p) {
 +        return AVERROR(ENOMEM);
 +    }
 +
 +    *out = p;
 +    *out_size = sizeof(nalu_header) + src_size;
 +
 +    memcpy(p, nalu_header, sizeof(nalu_header));
 +    memcpy(p + sizeof(nalu_header), src, src_size);
 +
 +    /* Escape the 0x00, 0x00, 0x0{0-3} pattern: insert an emulation-prevention
 +     * 0x03 byte so that, e.g., 00 00 01 becomes 00 00 03 01. */
 +    for (i = 4; i < *out_size; i++) {
 +        if (i < *out_size - 3 &&
 +            p[i + 0] == 0 &&
 +            p[i + 1] == 0 &&
 +            p[i + 2] <= 3) {
 +            uint8_t *new;
 +
 +            *out_size += 1;
 +            new = av_realloc(*out, *out_size);
 +            if (!new) {
 +                ret = AVERROR(ENOMEM);
 +                goto done;
 +            }
 +            *out = p = new;
 +
 +            i = i + 3;
 +            memmove(p + i, p + i - 1, *out_size - i);
 +            p[i - 1] = 0x03;
 +        }
 +    }
 +done:
 +    if (ret < 0) {
 +        av_freep(out);
 +        *out_size = 0;
 +    }
 +
 +    return ret;
 +}
 +
 +static av_cold int mediacodec_decode_init(AVCodecContext *avctx)
 +{
 +    int i;
 +    int ret;
 +
 +    H264ParamSets ps;
 +    const PPS *pps = NULL;
 +    const SPS *sps = NULL;
 +    int is_avc = 0;
 +    int nal_length_size = 0;
 +
 +    FFAMediaFormat *format = NULL;
 +    MediaCodecH264DecContext *s = avctx->priv_data;
 +
 +    memset(&ps, 0, sizeof(ps));
 +
 +    format = ff_AMediaFormat_new();
 +    if (!format) {
 +        av_log(avctx, AV_LOG_ERROR, "Failed to create media format\n");
 +        ret = AVERROR_EXTERNAL;
 +        goto done;
 +    }
 +
 +    ff_AMediaFormat_setString(format, "mime", CODEC_MIME);
 +    ff_AMediaFormat_setInt32(format, "width", avctx->width);
 +    ff_AMediaFormat_setInt32(format, "height", avctx->height);
 +
 +    ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
 +                                   &ps, &is_avc, &nal_length_size, 0, avctx);
 +    if (ret < 0) {
 +        goto done;
 +    }
 +
 +    for (i = 0; i < MAX_PPS_COUNT; i++) {
 +        if (ps.pps_list[i]) {
 +            pps = (const PPS*)ps.pps_list[i]->data;
 +            break;
 +        }
 +    }
 +
 +    if (pps) {
 +        if (ps.sps_list[pps->sps_id]) {
 +            sps = (const SPS*)ps.sps_list[pps->sps_id]->data;
 +        }
 +    }
 +
 +    if (pps && sps) {
 +        uint8_t *data = NULL;
 +        int data_size = 0;
 +
 +        if ((ret = h264_ps_to_nalu(sps->data, sps->data_size, &data, &data_size)) < 0) {
 +            goto done;
 +        }
 +        ff_AMediaFormat_setBuffer(format, "csd-0", (void*)data, data_size);
 +        av_freep(&data);
 +
 +        if ((ret = h264_ps_to_nalu(pps->data, pps->data_size, &data, &data_size)) < 0) {
 +            goto done;
 +        }
 +        ff_AMediaFormat_setBuffer(format, "csd-1", (void*)data, data_size);
 +        av_freep(&data);
 +    } else {
 +        av_log(avctx, AV_LOG_ERROR, "Could not extract PPS/SPS from extradata");
 +        ret = AVERROR_INVALIDDATA;
 +        goto done;
 +    }
 +
 +    s->ctx = av_mallocz(sizeof(*s->ctx));
 +    if (!s->ctx) {
 +        av_log(avctx, AV_LOG_ERROR, "Failed to allocate MediaCodecDecContext\n");
 +        ret = AVERROR(ENOMEM);
 +        goto done;
 +    }
 +
 +    if ((ret = ff_mediacodec_dec_init(avctx, s->ctx, CODEC_MIME, format)) < 0) {
 +        s->ctx = NULL;
 +        goto done;
 +    }
 +
 +    av_log(avctx, AV_LOG_INFO, "MediaCodec started successfully, ret = %d\n", ret);
 +
 +    s->fifo = av_fifo_alloc(sizeof(AVPacket));
 +    if (!s->fifo) {
 +        ret = AVERROR(ENOMEM);
 +        goto done;
 +    }
 +
 +    const AVBitStreamFilter *bsf = av_bsf_get_by_name("h264_mp4toannexb");
 +    if (!bsf) {
 +        ret = AVERROR_BSF_NOT_FOUND;
 +        goto done;
 +    }
 +
 +    if ((ret = av_bsf_alloc(bsf, &s->bsf))) {
 +        goto done;
 +    }
 +
 +    if (((ret = avcodec_parameters_from_context(s->bsf->par_in, avctx)) < 0) ||
 +        ((ret = av_bsf_init(s->bsf)) < 0)) {
 +        goto done;
 +    }
 +
 +    av_init_packet(&s->filtered_pkt);
 +
 +done:
 +    if (format) {
 +        ff_AMediaFormat_delete(format);
 +    }
 +
 +    if (ret < 0) {
 +        mediacodec_decode_close(avctx);
 +    }
 +
 +    ff_h264_ps_uninit(&ps);
 +
 +    return ret;
 +}
 +
 +
 +static int mediacodec_process_data(AVCodecContext *avctx, AVFrame *frame,
 +                                   int *got_frame, AVPacket *pkt)
 +{
 +    MediaCodecH264DecContext *s = avctx->priv_data;
 +
 +    return ff_mediacodec_dec_decode(avctx, s->ctx, frame, got_frame, pkt);
 +}
 +
 +static int mediacodec_decode_frame(AVCodecContext *avctx, void *data,
 +                                   int *got_frame, AVPacket *avpkt)
 +{
 +    MediaCodecH264DecContext *s = avctx->priv_data;
 +    AVFrame *frame    = data;
 +    int ret;
 +
 +    /* buffer the input packet */
 +    if (avpkt->size) {
 +        AVPacket input_pkt = { 0 };
 +
 +        if (av_fifo_space(s->fifo) < sizeof(input_pkt)) {
 +            ret = av_fifo_realloc2(s->fifo,
 +                                   av_fifo_size(s->fifo) + sizeof(input_pkt));
 +            if (ret < 0)
 +                return ret;
 +        }
 +
 +        ret = av_packet_ref(&input_pkt, avpkt);
 +        if (ret < 0)
 +            return ret;
 +        av_fifo_generic_write(s->fifo, &input_pkt, sizeof(input_pkt), NULL);
 +    }
 +
 +    /*
 +     * MediaCodec.flush() discards both input and output buffers, thus we
 +     * need to delay the call to this function until the user has released or
 + * rendered the frames they retain.
 +     *
 +     * After we have buffered an input packet, check if the codec is in the
 +     * flushing state. If it is, we need to call ff_mediacodec_dec_flush.
 +     *
 +     * ff_mediacodec_dec_flush returns 0 if the flush cannot be performed on
 +     * the codec (because the user retains frames). The codec stays in the
 +     * flushing state.
 +     *
 +     * ff_mediacodec_dec_flush returns 1 if the flush can actually be
 +     * performed on the codec. The codec leaves the flushing state and can
 +     * process packets again.
 +     *
 +     * ff_mediacodec_dec_flush returns a negative value if an error has
 +     * occurred.
 +     *
 +     */
 +    if (ff_mediacodec_dec_is_flushing(avctx, s->ctx)) {
 +        if (!ff_mediacodec_dec_flush(avctx, s->ctx)) {
 +            return avpkt->size;
 +        }
 +    }
 +
 +    /* process buffered data */
 +    while (!*got_frame) {
 +        /* prepare the input data -- convert to Annex B if needed */
 +        if (s->filtered_pkt.size <= 0) {
 +            AVPacket input_pkt = { 0 };
 +
 +            av_packet_unref(&s->filtered_pkt);
 +
 +            /* no more data */
 +            if (av_fifo_size(s->fifo) < sizeof(AVPacket)) {
 +                return avpkt->size ? avpkt->size :
 +                    ff_mediacodec_dec_decode(avctx, s->ctx, frame, got_frame, avpkt);
 +            }
 +
 +            av_fifo_generic_read(s->fifo, &input_pkt, sizeof(input_pkt), NULL);
 +
 +            ret = av_bsf_send_packet(s->bsf, &input_pkt);
 +            if (ret < 0) {
 +                return ret;
 +            }
 +
 +            ret = av_bsf_receive_packet(s->bsf, &s->filtered_pkt);
 +            if (ret == AVERROR(EAGAIN)) {
 +                goto done;
 +            }
 +
 +            /* h264_mp4toannexb is used here and does not require flushing */
 +            av_assert0(ret != AVERROR_EOF);
 +
 +            if (ret < 0) {
 +                return ret;
 +            }
 +        }
 +
 +        ret = mediacodec_process_data(avctx, frame, got_frame, &s->filtered_pkt);
 +        if (ret < 0)
 +            return ret;
 +
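 +        /* ret is the number of bytes consumed by the decoder; keep any
 +         * unconsumed remainder of the filtered packet for the next
 +         * iteration */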
 +        s->filtered_pkt.size -= ret;
 +        s->filtered_pkt.data += ret;
 +    }
 +done:
 +    return avpkt->size;
 +}
 +
 +static void mediacodec_decode_flush(AVCodecContext *avctx)
 +{
 +    MediaCodecH264DecContext *s = avctx->priv_data;
 +
 +    while (av_fifo_size(s->fifo)) {
 +        AVPacket pkt;
 +        av_fifo_generic_read(s->fifo, &pkt, sizeof(pkt), NULL);
 +        av_packet_unref(&pkt);
 +    }
 +    av_fifo_reset(s->fifo);
 +
 +    av_packet_unref(&s->filtered_pkt);
 +
 +    ff_mediacodec_dec_flush(avctx, s->ctx);
 +}
 +
 +AVCodec ff_h264_mediacodec_decoder = {
 +    .name           = "h264_mediacodec",
 +    .long_name      = NULL_IF_CONFIG_SMALL("H.264 Android MediaCodec decoder"),
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_H264,
 +    .priv_data_size = sizeof(MediaCodecH264DecContext),
 +    .init           = mediacodec_decode_init,
 +    .decode         = mediacodec_decode_frame,
 +    .flush          = mediacodec_decode_flush,
 +    .close          = mediacodec_decode_close,
 +    .capabilities   = AV_CODEC_CAP_DELAY,
 +    .caps_internal  = FF_CODEC_CAP_SETS_PKT_DTS,
 +};
index 0ef6c74,0000000..6e6127d
mode 100644,000000..100644
--- /dev/null
@@@ -1,47 -1,0 +1,47 @@@
- #include "libavcodec/h264.h"
 +/*
 + * Copyright (c) 2015 Zhou Xiaoyong <zhouxiaoyong@loongson.cn>
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +#ifndef AVCODEC_MIPS_H264CHROMA_MIPS_H
 +#define AVCODEC_MIPS_H264CHROMA_MIPS_H
 +
++#include "libavcodec/h264dec.h"
 +void ff_put_h264_chroma_mc8_msa(uint8_t *dst, uint8_t *src, int stride,
 +                                int height, int x, int y);
 +void ff_put_h264_chroma_mc4_msa(uint8_t *dst, uint8_t *src, int stride,
 +                                int height, int x, int y);
 +void ff_put_h264_chroma_mc2_msa(uint8_t *dst, uint8_t *src, int stride,
 +                                int height, int x, int y);
 +void ff_avg_h264_chroma_mc8_msa(uint8_t *dst, uint8_t *src, int stride,
 +                                int height, int x, int y);
 +void ff_avg_h264_chroma_mc4_msa(uint8_t *dst, uint8_t *src, int stride,
 +                                int height, int x, int y);
 +void ff_avg_h264_chroma_mc2_msa(uint8_t *dst, uint8_t *src, int stride,
 +                                int height, int x, int y);
 +
 +void ff_put_h264_chroma_mc8_mmi(uint8_t *dst, uint8_t *src, int stride,
 +        int h, int x, int y);
 +void ff_avg_h264_chroma_mc8_mmi(uint8_t *dst, uint8_t *src, int stride,
 +        int h, int x, int y);
 +void ff_put_h264_chroma_mc4_mmi(uint8_t *dst, uint8_t *src, int stride,
 +        int h, int x, int y);
 +void ff_avg_h264_chroma_mc4_mmi(uint8_t *dst, uint8_t *src, int stride,
 +        int h, int x, int y);
 +
 +#endif /* AVCODEC_MIPS_H264CHROMA_MIPS_H */
index 2fdfd11,0000000..a578457
mode 100644,000000..100644
--- /dev/null
@@@ -1,577 -1,0 +1,577 @@@
- #include "libavcodec/h264.h"
 +/*
 + * Copyright (c) 2015 Parag Salasakar (Parag.Salasakar@imgtec.com)
 + *                    Zhou Xiaoyong <zhouxiaoyong@loongson.cn>
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +#ifndef AVCODEC_MIPS_H264DSP_MIPS_H
 +#define AVCODEC_MIPS_H264DSP_MIPS_H
 +
++#include "libavcodec/h264dec.h"
 +#include "constants.h"
 +
 +void ff_h264_h_lpf_luma_inter_msa(uint8_t *src, int stride,
 +                                  int alpha, int beta, int8_t *tc0);
 +void ff_h264_v_lpf_luma_inter_msa(uint8_t *src, int stride,
 +                                  int alpha, int beta, int8_t *tc0);
 +void ff_h264_h_lpf_chroma_inter_msa(uint8_t *src, int stride,
 +                                    int alpha, int beta, int8_t *tc0);
 +void ff_h264_v_lpf_chroma_inter_msa(uint8_t *src, int stride,
 +                                    int alpha, int beta, int8_t *tc0);
 +void ff_h264_h_loop_filter_chroma422_msa(uint8_t *src, int32_t stride,
 +                                         int32_t alpha, int32_t beta,
 +                                         int8_t *tc0);
 +void ff_h264_h_loop_filter_chroma422_mbaff_msa(uint8_t *src, int32_t stride,
 +                                               int32_t alpha, int32_t beta,
 +                                               int8_t *tc0);
 +void ff_h264_h_loop_filter_luma_mbaff_msa(uint8_t *src, int32_t stride,
 +                                          int32_t alpha, int32_t beta,
 +                                          int8_t *tc0);
 +
 +void ff_h264_idct_add_msa(uint8_t *dst, int16_t *src, int32_t dst_stride);
 +void ff_h264_idct4x4_addblk_dc_msa(uint8_t *dst, int16_t *src,
 +                                   int32_t dst_stride);
 +void ff_h264_deq_idct_luma_dc_msa(int16_t *dst, int16_t *src,
 +                                  int32_t de_q_val);
 +void ff_h264_idct_add16_msa(uint8_t *dst, const int32_t *blk_offset,
 +                            int16_t *block, int32_t stride,
 +                            const uint8_t nnzc[15 * 8]);
 +void ff_h264_idct_add16_intra_msa(uint8_t *dst, const int32_t *blk_offset,
 +                                  int16_t *block, int32_t dst_stride,
 +                                  const uint8_t nnzc[15 * 8]);
 +void ff_h264_idct_add8_msa(uint8_t **dst, const int32_t *blk_offset,
 +                           int16_t *block, int32_t dst_stride,
 +                           const uint8_t nnzc[15 * 8]);
 +void ff_h264_idct_add8_422_msa(uint8_t **dst, const int32_t *blk_offset,
 +                               int16_t *block, int32_t dst_stride,
 +                               const uint8_t nnzc[15 * 8]);
 +void ff_h264_idct8_addblk_msa(uint8_t *dst, int16_t *src, int32_t dst_stride);
 +void ff_h264_idct8_dc_addblk_msa(uint8_t *dst, int16_t *src,
 +                                 int32_t dst_stride);
 +void ff_h264_idct8_add4_msa(uint8_t *dst, const int *blk_offset,
 +                            int16_t *blk, int dst_stride,
 +                            const uint8_t nnzc[15 * 8]);
 +
 +void ff_h264_h_lpf_luma_intra_msa(uint8_t *src, int stride,
 +                                  int alpha, int beta);
 +void ff_h264_v_lpf_luma_intra_msa(uint8_t *src, int stride,
 +                                  int alpha, int beta);
 +void ff_h264_h_lpf_chroma_intra_msa(uint8_t *src, int stride,
 +                                    int alpha, int beta);
 +void ff_h264_v_lpf_chroma_intra_msa(uint8_t *src, int stride,
 +                                    int alpha, int beta);
 +void ff_h264_h_loop_filter_luma_mbaff_intra_msa(uint8_t *src, int stride,
 +                                                int alpha, int beta);
 +
 +void ff_biweight_h264_pixels16_8_msa(uint8_t *dst, uint8_t *src,
 +                                     int stride, int height, int log2_denom,
 +                                     int weightd, int weights, int offset);
 +void ff_biweight_h264_pixels8_8_msa(uint8_t *dst, uint8_t *src,
 +                                    int stride, int height, int log2_denom,
 +                                    int weightd, int weights, int offset);
 +void ff_biweight_h264_pixels4_8_msa(uint8_t *dst, uint8_t *src,
 +                                    int stride, int height, int log2_denom,
 +                                    int weightd, int weights, int offset);
 +void ff_weight_h264_pixels16_8_msa(uint8_t *src, int stride, int height,
 +                                   int log2_denom, int weight, int offset);
 +void ff_weight_h264_pixels8_8_msa(uint8_t *src, int stride, int height,
 +                                  int log2_denom, int weight, int offset);
 +void ff_weight_h264_pixels4_8_msa(uint8_t *src, int stride, int height,
 +                                  int log2_denom, int weight, int offset);
 +
 +void ff_put_h264_qpel16_mc00_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc10_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc20_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc30_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc01_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc11_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc21_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc31_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc02_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc12_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc22_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc32_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc03_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc13_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc23_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc33_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +
 +void ff_put_h264_qpel8_mc00_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc10_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc20_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc30_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc01_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc11_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc21_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc31_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc02_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc12_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc22_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc32_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc03_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc13_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc23_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc33_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +
 +void ff_put_h264_qpel4_mc00_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc10_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc20_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc30_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc01_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc11_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc21_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc31_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc02_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc12_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc22_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc32_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc03_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc13_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc23_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc33_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +
 +void ff_avg_h264_qpel16_mc00_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc10_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc20_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc30_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc01_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc11_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc21_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc31_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc02_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc12_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc22_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc32_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc03_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc13_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc23_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc33_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +
 +void ff_avg_h264_qpel8_mc00_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc10_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc20_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc30_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc01_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc11_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc21_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc31_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc02_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc12_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc22_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc32_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc03_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc13_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc23_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc33_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +
 +void ff_avg_h264_qpel4_mc00_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc10_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc20_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc30_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc01_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc11_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc21_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc31_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc02_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc12_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc22_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc32_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc03_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc13_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc23_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc33_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +
 +void ff_h264_intra_predict_plane_8x8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_predict_dc_4blk_8x8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_predict_hor_dc_8x8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_predict_vert_dc_8x8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_predict_mad_cow_dc_l0t_8x8_msa(uint8_t *src,
 +                                                  ptrdiff_t stride);
 +void ff_h264_intra_predict_mad_cow_dc_0lt_8x8_msa(uint8_t *src,
 +                                                  ptrdiff_t stride);
 +void ff_h264_intra_predict_mad_cow_dc_l00_8x8_msa(uint8_t *src,
 +                                                  ptrdiff_t stride);
 +void ff_h264_intra_predict_mad_cow_dc_0l0_8x8_msa(uint8_t *src,
 +                                                  ptrdiff_t stride);
 +void ff_h264_intra_predict_plane_16x16_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_pred_vert_8x8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_pred_horiz_8x8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_pred_dc_16x16_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_pred_vert_16x16_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_pred_horiz_16x16_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_pred_dc_left_16x16_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_pred_dc_top_16x16_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_pred_dc_128_8x8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_pred_dc_128_16x16_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_vp8_pred8x8_127_dc_8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_vp8_pred8x8_129_dc_8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_vp8_pred16x16_127_dc_8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_vp8_pred16x16_129_dc_8_msa(uint8_t *src, ptrdiff_t stride);
 +
 +void ff_h264_add_pixels4_8_mmi(uint8_t *_dst, int16_t *_src, int stride);
 +void ff_h264_idct_add_8_mmi(uint8_t *dst, int16_t *block, int stride);
 +void ff_h264_idct8_add_8_mmi(uint8_t *dst, int16_t *block, int stride);
 +void ff_h264_idct_dc_add_8_mmi(uint8_t *dst, int16_t *block, int stride);
 +void ff_h264_idct8_dc_add_8_mmi(uint8_t *dst, int16_t *block, int stride);
 +void ff_h264_idct_add16_8_mmi(uint8_t *dst, const int *block_offset,
 +        int16_t *block, int stride, const uint8_t nnzc[15*8]);
 +void ff_h264_idct_add16intra_8_mmi(uint8_t *dst, const int *block_offset,
 +        int16_t *block, int stride, const uint8_t nnzc[15*8]);
 +void ff_h264_idct8_add4_8_mmi(uint8_t *dst, const int *block_offset,
 +        int16_t *block, int stride, const uint8_t nnzc[15*8]);
 +void ff_h264_idct_add8_8_mmi(uint8_t **dest, const int *block_offset,
 +        int16_t *block, int stride, const uint8_t nnzc[15*8]);
 +void ff_h264_idct_add8_422_8_mmi(uint8_t **dest, const int *block_offset,
 +        int16_t *block, int stride, const uint8_t nnzc[15*8]);
 +void ff_h264_luma_dc_dequant_idct_8_mmi(int16_t *output, int16_t *input,
 +        int qmul);
 +void ff_h264_chroma_dc_dequant_idct_8_mmi(int16_t *block, int qmul);
 +void ff_h264_chroma422_dc_dequant_idct_8_mmi(int16_t *block, int qmul);
 +
 +void ff_h264_weight_pixels16_8_mmi(uint8_t *block, int stride, int height,
 +        int log2_denom, int weight, int offset);
 +void ff_h264_biweight_pixels16_8_mmi(uint8_t *dst, uint8_t *src,
 +        int stride, int height, int log2_denom, int weightd, int weights,
 +        int offset);
 +void ff_h264_weight_pixels8_8_mmi(uint8_t *block, int stride, int height,
 +        int log2_denom, int weight, int offset);
 +void ff_h264_biweight_pixels8_8_mmi(uint8_t *dst, uint8_t *src,
 +        int stride, int height, int log2_denom, int weightd, int weights,
 +        int offset);
 +void ff_h264_weight_pixels4_8_mmi(uint8_t *block, int stride, int height,
 +        int log2_denom, int weight, int offset);
 +void ff_h264_biweight_pixels4_8_mmi(uint8_t *dst, uint8_t *src,
 +        int stride, int height, int log2_denom, int weightd, int weights,
 +        int offset);
 +
 +void ff_deblock_v_chroma_8_mmi(uint8_t *pix, int stride, int alpha, int beta,
 +        int8_t *tc0);
 +void ff_deblock_v_chroma_intra_8_mmi(uint8_t *pix, int stride, int alpha,
 +        int beta);
 +void ff_deblock_h_chroma_8_mmi(uint8_t *pix, int stride, int alpha, int beta,
 +        int8_t *tc0);
 +void ff_deblock_h_chroma_intra_8_mmi(uint8_t *pix, int stride, int alpha,
 +        int beta);
 +void ff_deblock_v_luma_8_mmi(uint8_t *pix, int stride, int alpha, int beta,
 +        int8_t *tc0);
 +void ff_deblock_v_luma_intra_8_mmi(uint8_t *pix, int stride, int alpha,
 +        int beta);
 +void ff_deblock_h_luma_8_mmi(uint8_t *pix, int stride, int alpha, int beta,
 +        int8_t *tc0);
 +void ff_deblock_h_luma_intra_8_mmi(uint8_t *pix, int stride, int alpha,
 +        int beta);
 +void ff_deblock_v8_luma_8_mmi(uint8_t *pix, int stride, int alpha, int beta,
 +        int8_t *tc0);
 +void ff_deblock_v8_luma_intra_8_mmi(uint8_t *pix, int stride, int alpha,
 +        int beta);
 +
 +void ff_put_h264_qpel16_mc00_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc10_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc20_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc30_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc01_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc11_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc21_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc31_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc02_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc12_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc22_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc32_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc03_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc13_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc23_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc33_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +
 +void ff_put_h264_qpel8_mc00_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc10_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc20_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc30_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc01_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc11_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc21_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc31_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc02_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc12_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc22_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc32_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc03_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc13_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc23_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc33_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +
 +void ff_put_h264_qpel4_mc00_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc10_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc20_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc30_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc01_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc11_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc21_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc31_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc02_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc12_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc22_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc32_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc03_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc13_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc23_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc33_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +
 +void ff_avg_h264_qpel16_mc00_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc10_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc20_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc30_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc01_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc11_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc21_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc31_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc02_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc12_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc22_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc32_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc03_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc13_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc23_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc33_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +
 +void ff_avg_h264_qpel8_mc00_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc10_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc20_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc30_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc01_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc11_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc21_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc31_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc02_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc12_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc22_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc32_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc03_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc13_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc23_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc33_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +
 +void ff_avg_h264_qpel4_mc00_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc10_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc20_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc30_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc01_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc11_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc21_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc31_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc02_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc12_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc22_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc32_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc03_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc13_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc23_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc33_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +
 +#endif /* AVCODEC_MIPS_H264DSP_MIPS_H */
Simple merge
  #include "libavutil/ppc/types_altivec.h"
  #include "libavutil/ppc/util_altivec.h"
  
- #include "libavcodec/h264.h"
+ #include "libavcodec/h264dec.h"
  #include "libavcodec/h264dsp.h"
  
 -#if HAVE_ALTIVEC && HAVE_BIGENDIAN
 +#if HAVE_ALTIVEC
  
  /****************************************************************************
   * IDCT transform:
Simple merge
Simple merge
Simple merge
Simple merge
   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
   */
  
 +#include <CoreFoundation/CFDictionary.h>
  #include <CoreFoundation/CFNumber.h>
  #include <CoreFoundation/CFData.h>
 -#include <CoreFoundation/CFString.h>
  
 +#include "vda.h"
  #include "libavutil/avutil.h"
- #include "h264.h"
+ #include "h264dec.h"
 -#include "internal.h"
 -#include "vda.h"
 -#include "vda_internal.h"
 -
 -typedef struct VDAContext {
 -    // The current bitstream buffer.
 -    uint8_t             *bitstream;
 -
 -    // The current size of the bitstream.
 -    int                  bitstream_size;
 -
 -    // The reference size used for fast reallocation.
 -    int                  allocated_size;
  
 -    CVImageBufferRef frame;
 -} VDAContext;
 +struct vda_buffer {
 +    CVPixelBufferRef cv_buffer;
 +};
 +#include "internal.h"
 +#include "vda_vt_internal.h"
  
 -/* Decoder callback that adds the VDA frame to the queue in display order. */
 +/* Decoder callback that adds the vda frame to the queue in display order. */
  static void vda_decoder_callback(void *vda_hw_ctx,
                                   CFDictionaryRef user_info,
                                   OSStatus status,
index a196eb7,0000000..92839e2
mode 100644,000000..100644
--- /dev/null
@@@ -1,263 -1,0 +1,263 @@@
- #include "h264.h"
 +/*
 + * Copyright (c) 2012, Xidorn Quan
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/**
 + * @file
 + * H.264 decoder via VDA
 + * @author Xidorn Quan <quanxunzhen@gmail.com>
 + */
 +
 +#include <string.h>
 +#include <CoreFoundation/CoreFoundation.h>
 +
 +#include "vda.h"
++#include "h264dec.h"
 +#include "avcodec.h"
 +
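 +/* CoreFoundation version shipped with OS X 10.7; older SDKs do not define it */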
 +#ifndef kCFCoreFoundationVersionNumber10_7
 +#define kCFCoreFoundationVersionNumber10_7      635.00
 +#endif
 +
 +extern AVCodec ff_h264_decoder, ff_h264_vda_decoder;
 +
 +static const enum AVPixelFormat vda_pixfmts_prior_10_7[] = {
 +    AV_PIX_FMT_UYVY422,
 +    AV_PIX_FMT_YUV420P,
 +    AV_PIX_FMT_NONE
 +};
 +
 +static const enum AVPixelFormat vda_pixfmts[] = {
 +    AV_PIX_FMT_UYVY422,
 +    AV_PIX_FMT_YUYV422,
 +    AV_PIX_FMT_NV12,
 +    AV_PIX_FMT_YUV420P,
 +    AV_PIX_FMT_NONE
 +};
 +
 +typedef struct {
 +    H264Context h264ctx;
 +    int h264_initialized;
 +    struct vda_context vda_ctx;
 +    enum AVPixelFormat pix_fmt;
 +
 +    /* for backing up fields set by the user;
 +     * we have to gain full control of such fields here */
 +    void *hwaccel_context;
 +    enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
 +    int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags);
 +} VDADecoderContext;
 +
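 +/* always return the VDA hwaccel pixel format so that decoding is routed
 + * through the VDA code path regardless of the offered candidates */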
 +static enum AVPixelFormat get_format(struct AVCodecContext *avctx,
 +        const enum AVPixelFormat *fmt)
 +{
 +    return AV_PIX_FMT_VDA_VLD;
 +}
 +
 +typedef struct {
 +    CVPixelBufferRef cv_buffer;
 +} VDABufferContext;
 +
 +static void release_buffer(void *opaque, uint8_t *data)
 +{
 +    VDABufferContext *context = opaque;
 +    CVPixelBufferUnlockBaseAddress(context->cv_buffer, 0);
 +    CVPixelBufferRelease(context->cv_buffer);
 +    av_free(context);
 +}
 +
 +static int get_buffer2(AVCodecContext *avctx, AVFrame *pic, int flag)
 +{
 +    VDABufferContext *context = av_mallocz(sizeof(VDABufferContext));
 +    AVBufferRef *buffer = av_buffer_create(NULL, 0, release_buffer, context, 0);
 +    if (!context || !buffer) {
 +        av_free(context);
 +        return AVERROR(ENOMEM);
 +    }
 +
 +    pic->buf[0] = buffer;
 +    pic->data[0] = (void *)1;
 +    return 0;
 +}
 +
 +static inline void set_context(AVCodecContext *avctx)
 +{
 +    VDADecoderContext *ctx = avctx->priv_data;
 +    ctx->hwaccel_context = avctx->hwaccel_context;
 +    avctx->hwaccel_context = &ctx->vda_ctx;
 +    ctx->get_format = avctx->get_format;
 +    avctx->get_format = get_format;
 +    ctx->get_buffer2 = avctx->get_buffer2;
 +    avctx->get_buffer2 = get_buffer2;
 +}
 +
 +static inline void restore_context(AVCodecContext *avctx)
 +{
 +    VDADecoderContext *ctx = avctx->priv_data;
 +    avctx->hwaccel_context = ctx->hwaccel_context;
 +    avctx->get_format = ctx->get_format;
 +    avctx->get_buffer2 = ctx->get_buffer2;
 +}
 +
 +static int vdadec_decode(AVCodecContext *avctx,
 +        void *data, int *got_frame, AVPacket *avpkt)
 +{
 +    VDADecoderContext *ctx = avctx->priv_data;
 +    AVFrame *pic = data;
 +    int ret;
 +
 +    set_context(avctx);
 +    ret = ff_h264_decoder.decode(avctx, data, got_frame, avpkt);
 +    restore_context(avctx);
 +    if (*got_frame) {
 +        AVBufferRef *buffer = pic->buf[0];
 +        VDABufferContext *context = av_buffer_get_opaque(buffer);
 +        CVPixelBufferRef cv_buffer = (CVPixelBufferRef)pic->data[3];
 +
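 +        /* keep the pixel buffer alive and its base address mapped until
 +         * release_buffer() runs when the frame is unreferenced */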
 +        CVPixelBufferRetain(cv_buffer);
 +        CVPixelBufferLockBaseAddress(cv_buffer, 0);
 +        context->cv_buffer = cv_buffer;
 +        pic->format = ctx->pix_fmt;
 +        if (CVPixelBufferIsPlanar(cv_buffer)) {
 +            int i, count = CVPixelBufferGetPlaneCount(cv_buffer);
 +            av_assert0(count < 4);
 +            for (i = 0; i < count; i++) {
 +                pic->data[i] = CVPixelBufferGetBaseAddressOfPlane(cv_buffer, i);
 +                pic->linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(cv_buffer, i);
 +            }
 +        } else {
 +            pic->data[0] = CVPixelBufferGetBaseAddress(cv_buffer);
 +            pic->linesize[0] = CVPixelBufferGetBytesPerRow(cv_buffer);
 +        }
 +    }
 +    avctx->pix_fmt = ctx->pix_fmt;
 +
 +    return ret;
 +}
 +
 +static av_cold int vdadec_close(AVCodecContext *avctx)
 +{
 +    VDADecoderContext *ctx = avctx->priv_data;
 +    /* release buffers and decoder */
 +    ff_vda_destroy_decoder(&ctx->vda_ctx);
 +    /* close H.264 decoder */
 +    if (ctx->h264_initialized) {
 +        set_context(avctx);
 +        ff_h264_decoder.close(avctx);
 +        restore_context(avctx);
 +    }
 +    return 0;
 +}
 +
 +static av_cold int vdadec_init(AVCodecContext *avctx)
 +{
 +    VDADecoderContext *ctx = avctx->priv_data;
 +    struct vda_context *vda_ctx = &ctx->vda_ctx;
 +    OSStatus status;
 +    int ret, i;
 +
 +    ctx->h264_initialized = 0;
 +
 +    /* init pix_fmts of codec */
 +    if (!ff_h264_vda_decoder.pix_fmts) {
 +        if (kCFCoreFoundationVersionNumber < kCFCoreFoundationVersionNumber10_7)
 +            ff_h264_vda_decoder.pix_fmts = vda_pixfmts_prior_10_7;
 +        else
 +            ff_h264_vda_decoder.pix_fmts = vda_pixfmts;
 +    }
 +
 +    /* init vda */
 +    memset(vda_ctx, 0, sizeof(struct vda_context));
 +    vda_ctx->width = avctx->width;
 +    vda_ctx->height = avctx->height;
 +    vda_ctx->format = 'avc1';
 +    vda_ctx->use_sync_decoding = 1;
 +    vda_ctx->use_ref_buffer = 1;
 +    ctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
 +    switch (ctx->pix_fmt) {
 +    case AV_PIX_FMT_UYVY422:
 +        vda_ctx->cv_pix_fmt_type = '2vuy';
 +        break;
 +    case AV_PIX_FMT_YUYV422:
 +        vda_ctx->cv_pix_fmt_type = 'yuvs';
 +        break;
 +    case AV_PIX_FMT_NV12:
 +        vda_ctx->cv_pix_fmt_type = '420v';
 +        break;
 +    case AV_PIX_FMT_YUV420P:
 +        vda_ctx->cv_pix_fmt_type = 'y420';
 +        break;
 +    default:
 +        av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format: %d\n", avctx->pix_fmt);
 +        goto failed;
 +    }
 +    status = ff_vda_create_decoder(vda_ctx,
 +                                   avctx->extradata, avctx->extradata_size);
 +    if (status != kVDADecoderNoErr) {
 +        av_log(avctx, AV_LOG_ERROR,
 +                "Failed to init VDA decoder: %d.\n", status);
 +        goto failed;
 +    }
 +
 +    /* init H.264 decoder */
 +    set_context(avctx);
 +    ret = ff_h264_decoder.init(avctx);
 +    restore_context(avctx);
 +    if (ret < 0) {
 +        av_log(avctx, AV_LOG_ERROR, "Failed to open H.264 decoder.\n");
 +        goto failed;
 +    }
 +    ctx->h264_initialized = 1;
 +
 +    for (i = 0; i < MAX_SPS_COUNT; i++) {
 +        const SPS *sps = (const SPS*)ctx->h264ctx.ps.sps_list[i]->data;
 +        if (sps && (sps->bit_depth_luma != 8 ||
 +                sps->chroma_format_idc == 2 ||
 +                sps->chroma_format_idc == 3)) {
 +            av_log(avctx, AV_LOG_ERROR, "Format is not supported.\n");
 +            goto failed;
 +        }
 +    }
 +
 +    return 0;
 +
 +failed:
 +    vdadec_close(avctx);
 +    return -1;
 +}
 +
 +static void vdadec_flush(AVCodecContext *avctx)
 +{
 +    set_context(avctx);
 +    ff_h264_decoder.flush(avctx);
 +    restore_context(avctx);
 +}
 +
 +AVCodec ff_h264_vda_decoder = {
 +    .name           = "h264_vda",
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_H264,
 +    .priv_data_size = sizeof(VDADecoderContext),
 +    .init           = vdadec_init,
 +    .close          = vdadec_close,
 +    .decode         = vdadec_decode,
 +    .capabilities   = AV_CODEC_CAP_DELAY,
 +    .flush          = vdadec_flush,
 +    .long_name      = NULL_IF_CONFIG_SMALL("H.264 (VDA acceleration)"),
 +};
  
  #include "avcodec.h"
  #include "internal.h"
- #include "h264.h"
+ #include "h264dec.h"
  #include "vc1.h"
  #include "vdpau.h"
 +#include "vdpau_compat.h"
  #include "vdpau_internal.h"
  
 +// XXX: at the time of adding this ifdefery, av_assert* wasn't used outside.
 +// When dropping it, make sure other av_assert* were not added since then.
 +#if FF_API_BUFS_VDPAU
 +#include "libavutil/avassert.h"
 +#endif
 +
 +#if FF_API_VDPAU
 +#undef NDEBUG
 +#include <assert.h>
 +#endif
 +
  /**
   * @addtogroup VDPAU_Decoding
   *
index 6b4b086,0000000..768acce
mode 100644,000000..100644
--- /dev/null
@@@ -1,48 -1,0 +1,48 @@@
- #include "h264.h"
 +/*
 + * Video Decode and Presentation API for UNIX (VDPAU) is used for
 + * HW decode acceleration for MPEG-1/2, H.264 and VC-1.
 + *
 + * Copyright (C) 2008 NVIDIA
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +#ifndef AVCODEC_VDPAU_COMPAT_H
 +#define AVCODEC_VDPAU_COMPAT_H
 +
 +#include <stdint.h>
 +
++#include "h264dec.h"
 +#include "mpeg4video.h"
 +
 +void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf,
 +                             int buf_size);
 +
 +void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
 +                                    int buf_size, int slice_count);
 +
 +void ff_vdpau_h264_picture_start(H264Context *h);
 +void ff_vdpau_h264_set_reference_frames(H264Context *h);
 +void ff_vdpau_h264_picture_complete(H264Context *h);
 +
 +void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
 +                                 int buf_size);
 +
 +void ff_vdpau_mpeg4_decode_picture(Mpeg4DecContext *s, const uint8_t *buf,
 +                                   int buf_size);
 +
 +#endif /* AVCODEC_VDPAU_COMPAT_H */
Simple merge
index c2c621d,0000000..1288aa5
mode 100644,000000..100644
--- /dev/null
@@@ -1,701 -1,0 +1,701 @@@
- #include "h264.h"
 +/*
 + * Videotoolbox hardware acceleration
 + *
 + * copyright (c) 2012 Sebastien Zwickert
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +#include "config.h"
 +#if CONFIG_VIDEOTOOLBOX
 +#  include "videotoolbox.h"
 +#else
 +#  include "vda.h"
 +#endif
 +#include "vda_vt_internal.h"
 +#include "libavutil/avutil.h"
 +#include "bytestream.h"
++#include "h264dec.h"
 +#include "mpegvideo.h"
 +
 +#ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
 +#  define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
 +#endif
 +
 +#define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING  12
 +
 +static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
 +{
 +    CVPixelBufferRef cv_buffer = (CVImageBufferRef)data;
 +    CVPixelBufferRelease(cv_buffer);
 +}
 +
 +static int videotoolbox_buffer_copy(VTContext *vtctx,
 +                                    const uint8_t *buffer,
 +                                    uint32_t size)
 +{
 +    void *tmp;
 +
 +    tmp = av_fast_realloc(vtctx->bitstream,
 +                         &vtctx->allocated_size,
 +                         size);
 +
 +    if (!tmp)
 +        return AVERROR(ENOMEM);
 +
 +    vtctx->bitstream = tmp;
 +    memcpy(vtctx->bitstream, buffer, size);
 +    vtctx->bitstream_size = size;
 +
 +    return 0;
 +}
 +
 +int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
 +{
 +    frame->width  = avctx->width;
 +    frame->height = avctx->height;
 +    frame->format = avctx->pix_fmt;
 +    frame->buf[0] = av_buffer_alloc(1);
 +
 +    if (!frame->buf[0])
 +        return AVERROR(ENOMEM);
 +
 +    return 0;
 +}
 +
 +#define AV_W8(p, v) *(p) = (v)
 +
 +CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
 +{
 +    H264Context *h     = avctx->priv_data;
 +    CFDataRef data = NULL;
 +    uint8_t *p;
 +    int vt_extradata_size = 6 + 2 + h->ps.sps->data_size + 3 + h->ps.pps->data_size;
 +    uint8_t *vt_extradata = av_malloc(vt_extradata_size);
 +    if (!vt_extradata)
 +        return NULL;
 +
 +    p = vt_extradata;
 +
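 +    /* AVCDecoderConfigurationRecord (avcC) layout, as expected by
 +     * VideoToolbox: a fixed header, then the raw SPS and PPS NAL units,
 +     * each preceded by a 16-bit big-endian size field */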
 +    AV_W8(p + 0, 1); /* version */
 +    AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
 +    AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
 +    AV_W8(p + 3, h->ps.sps->data[3]); /* level */
 +    AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
 +    AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
 +    AV_WB16(p + 6, h->ps.sps->data_size);
 +    memcpy(p + 8, h->ps.sps->data, h->ps.sps->data_size);
 +    p += 8 + h->ps.sps->data_size;
 +    AV_W8(p + 0, 1); /* number of pps */
 +    AV_WB16(p + 1, h->ps.pps->data_size);
 +    memcpy(p + 3, h->ps.pps->data, h->ps.pps->data_size);
 +
 +    p += 3 + h->ps.pps->data_size;
 +    av_assert0(p - vt_extradata == vt_extradata_size);
 +
 +    data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
 +    av_free(vt_extradata);
 +    return data;
 +}
 +
 +int ff_videotoolbox_buffer_create(VTContext *vtctx, AVFrame *frame)
 +{
 +    av_buffer_unref(&frame->buf[0]);
 +
 +    frame->buf[0] = av_buffer_create((uint8_t*)vtctx->frame,
 +                                     sizeof(vtctx->frame),
 +                                     videotoolbox_buffer_release,
 +                                     NULL,
 +                                     AV_BUFFER_FLAG_READONLY);
 +    if (!frame->buf[0]) {
 +        return AVERROR(ENOMEM);
 +    }
 +
 +    frame->data[3] = (uint8_t*)vtctx->frame;
 +    vtctx->frame = NULL;
 +
 +    return 0;
 +}
 +
 +int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
 +                                     const uint8_t *buffer,
 +                                     uint32_t size)
 +{
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +    H264Context *h  = avctx->priv_data;
 +
 +    vtctx->bitstream_size = 0;
 +
 +    if (h->is_avc == 1) {
 +        return videotoolbox_buffer_copy(vtctx, buffer, size);
 +    }
 +
 +    return 0;
 +}
 +
 +int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
 +                                      const uint8_t *buffer,
 +                                      uint32_t size)
 +{
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +    H264Context *h  = avctx->priv_data;
 +    void *tmp;
 +
 +    if (h->is_avc == 1)
 +        return 0;
 +
 +    tmp = av_fast_realloc(vtctx->bitstream,
 +                          &vtctx->allocated_size,
 +                          vtctx->bitstream_size+size+4);
 +    if (!tmp)
 +        return AVERROR(ENOMEM);
 +
 +    vtctx->bitstream = tmp;
 +
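 +    /* VideoToolbox consumes AVCC-style (length-prefixed) bitstreams, so
 +     * each Annex B NAL unit is prefixed with its 4-byte big-endian size
 +     * here */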
 +    AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
 +    memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
 +
 +    vtctx->bitstream_size += size + 4;
 +
 +    return 0;
 +}
 +
 +int ff_videotoolbox_uninit(AVCodecContext *avctx)
 +{
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +    if (vtctx) {
 +        av_freep(&vtctx->bitstream);
 +        if (vtctx->frame)
 +            CVPixelBufferRelease(vtctx->frame);
 +    }
 +
 +    return 0;
 +}
 +
 +#if CONFIG_VIDEOTOOLBOX
 +static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
 +{
 +    int i;
 +    uint8_t b;
 +
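 +    /* MP4 descriptor lengths use base-128 coding: 7 payload bits per
 +     * byte, high bit set on all but the last byte. This always emits
 +     * the fixed 4-byte form, e.g. 0x20 -> 0x80 0x80 0x80 0x20. */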
 +    for (i = 3; i >= 0; i--) {
 +        b = (length >> (i * 7)) & 0x7F;
 +        if (i != 0)
 +            b |= 0x80;
 +
 +        bytestream2_put_byteu(pb, b);
 +    }
 +}
 +
 +static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
 +{
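 +    /* Build a bare MP4 ES_Descriptor (ISO/IEC 14496-1) wrapping the
 +     * codec extradata as DecoderSpecificInfo, for use as the "esds"
 +     * sample description extension. */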
 +    CFDataRef data;
 +    uint8_t *rw_extradata;
 +    PutByteContext pb;
 +    // ES_Descriptor payload: ES_ID + priority (3), DecoderConfigDescr
 +    // tag + length (5), body (13), DecSpecificInfo tag + length (5),
 +    // extradata, SLConfigDescr (3)
 +    int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
 +    // DecoderConfigDescr payload: body (13) + DecSpecificInfo tag + length (5) + extradata
 +    int config_size = 13 + 5 + avctx->extradata_size;
 +    int s;
 +
 +    if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
 +        return NULL;
 +
 +    bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
 +    bytestream2_put_byteu(&pb, 0);        // version
 +    bytestream2_put_ne24(&pb, 0);         // flags
 +
 +    // elementary stream descriptor
 +    bytestream2_put_byteu(&pb, 0x03);     // ES_DescrTag
 +    videotoolbox_write_mp4_descr_length(&pb, full_size);
 +    bytestream2_put_ne16(&pb, 0);         // esid
 +    bytestream2_put_byteu(&pb, 0);        // stream priority (0-32)
 +
 +    // decoder configuration descriptor
 +    bytestream2_put_byteu(&pb, 0x04);     // DecoderConfigDescrTag
 +    videotoolbox_write_mp4_descr_length(&pb, config_size);
 +    bytestream2_put_byteu(&pb, 32);       // object type indication: 0x20 = MPEG-4 Visual
 +    bytestream2_put_byteu(&pb, 0x11);     // stream type
 +    bytestream2_put_ne24(&pb, 0);         // buffer size
 +    bytestream2_put_ne32(&pb, 0);         // max bitrate
 +    bytestream2_put_ne32(&pb, 0);         // avg bitrate
 +
 +    // decoder specific descriptor
 +    bytestream2_put_byteu(&pb, 0x05);     // DecSpecificInfoTag
 +    videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
 +
 +    bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);
 +
 +    // SLConfigDescriptor
 +    bytestream2_put_byteu(&pb, 0x06);     // SLConfigDescrTag
 +    bytestream2_put_byteu(&pb, 0x01);     // length
 +    bytestream2_put_byteu(&pb, 0x02);     // predefined: 0x02 = reserved for MP4 files
 +
 +    s = bytestream2_size_p(&pb);
 +
 +    data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);
 +
 +    av_freep(&rw_extradata);
 +    return data;
 +}
 +
 +static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
 +                                                           void *buffer,
 +                                                           int size)
 +{
 +    OSStatus status;
 +    CMBlockBufferRef  block_buf;
 +    CMSampleBufferRef sample_buf;
 +
 +    block_buf  = NULL;
 +    sample_buf = NULL;
 +
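 +    /* Wrap the bitstream in a CMBlockBuffer without copying it:
 +     * kCFAllocatorNull as blockAllocator means the memory is merely
 +     * referenced, so it must outlive the sample buffer. */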
 +    status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, // structureAllocator
 +                                                buffer,             // memoryBlock
 +                                                size,               // blockLength
 +                                                kCFAllocatorNull,   // blockAllocator
 +                                                NULL,               // customBlockSource
 +                                                0,                  // offsetToData
 +                                                size,               // dataLength
 +                                                0,                  // flags
 +                                                &block_buf);
 +
 +    if (!status) {
 +        status = CMSampleBufferCreate(kCFAllocatorDefault,  // allocator
 +                                      block_buf,            // dataBuffer
 +                                      TRUE,                 // dataReady
 +                                      0,                    // makeDataReadyCallback
 +                                      0,                    // makeDataReadyRefcon
 +                                      fmt_desc,             // formatDescription
 +                                      1,                    // numSamples
 +                                      0,                    // numSampleTimingEntries
 +                                      NULL,                 // sampleTimingArray
 +                                      0,                    // numSampleSizeEntries
 +                                      NULL,                 // sampleSizeArray
 +                                      &sample_buf);
 +    }
 +
 +    if (block_buf)
 +        CFRelease(block_buf);
 +
 +    return sample_buf;
 +}
 +
 +static void videotoolbox_decoder_callback(void *opaque,
 +                                          void *sourceFrameRefCon,
 +                                          OSStatus status,
 +                                          VTDecodeInfoFlags flags,
 +                                          CVImageBufferRef image_buffer,
 +                                          CMTime pts,
 +                                          CMTime duration)
 +{
 +    AVCodecContext *avctx = opaque;
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +
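 +    /* The session is drained after every decode call, so at most one
 +     * output picture is pending; keep only the most recent one. */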
 +    if (vtctx->frame) {
 +        CVPixelBufferRelease(vtctx->frame);
 +        vtctx->frame = NULL;
 +    }
 +
 +    if (!image_buffer) {
 +        av_log(avctx, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
 +        return;
 +    }
 +
 +    vtctx->frame = CVPixelBufferRetain(image_buffer);
 +}
 +
 +static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
 +{
 +    OSStatus status;
 +    CMSampleBufferRef sample_buf;
 +    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +
 +    sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
 +                                                   vtctx->bitstream,
 +                                                   vtctx->bitstream_size);
 +
 +    if (!sample_buf)
 +        return -1;
 +
 +    status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
 +                                               sample_buf,
 +                                               0,       // decodeFlags
 +                                               NULL,    // sourceFrameRefCon
 +                                               0);      // infoFlagsOut
 +    if (status == noErr)
 +        status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
 +
 +    CFRelease(sample_buf);
 +
 +    return status;
 +}
 +
 +static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
 +{
 +    int status;
 +    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +
 +    av_buffer_unref(&frame->buf[0]);
 +
 +    if (!videotoolbox->session || !vtctx->bitstream)
 +        return AVERROR_INVALIDDATA;
 +
 +    status = videotoolbox_session_decode_frame(avctx);
 +
 +    if (status) {
 +        av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%d)\n", status);
 +        return AVERROR_UNKNOWN;
 +    }
 +
 +    if (!vtctx->frame)
 +        return AVERROR_UNKNOWN;
 +
 +    return ff_videotoolbox_buffer_create(vtctx, frame);
 +}
 +
 +static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
 +{
 +    H264Context *h = avctx->priv_data;
 +    AVFrame *frame = h->cur_pic_ptr->f;
 +
 +    return videotoolbox_common_end_frame(avctx, frame);
 +}
 +
 +static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
 +                                         const uint8_t *buffer,
 +                                         uint32_t size)
 +{
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +
 +    return videotoolbox_buffer_copy(vtctx, buffer, size);
 +}
 +
 +static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
 +                                          const uint8_t *buffer,
 +                                          uint32_t size)
 +{
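 +    /* The full packet was already buffered in start_frame; there is
 +     * nothing left to do per slice. */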
 +    return 0;
 +}
 +
 +static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
 +{
 +    MpegEncContext *s = avctx->priv_data;
 +    AVFrame *frame = s->current_picture_ptr->f;
 +
 +    return videotoolbox_common_end_frame(avctx, frame);
 +}
 +
 +static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
 +                                                          AVCodecContext *avctx)
 +{
 +    CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
 +                                                                   0,
 +                                                                   &kCFTypeDictionaryKeyCallBacks,
 +                                                                   &kCFTypeDictionaryValueCallBacks);
 +
 +    CFDictionarySetValue(config_info,
 +                         kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
 +                         kCFBooleanTrue);
 +
 +    if (avctx->extradata_size) {
 +        CFMutableDictionaryRef avc_info;
 +        CFDataRef data = NULL;
 +
 +        avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
 +                                             1,
 +                                             &kCFTypeDictionaryKeyCallBacks,
 +                                             &kCFTypeDictionaryValueCallBacks);
 +
 +        switch (codec_type) {
 +        case kCMVideoCodecType_MPEG4Video:
 +            data = videotoolbox_esds_extradata_create(avctx);
 +            if (data)
 +                CFDictionarySetValue(avc_info, CFSTR("esds"), data);
 +            break;
 +        case kCMVideoCodecType_H264:
 +            data = ff_videotoolbox_avcc_extradata_create(avctx);
 +            if (data)
 +                CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
 +            break;
 +        default:
 +            break;
 +        }
 +
 +        CFDictionarySetValue(config_info,
 +                kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
 +                avc_info);
 +
 +        if (data)
 +            CFRelease(data);
 +
 +        CFRelease(avc_info);
 +    }
 +    return config_info;
 +}
 +
 +static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
 +                                                             int height,
 +                                                             OSType pix_fmt)
 +{
 +    CFMutableDictionaryRef buffer_attributes;
 +    CFMutableDictionaryRef io_surface_properties;
 +    CFNumberRef cv_pix_fmt;
 +    CFNumberRef w;
 +    CFNumberRef h;
 +
 +    w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
 +    h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
 +    cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
 +
 +    buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
 +                                                  4,
 +                                                  &kCFTypeDictionaryKeyCallBacks,
 +                                                  &kCFTypeDictionaryValueCallBacks);
 +    io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
 +                                                      0,
 +                                                      &kCFTypeDictionaryKeyCallBacks,
 +                                                      &kCFTypeDictionaryValueCallBacks);
 +
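 +    /* An empty dictionary under kCVPixelBufferIOSurfacePropertiesKey
 +     * requests IOSurface-backed pixel buffers with default properties. */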
 +    CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
 +    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
 +    CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
 +    CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
 +
 +    CFRelease(io_surface_properties);
 +    CFRelease(cv_pix_fmt);
 +    CFRelease(w);
 +    CFRelease(h);
 +
 +    return buffer_attributes;
 +}
 +
 +static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
 +                                                                   CFDictionaryRef decoder_spec,
 +                                                                   int width,
 +                                                                   int height)
 +{
 +    CMFormatDescriptionRef cm_fmt_desc;
 +    OSStatus status;
 +
 +    status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
 +                                            codec_type,
 +                                            width,
 +                                            height,
 +                                            decoder_spec, // dictionary of extensions
 +                                            &cm_fmt_desc);
 +
 +    if (status)
 +        return NULL;
 +
 +    return cm_fmt_desc;
 +}
 +
 +static int videotoolbox_default_init(AVCodecContext *avctx)
 +{
 +    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
 +    OSStatus status;
 +    VTDecompressionOutputCallbackRecord decoder_cb;
 +    CFDictionaryRef decoder_spec;
 +    CFDictionaryRef buf_attr;
 +
 +    if (!videotoolbox) {
 +        av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
 +        return -1;
 +    }
 +
 +    switch (avctx->codec_id) {
 +    case AV_CODEC_ID_H263:
 +        videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
 +        break;
 +    case AV_CODEC_ID_H264:
 +        videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
 +        break;
 +    case AV_CODEC_ID_MPEG1VIDEO:
 +        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
 +        break;
 +    case AV_CODEC_ID_MPEG2VIDEO:
 +        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
 +        break;
 +    case AV_CODEC_ID_MPEG4:
 +        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
 +        break;
 +    default:
 +        break;
 +    }
 +
 +    decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
 +
 +    videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
 +                                                                decoder_spec,
 +                                                                avctx->width,
 +                                                                avctx->height);
 +    if (!videotoolbox->cm_fmt_desc) {
 +        if (decoder_spec)
 +            CFRelease(decoder_spec);
 +
 +        av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
 +        return -1;
 +    }
 +
 +    buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
 +                                                     avctx->height,
 +                                                     videotoolbox->cv_pix_fmt_type);
 +
 +    decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
 +    decoder_cb.decompressionOutputRefCon   = avctx;
 +
 +    status = VTDecompressionSessionCreate(NULL,                      // allocator
 +                                          videotoolbox->cm_fmt_desc, // videoFormatDescription
 +                                          decoder_spec,              // videoDecoderSpecification
 +                                          buf_attr,                  // destinationImageBufferAttributes
 +                                          &decoder_cb,               // outputCallback
 +                                          &videotoolbox->session);   // decompressionSessionOut
 +
 +    if (decoder_spec)
 +        CFRelease(decoder_spec);
 +    if (buf_attr)
 +        CFRelease(buf_attr);
 +
 +    switch (status) {
 +    case kVTVideoDecoderNotAvailableNowErr:
 +    case kVTVideoDecoderUnsupportedDataFormatErr:
 +        return AVERROR(ENOSYS);
 +    case kVTVideoDecoderMalfunctionErr:
 +        return AVERROR(EINVAL);
 +    case kVTVideoDecoderBadDataErr:
 +        return AVERROR_INVALIDDATA;
 +    case 0:
 +        return 0;
 +    default:
 +        return AVERROR_UNKNOWN;
 +    }
 +}
 +
 +static void videotoolbox_default_free(AVCodecContext *avctx)
 +{
 +    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
 +
 +    if (videotoolbox) {
 +        if (videotoolbox->cm_fmt_desc)
 +            CFRelease(videotoolbox->cm_fmt_desc);
 +
 +        if (videotoolbox->session) {
 +            VTDecompressionSessionInvalidate(videotoolbox->session);
 +            CFRelease(videotoolbox->session);
 +        }
 +    }
 +}
 +
 +AVHWAccel ff_h263_videotoolbox_hwaccel = {
 +    .name           = "h263_videotoolbox",
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_H263,
 +    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
 +    .alloc_frame    = ff_videotoolbox_alloc_frame,
 +    .start_frame    = videotoolbox_mpeg_start_frame,
 +    .decode_slice   = videotoolbox_mpeg_decode_slice,
 +    .end_frame      = videotoolbox_mpeg_end_frame,
 +    .uninit         = ff_videotoolbox_uninit,
 +    .priv_data_size = sizeof(VTContext),
 +};
 +
 +AVHWAccel ff_h264_videotoolbox_hwaccel = {
 +    .name           = "h264_videotoolbox",
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_H264,
 +    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
 +    .alloc_frame    = ff_videotoolbox_alloc_frame,
 +    .start_frame    = ff_videotoolbox_h264_start_frame,
 +    .decode_slice   = ff_videotoolbox_h264_decode_slice,
 +    .end_frame      = videotoolbox_h264_end_frame,
 +    .uninit         = ff_videotoolbox_uninit,
 +    .priv_data_size = sizeof(VTContext),
 +};
 +
 +AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
 +    .name           = "mpeg1_videotoolbox",
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_MPEG1VIDEO,
 +    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
 +    .alloc_frame    = ff_videotoolbox_alloc_frame,
 +    .start_frame    = videotoolbox_mpeg_start_frame,
 +    .decode_slice   = videotoolbox_mpeg_decode_slice,
 +    .end_frame      = videotoolbox_mpeg_end_frame,
 +    .uninit         = ff_videotoolbox_uninit,
 +    .priv_data_size = sizeof(VTContext),
 +};
 +
 +AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
 +    .name           = "mpeg2_videotoolbox",
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_MPEG2VIDEO,
 +    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
 +    .alloc_frame    = ff_videotoolbox_alloc_frame,
 +    .start_frame    = videotoolbox_mpeg_start_frame,
 +    .decode_slice   = videotoolbox_mpeg_decode_slice,
 +    .end_frame      = videotoolbox_mpeg_end_frame,
 +    .uninit         = ff_videotoolbox_uninit,
 +    .priv_data_size = sizeof(VTContext),
 +};
 +
 +AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
 +    .name           = "mpeg4_videotoolbox",
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_MPEG4,
 +    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
 +    .alloc_frame    = ff_videotoolbox_alloc_frame,
 +    .start_frame    = videotoolbox_mpeg_start_frame,
 +    .decode_slice   = videotoolbox_mpeg_decode_slice,
 +    .end_frame      = videotoolbox_mpeg_end_frame,
 +    .uninit         = ff_videotoolbox_uninit,
 +    .priv_data_size = sizeof(VTContext),
 +};
 +
 +AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
 +{
 +    AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
 +
 +    if (ret) {
 +        ret->output_callback = videotoolbox_decoder_callback;
 +        ret->cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
 +    }
 +
 +    return ret;
 +}
 +
 +int av_videotoolbox_default_init(AVCodecContext *avctx)
 +{
 +    return av_videotoolbox_default_init2(avctx, NULL);
 +}
 +
 +int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
 +{
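 +    /* Use the caller-provided context if there is one, otherwise
 +     * allocate a default context (GNU "a ?: b" == "a ? a : b"). */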
 +    avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context();
 +    if (!avctx->hwaccel_context)
 +        return AVERROR(ENOMEM);
 +    return videotoolbox_default_init(avctx);
 +}
 +
 +void av_videotoolbox_default_free(AVCodecContext *avctx)
 +{
 +    videotoolbox_default_free(avctx);
 +    av_freep(&avctx->hwaccel_context);
 +}
 +#endif /* CONFIG_VIDEOTOOLBOX */
Simple merge
  #include <math.h>
  #include <time.h>
  
 +#include "libavutil/opt.h"
  #include "libavutil/random_seed.h"
 +#include "libavutil/timecode.h"
 +#include "libavutil/avassert.h"
 +#include "libavutil/pixdesc.h"
  #include "libavutil/time_internal.h"
  #include "libavcodec/bytestream.h"
- #include "libavcodec/h264.h"
 +#include "libavcodec/dnxhddata.h"
++#include "libavcodec/h264dec.h"
 +#include "libavcodec/internal.h"
  #include "audiointerleave.h"
  #include "avformat.h"
 +#include "avio_internal.h"
  #include "internal.h"
  #include "mxf.h"
 -
 -static const int NTSC_samples_per_frame[] = { 1602, 1601, 1602, 1601, 1602, 0 };
 -static const int PAL_samples_per_frame[]  = { 1920, 0 };
 +#include "config.h"
  
  extern AVOutputFormat ff_mxf_d10_muxer;
 +extern AVOutputFormat ff_mxf_opatom_muxer;
  
  #define EDIT_UNITS_PER_BODY 250
  #define KAG_SIZE 512