Merge commit '737d35e33408263c04d7730f5487eed0d04938ba'
author Michael Niedermayer <michaelni@gmx.at>
Sat, 27 Dec 2014 12:37:39 +0000 (13:37 +0100)
committer Michael Niedermayer <michaelni@gmx.at>
Sat, 27 Dec 2014 13:46:15 +0000 (14:46 +0100)
* commit '737d35e33408263c04d7730f5487eed0d04938ba':
  vdpau: add support for the H.264 High 4:4:4 Predictive profile

Conflicts:
libavcodec/vdpau_internal.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
libavcodec/vdpau.c
libavcodec/vdpau_h264.c
libavcodec/vdpau_internal.h

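The merged change teaches vdpau_h264_init() to map FF_PROFILE_H264_HIGH_422, FF_PROFILE_H264_HIGH_444_PREDICTIVE and FF_PROFILE_H264_CAVLC_444 onto VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE when the installed libvdpau headers define it, and ff_vdpau_common_init() now warns and bails out cleanly if the application never attached an AVVDPAUContext. A minimal sketch of that application side, assuming VdpDevice and VdpDecoder creation happen elsewhere (the helper name setup_vdpau is hypothetical, not part of this commit):

    #include <libavcodec/avcodec.h>
    #include <libavcodec/vdpau.h>

    /* Hypothetical helper: pick a VDPAU decoder profile for the opened codec
     * context and attach an AVVDPAUContext so ff_vdpau_common_init() does not
     * take the new "hwaccel_context has not been setup" warning path. */
    static int setup_vdpau(AVCodecContext *avctx)
    {
        VdpDecoderProfile profile;
        AVVDPAUContext *hwctx;

        if (av_vdpau_get_profile(avctx, &profile) < 0)
            return -1;                     /* no usable VDPAU profile */

        hwctx = av_vdpau_alloc_context();
        if (!hwctx)
            return -1;

        /* The application still creates the VdpDecoder for "profile" (from a
         * VdpDevice, e.g. obtained via vdp_device_create_x11()) and then fills
         * hwctx->decoder plus hwctx->render, or the new render2 callback. */
        avctx->hwaccel_context = hwctx;
        return 0;
    }
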
diff --combined libavcodec/vdpau.c
index 7578e62a8639be48df786fcd8af0b1a46c69c27a,8606624a850d46071a2b79a1386bb562485af39f..6f9afa9fcc15b048f17bfcc8351e253149b2774b
@@@ -4,20 -4,20 +4,20 @@@
   *
   * Copyright (c) 2008 NVIDIA
   *
 - * This file is part of Libav.
 + * This file is part of FFmpeg.
   *
 - * Libav is free software; you can redistribute it and/or
 + * FFmpeg is free software; you can redistribute it and/or
   * modify it under the terms of the GNU Lesser General Public
   * License as published by the Free Software Foundation; either
   * version 2.1 of the License, or (at your option) any later version.
   *
 - * Libav is distributed in the hope that it will be useful,
 + * FFmpeg is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   * Lesser General Public License for more details.
   *
   * You should have received a copy of the GNU Lesser General Public
 - * License along with Libav; if not, write to the Free Software
 + * License along with FFmpeg; if not, write to the Free Software
   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
   */
  
@@@ -64,13 -64,6 +64,13 @@@ static int vdpau_error(VdpStatus status
      }
  }
  
 +AVVDPAUContext *av_alloc_vdpaucontext(void)
 +{
 +    return av_vdpau_alloc_context();
 +}
 +
 +MAKE_ACCESSORS(AVVDPAUContext, vdpau_hwaccel, AVVDPAU_Render2, render2)
 +
  int av_vdpau_get_surface_parameters(AVCodecContext *avctx,
                                      VdpChromaType *type,
                                      uint32_t *width, uint32_t *height)
@@@ -129,12 -122,7 +129,12 @@@ int ff_vdpau_common_init(AVCodecContex
  
      vdctx->width            = UINT32_MAX;
      vdctx->height           = UINT32_MAX;
 -    hwctx->reset            = 0;
 +
 +    if (!hwctx) {
 +        vdctx->device  = VDP_INVALID_HANDLE;
 +        av_log(avctx, AV_LOG_WARNING, "hwaccel_context has not been setup by the user application, cannot initialize\n");
 +        return 0;
 +    }
  
      if (hwctx->context.decoder != VDP_INVALID_HANDLE) {
          vdctx->decoder = hwctx->context.decoder;
          vdctx->device  = VDP_INVALID_HANDLE;
          return 0; /* Decoder created by user */
      }
 +    hwctx->reset            = 0;
  
      vdctx->device           = hwctx->device;
      vdctx->get_proc_address = hwctx->get_proc_address;
@@@ -277,7 -264,6 +277,7 @@@ int ff_vdpau_common_end_frame(AVCodecCo
                                struct vdpau_picture_context *pic_ctx)
  {
      VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
 +    AVVDPAUContext *hwctx = avctx->hwaccel_context;
      VdpVideoSurface surf = ff_vdpau_get_surface_id(frame);
      VdpStatus status;
      int val;
      if (val < 0)
          return val;
  
-     hwctx->info = pic_ctx->info;
 +#if FF_API_BUFS_VDPAU
 +FF_DISABLE_DEPRECATION_WARNINGS
++    av_assert0(sizeof(hwctx->info) >= sizeof(pic_ctx->info));
++    memset(&hwctx->info, 0, sizeof(hwctx->info));
++    memcpy(&hwctx->info, &pic_ctx->info, sizeof(pic_ctx->info));
 +    hwctx->bitstream_buffers = pic_ctx->bitstream_buffers;
 +    hwctx->bitstream_buffers_used = pic_ctx->bitstream_buffers_used;
 +    hwctx->bitstream_buffers_allocated = pic_ctx->bitstream_buffers_allocated;
 +FF_ENABLE_DEPRECATION_WARNINGS
 +#endif
 +
 +    if (!hwctx->render && hwctx->render2) {
 +        status = hwctx->render2(avctx, frame, (void *)&pic_ctx->info,
 +                                pic_ctx->bitstream_buffers_used, pic_ctx->bitstream_buffers);
 +    } else
      status = vdctx->render(vdctx->decoder, surf, (void *)&pic_ctx->info,
                             pic_ctx->bitstream_buffers_used,
                             pic_ctx->bitstream_buffers);
  
      av_freep(&pic_ctx->bitstream_buffers);
 +
 +#if FF_API_BUFS_VDPAU
 +FF_DISABLE_DEPRECATION_WARNINGS
 +    hwctx->bitstream_buffers = NULL;
 +    hwctx->bitstream_buffers_used = 0;
 +    hwctx->bitstream_buffers_allocated = 0;
 +FF_ENABLE_DEPRECATION_WARNINGS
 +#endif
 +
      return vdpau_error(status);
  }
  
@@@ -354,343 -318,6 +356,343 @@@ int ff_vdpau_add_buffer(struct vdpau_pi
      return 0;
  }
  
 +/* Obsolete non-hwaccel VDPAU support below... */
 +
 +void ff_vdpau_h264_set_reference_frames(H264Context *h)
 +{
 +    struct vdpau_render_state *render, *render_ref;
 +    VdpReferenceFrameH264 *rf, *rf2;
 +    H264Picture *pic;
 +    int i, list, pic_frame_idx;
 +
 +    render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0];
 +    assert(render);
 +
 +    rf = &render->info.h264.referenceFrames[0];
 +#define H264_RF_COUNT FF_ARRAY_ELEMS(render->info.h264.referenceFrames)
 +
 +    for (list = 0; list < 2; ++list) {
 +        H264Picture **lp = list ? h->long_ref : h->short_ref;
 +        int ls = list ? 16 : h->short_ref_count;
 +
 +        for (i = 0; i < ls; ++i) {
 +            pic = lp[i];
 +            if (!pic || !pic->reference)
 +                continue;
 +            pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;
 +
 +            render_ref = (struct vdpau_render_state *)pic->f.data[0];
 +            assert(render_ref);
 +
 +            rf2 = &render->info.h264.referenceFrames[0];
 +            while (rf2 != rf) {
 +                if (
 +                    (rf2->surface == render_ref->surface)
 +                    && (rf2->is_long_term == pic->long_ref)
 +                    && (rf2->frame_idx == pic_frame_idx)
 +                )
 +                    break;
 +                ++rf2;
 +            }
 +            if (rf2 != rf) {
 +                rf2->top_is_reference    |= (pic->reference & PICT_TOP_FIELD)    ? VDP_TRUE : VDP_FALSE;
 +                rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
 +                continue;
 +            }
 +
 +            if (rf >= &render->info.h264.referenceFrames[H264_RF_COUNT])
 +                continue;
 +
 +            rf->surface             = render_ref->surface;
 +            rf->is_long_term        = pic->long_ref;
 +            rf->top_is_reference    = (pic->reference & PICT_TOP_FIELD)    ? VDP_TRUE : VDP_FALSE;
 +            rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
 +            rf->field_order_cnt[0]  = pic->field_poc[0];
 +            rf->field_order_cnt[1]  = pic->field_poc[1];
 +            rf->frame_idx           = pic_frame_idx;
 +
 +            ++rf;
 +        }
 +    }
 +
 +    for (; rf < &render->info.h264.referenceFrames[H264_RF_COUNT]; ++rf) {
 +        rf->surface             = VDP_INVALID_HANDLE;
 +        rf->is_long_term        = 0;
 +        rf->top_is_reference    = 0;
 +        rf->bottom_is_reference = 0;
 +        rf->field_order_cnt[0]  = 0;
 +        rf->field_order_cnt[1]  = 0;
 +        rf->frame_idx           = 0;
 +    }
 +}
 +
 +void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size)
 +{
 +    struct vdpau_render_state *render = (struct vdpau_render_state*)data;
 +    assert(render);
 +
 +    render->bitstream_buffers= av_fast_realloc(
 +        render->bitstream_buffers,
 +        &render->bitstream_buffers_allocated,
 +        sizeof(*render->bitstream_buffers)*(render->bitstream_buffers_used + 1)
 +    );
 +
 +    render->bitstream_buffers[render->bitstream_buffers_used].struct_version  = VDP_BITSTREAM_BUFFER_VERSION;
 +    render->bitstream_buffers[render->bitstream_buffers_used].bitstream       = buf;
 +    render->bitstream_buffers[render->bitstream_buffers_used].bitstream_bytes = buf_size;
 +    render->bitstream_buffers_used++;
 +}
 +
 +#if CONFIG_H264_VDPAU_DECODER
 +void ff_vdpau_h264_picture_start(H264Context *h)
 +{
 +    struct vdpau_render_state *render;
 +    int i;
 +
 +    render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0];
 +    assert(render);
 +
 +    for (i = 0; i < 2; ++i) {
 +        int foc = h->cur_pic_ptr->field_poc[i];
 +        if (foc == INT_MAX)
 +            foc = 0;
 +        render->info.h264.field_order_cnt[i] = foc;
 +    }
 +
 +    render->info.h264.frame_num = h->frame_num;
 +}
 +
 +void ff_vdpau_h264_picture_complete(H264Context *h)
 +{
 +    struct vdpau_render_state *render;
 +
 +    render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0];
 +    assert(render);
 +
 +    render->info.h264.slice_count = h->slice_num;
 +    if (render->info.h264.slice_count < 1)
 +        return;
 +
 +    render->info.h264.is_reference                           = (h->cur_pic_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE;
 +    render->info.h264.field_pic_flag                         = h->picture_structure != PICT_FRAME;
 +    render->info.h264.bottom_field_flag                      = h->picture_structure == PICT_BOTTOM_FIELD;
 +    render->info.h264.num_ref_frames                         = h->sps.ref_frame_count;
 +    render->info.h264.mb_adaptive_frame_field_flag           = h->sps.mb_aff && !render->info.h264.field_pic_flag;
 +    render->info.h264.constrained_intra_pred_flag            = h->pps.constrained_intra_pred;
 +    render->info.h264.weighted_pred_flag                     = h->pps.weighted_pred;
 +    render->info.h264.weighted_bipred_idc                    = h->pps.weighted_bipred_idc;
 +    render->info.h264.frame_mbs_only_flag                    = h->sps.frame_mbs_only_flag;
 +    render->info.h264.transform_8x8_mode_flag                = h->pps.transform_8x8_mode;
 +    render->info.h264.chroma_qp_index_offset                 = h->pps.chroma_qp_index_offset[0];
 +    render->info.h264.second_chroma_qp_index_offset          = h->pps.chroma_qp_index_offset[1];
 +    render->info.h264.pic_init_qp_minus26                    = h->pps.init_qp - 26;
 +    render->info.h264.num_ref_idx_l0_active_minus1           = h->pps.ref_count[0] - 1;
 +    render->info.h264.num_ref_idx_l1_active_minus1           = h->pps.ref_count[1] - 1;
 +    render->info.h264.log2_max_frame_num_minus4              = h->sps.log2_max_frame_num - 4;
 +    render->info.h264.pic_order_cnt_type                     = h->sps.poc_type;
 +    render->info.h264.log2_max_pic_order_cnt_lsb_minus4      = h->sps.poc_type ? 0 : h->sps.log2_max_poc_lsb - 4;
 +    render->info.h264.delta_pic_order_always_zero_flag       = h->sps.delta_pic_order_always_zero_flag;
 +    render->info.h264.direct_8x8_inference_flag              = h->sps.direct_8x8_inference_flag;
 +    render->info.h264.entropy_coding_mode_flag               = h->pps.cabac;
 +    render->info.h264.pic_order_present_flag                 = h->pps.pic_order_present;
 +    render->info.h264.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
 +    render->info.h264.redundant_pic_cnt_present_flag         = h->pps.redundant_pic_cnt_present;
 +    memcpy(render->info.h264.scaling_lists_4x4, h->pps.scaling_matrix4, sizeof(render->info.h264.scaling_lists_4x4));
 +    memcpy(render->info.h264.scaling_lists_8x8[0], h->pps.scaling_matrix8[0], sizeof(render->info.h264.scaling_lists_8x8[0]));
 +    memcpy(render->info.h264.scaling_lists_8x8[1], h->pps.scaling_matrix8[3], sizeof(render->info.h264.scaling_lists_8x8[0]));
 +
 +    ff_h264_draw_horiz_band(h, 0, h->avctx->height);
 +    render->bitstream_buffers_used = 0;
 +}
 +#endif /* CONFIG_H264_VDPAU_DECODER */
 +
 +#if CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER
 +void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
 +                                    int buf_size, int slice_count)
 +{
 +    struct vdpau_render_state *render, *last, *next;
 +    int i;
 +
 +    if (!s->current_picture_ptr) return;
 +
 +    render = (struct vdpau_render_state *)s->current_picture_ptr->f->data[0];
 +    assert(render);
 +
 +    /* fill VdpPictureInfoMPEG1Or2 struct */
 +    render->info.mpeg.picture_structure          = s->picture_structure;
 +    render->info.mpeg.picture_coding_type        = s->pict_type;
 +    render->info.mpeg.intra_dc_precision         = s->intra_dc_precision;
 +    render->info.mpeg.frame_pred_frame_dct       = s->frame_pred_frame_dct;
 +    render->info.mpeg.concealment_motion_vectors = s->concealment_motion_vectors;
 +    render->info.mpeg.intra_vlc_format           = s->intra_vlc_format;
 +    render->info.mpeg.alternate_scan             = s->alternate_scan;
 +    render->info.mpeg.q_scale_type               = s->q_scale_type;
 +    render->info.mpeg.top_field_first            = s->top_field_first;
 +    render->info.mpeg.full_pel_forward_vector    = s->full_pel[0]; // MPEG-1 only.  Set 0 for MPEG-2
 +    render->info.mpeg.full_pel_backward_vector   = s->full_pel[1]; // MPEG-1 only.  Set 0 for MPEG-2
 +    render->info.mpeg.f_code[0][0]               = s->mpeg_f_code[0][0]; // For MPEG-1 fill both horiz. & vert.
 +    render->info.mpeg.f_code[0][1]               = s->mpeg_f_code[0][1];
 +    render->info.mpeg.f_code[1][0]               = s->mpeg_f_code[1][0];
 +    render->info.mpeg.f_code[1][1]               = s->mpeg_f_code[1][1];
 +    for (i = 0; i < 64; ++i) {
 +        render->info.mpeg.intra_quantizer_matrix[i]     = s->intra_matrix[i];
 +        render->info.mpeg.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
 +    }
 +
 +    render->info.mpeg.forward_reference          = VDP_INVALID_HANDLE;
 +    render->info.mpeg.backward_reference         = VDP_INVALID_HANDLE;
 +
 +    switch(s->pict_type){
 +    case  AV_PICTURE_TYPE_B:
 +        next = (struct vdpau_render_state *)s->next_picture.f->data[0];
 +        assert(next);
 +        render->info.mpeg.backward_reference     = next->surface;
 +        // no return here, going to set forward prediction
 +    case  AV_PICTURE_TYPE_P:
 +        last = (struct vdpau_render_state *)s->last_picture.f->data[0];
 +        if (!last) // FIXME: Does this test make sense?
 +            last = render; // predict second field from the first
 +        render->info.mpeg.forward_reference      = last->surface;
 +    }
 +
 +    ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
 +
 +    render->info.mpeg.slice_count                = slice_count;
 +
 +    if (slice_count)
 +        ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
 +    render->bitstream_buffers_used               = 0;
 +}
 +#endif /* CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER */
 +
 +#if CONFIG_VC1_VDPAU_DECODER
 +void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
 +                                 int buf_size)
 +{
 +    VC1Context *v = s->avctx->priv_data;
 +    struct vdpau_render_state *render, *last, *next;
 +
 +    render = (struct vdpau_render_state *)s->current_picture.f->data[0];
 +    assert(render);
 +
 +    /* fill VdpPictureInfoVC1 struct */
 +    render->info.vc1.frame_coding_mode  = v->fcm ? v->fcm + 1 : 0;
 +    render->info.vc1.postprocflag       = v->postprocflag;
 +    render->info.vc1.pulldown           = v->broadcast;
 +    render->info.vc1.interlace          = v->interlace;
 +    render->info.vc1.tfcntrflag         = v->tfcntrflag;
 +    render->info.vc1.finterpflag        = v->finterpflag;
 +    render->info.vc1.psf                = v->psf;
 +    render->info.vc1.dquant             = v->dquant;
 +    render->info.vc1.panscan_flag       = v->panscanflag;
 +    render->info.vc1.refdist_flag       = v->refdist_flag;
 +    render->info.vc1.quantizer          = v->quantizer_mode;
 +    render->info.vc1.extended_mv        = v->extended_mv;
 +    render->info.vc1.extended_dmv       = v->extended_dmv;
 +    render->info.vc1.overlap            = v->overlap;
 +    render->info.vc1.vstransform        = v->vstransform;
 +    render->info.vc1.loopfilter         = v->s.loop_filter;
 +    render->info.vc1.fastuvmc           = v->fastuvmc;
 +    render->info.vc1.range_mapy_flag    = v->range_mapy_flag;
 +    render->info.vc1.range_mapy         = v->range_mapy;
 +    render->info.vc1.range_mapuv_flag   = v->range_mapuv_flag;
 +    render->info.vc1.range_mapuv        = v->range_mapuv;
 +    /* Specific to simple/main profile only */
 +    render->info.vc1.multires           = v->multires;
 +    render->info.vc1.syncmarker         = v->resync_marker;
 +    render->info.vc1.rangered           = v->rangered | (v->rangeredfrm << 1);
 +    render->info.vc1.maxbframes         = v->s.max_b_frames;
 +
 +    render->info.vc1.deblockEnable      = v->postprocflag & 1;
 +    render->info.vc1.pquant             = v->pq;
 +
 +    render->info.vc1.forward_reference  = VDP_INVALID_HANDLE;
 +    render->info.vc1.backward_reference = VDP_INVALID_HANDLE;
 +
 +    if (v->bi_type)
 +        render->info.vc1.picture_type = 4;
 +    else
 +        render->info.vc1.picture_type = s->pict_type - 1 + s->pict_type / 3;
 +
 +    switch(s->pict_type){
 +    case  AV_PICTURE_TYPE_B:
 +        next = (struct vdpau_render_state *)s->next_picture.f->data[0];
 +        assert(next);
 +        render->info.vc1.backward_reference = next->surface;
 +        // no break here, going to set forward prediction
 +    case  AV_PICTURE_TYPE_P:
 +        last = (struct vdpau_render_state *)s->last_picture.f->data[0];
 +        if (!last) // FIXME: Does this test make sense?
 +            last = render; // predict second field from the first
 +        render->info.vc1.forward_reference = last->surface;
 +    }
 +
 +    ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
 +
 +    render->info.vc1.slice_count          = 1;
 +
 +    ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
 +    render->bitstream_buffers_used        = 0;
 +}
 +#endif /* CONFIG_VC1_VDPAU_DECODER */
 +
 +#if CONFIG_MPEG4_VDPAU_DECODER
 +void ff_vdpau_mpeg4_decode_picture(Mpeg4DecContext *ctx, const uint8_t *buf,
 +                                   int buf_size)
 +{
 +    MpegEncContext *s = &ctx->m;
 +    struct vdpau_render_state *render, *last, *next;
 +    int i;
 +
 +    if (!s->current_picture_ptr) return;
 +
 +    render = (struct vdpau_render_state *)s->current_picture_ptr->f->data[0];
 +    assert(render);
 +
 +    /* fill VdpPictureInfoMPEG4Part2 struct */
 +    render->info.mpeg4.trd[0]                            = s->pp_time;
 +    render->info.mpeg4.trb[0]                            = s->pb_time;
 +    render->info.mpeg4.trd[1]                            = s->pp_field_time >> 1;
 +    render->info.mpeg4.trb[1]                            = s->pb_field_time >> 1;
 +    render->info.mpeg4.vop_time_increment_resolution     = s->avctx->time_base.den;
 +    render->info.mpeg4.vop_coding_type                   = 0;
 +    render->info.mpeg4.vop_fcode_forward                 = s->f_code;
 +    render->info.mpeg4.vop_fcode_backward                = s->b_code;
 +    render->info.mpeg4.resync_marker_disable             = !ctx->resync_marker;
 +    render->info.mpeg4.interlaced                        = !s->progressive_sequence;
 +    render->info.mpeg4.quant_type                        = s->mpeg_quant;
 +    render->info.mpeg4.quarter_sample                    = s->quarter_sample;
 +    render->info.mpeg4.short_video_header                = s->avctx->codec->id == AV_CODEC_ID_H263;
 +    render->info.mpeg4.rounding_control                  = s->no_rounding;
 +    render->info.mpeg4.alternate_vertical_scan_flag      = s->alternate_scan;
 +    render->info.mpeg4.top_field_first                   = s->top_field_first;
 +    for (i = 0; i < 64; ++i) {
 +        render->info.mpeg4.intra_quantizer_matrix[i]     = s->intra_matrix[i];
 +        render->info.mpeg4.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
 +    }
 +    render->info.mpeg4.forward_reference                 = VDP_INVALID_HANDLE;
 +    render->info.mpeg4.backward_reference                = VDP_INVALID_HANDLE;
 +
 +    switch (s->pict_type) {
 +    case AV_PICTURE_TYPE_B:
 +        next = (struct vdpau_render_state *)s->next_picture.f->data[0];
 +        assert(next);
 +        render->info.mpeg4.backward_reference     = next->surface;
 +        render->info.mpeg4.vop_coding_type        = 2;
 +        // no break here, going to set forward prediction
 +    case AV_PICTURE_TYPE_P:
 +        last = (struct vdpau_render_state *)s->last_picture.f->data[0];
 +        assert(last);
 +        render->info.mpeg4.forward_reference      = last->surface;
 +    }
 +
 +    ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
 +
 +    ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
 +    render->bitstream_buffers_used = 0;
 +}
 +#endif /* CONFIG_MPEG4_VDPAU_DECODER */
 +
  int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile)
  {
  #define PROFILE(prof)                      \
diff --combined libavcodec/vdpau_h264.c
index 0b2c0466b4672645da30076fc71cc296718142e6,742b6485138180a97b246360a7fc102f35763cac..1736bbd1787f91a7c1e6d1b05a9d22dc65916429
@@@ -4,20 -4,20 +4,20 @@@
   * Copyright (c) 2008 NVIDIA
  * Copyright (c) 2013 Rémi Denis-Courmont
   *
 - * This file is part of Libav.
 + * This file is part of FFmpeg.
   *
 - * Libav is free software; you can redistribute it and/or
 + * FFmpeg is free software; you can redistribute it and/or
   * modify it under the terms of the GNU Lesser General Public
   * License as published by the Free Software Foundation; either
   * version 2.1 of the License, or (at your option) any later version.
   *
 - * Libav is distributed in the hope that it will be useful,
 + * FFmpeg is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   * Lesser General Public License for more details.
   *
   * You should have received a copy of the GNU Lesser General Public
 - * License along with Libav; if not, write to the Free Software Foundation,
 + * License along with FFmpeg; if not, write to the Free Software Foundation,
   * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
   */
  
@@@ -123,6 -123,9 +123,9 @@@ static int vdpau_h264_start_frame(AVCod
      H264Picture *pic = h->cur_pic_ptr;
      struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
      VdpPictureInfoH264 *info = &pic_ctx->info.h264;
+ #ifdef VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE
+     VdpPictureInfoH264Predictive *info2 = &pic_ctx->info.h264_predictive;
+ #endif
  
      /* init VdpPictureInfoH264 */
      info->slice_count                            = 0;
      info->log2_max_pic_order_cnt_lsb_minus4      = h->sps.poc_type ? 0 : h->sps.log2_max_poc_lsb - 4;
      info->delta_pic_order_always_zero_flag       = h->sps.delta_pic_order_always_zero_flag;
      info->direct_8x8_inference_flag              = h->sps.direct_8x8_inference_flag;
+ #ifdef VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE
+     info2->qpprime_y_zero_transform_bypass_flag  = h->sps.transform_bypass;
+     info2->separate_colour_plane_flag            = h->sps.residual_color_transform_flag;
+ #endif
      info->entropy_coding_mode_flag               = h->pps.cabac;
      info->pic_order_present_flag                 = h->pps.pic_order_present;
      info->deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
@@@ -227,6 -234,18 +234,18 @@@ static int vdpau_h264_init(AVCodecConte
      case FF_PROFILE_H264_EXTENDED:
          profile = VDP_DECODER_PROFILE_H264_EXTENDED;
          break;
+ #endif
+     case FF_PROFILE_H264_HIGH_10:
+         /* XXX: High 10 can be treated as High so long as only 8-bit
+          * formats are supported. */
+         profile = VDP_DECODER_PROFILE_H264_HIGH;
+         break;
+ #ifdef VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE
+     case FF_PROFILE_H264_HIGH_422:
+     case FF_PROFILE_H264_HIGH_444_PREDICTIVE:
+     case FF_PROFILE_H264_CAVLC_444:
+         profile = VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE;
+         break;
  #endif
      default:
          return AVERROR(ENOTSUP);
diff --combined libavcodec/vdpau_internal.h
index e1ea4306f2a18ad20b70bf8494f375e91ac70096,9cc953887a91ee15ea866075e394006861edee7f..e5fe63dd197c102eacd66f53f4e2ec2054ee89e7
@@@ -4,35 -4,30 +4,35 @@@
   *
   * Copyright (C) 2008 NVIDIA
   *
 - * This file is part of Libav.
 + * This file is part of FFmpeg.
   *
 - * Libav is free software; you can redistribute it and/or
 + * FFmpeg is free software; you can redistribute it and/or
   * modify it under the terms of the GNU Lesser General Public
   * License as published by the Free Software Foundation; either
   * version 2.1 of the License, or (at your option) any later version.
   *
 - * Libav is distributed in the hope that it will be useful,
 + * FFmpeg is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   * Lesser General Public License for more details.
   *
   * You should have received a copy of the GNU Lesser General Public
 - * License along with Libav; if not, write to the Free Software
 + * License along with FFmpeg; if not, write to the Free Software
   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
   */
  
  #ifndef AVCODEC_VDPAU_INTERNAL_H
  #define AVCODEC_VDPAU_INTERNAL_H
  
 +#include "config.h"
  #include <stdint.h>
 +#if CONFIG_VDPAU
  #include <vdpau/vdpau.h>
 +#endif
 +#include "h264.h"
  
  #include "avcodec.h"
 +#include "mpeg4video.h"
  #include "mpegvideo.h"
  #include "version.h"
  
@@@ -42,19 -37,16 +42,20 @@@ static inline uintptr_t ff_vdpau_get_su
      return (uintptr_t)pic->data[3];
  }
  
- #if !FF_API_BUFS_VDPAU
- union AVVDPAUPictureInfo {
 +struct vdpau_picture_context;
 +#if CONFIG_VDPAU
+ union VDPAUPictureInfo {
      VdpPictureInfoH264        h264;
      VdpPictureInfoMPEG1Or2    mpeg;
      VdpPictureInfoVC1          vc1;
      VdpPictureInfoMPEG4Part2 mpeg4;
+ #ifdef VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE
+     VdpPictureInfoH264Predictive h264_predictive;
+ #endif
  };
- #else
 +#include "vdpau.h"
- #endif
 +
  typedef struct VDPAUHWContext {
      AVVDPAUContext context;
      VdpDevice device;
@@@ -92,7 -84,7 +93,7 @@@ struct vdpau_picture_context 
      /**
       * VDPAU picture information.
       */
-     union AVVDPAUPictureInfo info;
+     union VDPAUPictureInfo info;
  
      /**
       * Allocated size of the bitstream_buffers table.
  
  int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile,
                           int level);
 +#endif //CONFIG_VDPAU
 +
  int ff_vdpau_common_uninit(AVCodecContext *avctx);
  
  int ff_vdpau_common_start_frame(struct vdpau_picture_context *pic,
@@@ -124,21 -114,4 +125,21 @@@ int ff_vdpau_mpeg_end_frame(AVCodecCont
  int ff_vdpau_add_buffer(struct vdpau_picture_context *pic, const uint8_t *buf,
                          uint32_t buf_size);
  
 +
 +void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf,
 +                             int buf_size);
 +
 +void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
 +                                    int buf_size, int slice_count);
 +
 +void ff_vdpau_h264_picture_start(H264Context *h);
 +void ff_vdpau_h264_set_reference_frames(H264Context *h);
 +void ff_vdpau_h264_picture_complete(H264Context *h);
 +
 +void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
 +                                 int buf_size);
 +
 +void ff_vdpau_mpeg4_decode_picture(Mpeg4DecContext *s, const uint8_t *buf,
 +                                   int buf_size);
 +
  #endif /* AVCODEC_VDPAU_INTERNAL_H */