Merge remote-tracking branch 'qatar/master'
author: Michael Niedermayer <michaelni@gmx.at>
Tue, 27 Nov 2012 13:36:12 +0000 (14:36 +0100)
committer: Michael Niedermayer <michaelni@gmx.at>
Tue, 27 Nov 2012 13:36:53 +0000 (14:36 +0100)
* qatar/master:
  h264: set Picture.owner2 to the current thread

Merged-by: Michael Niedermayer <michaelni@gmx.at>
1  2 
libavcodec/h264.c
libavcodec/mpegvideo.c

diff --combined libavcodec/h264.c
@@@ -2,20 -2,20 +2,20 @@@
   * H.26L/H.264/AVC/JVT/14496-10/... decoder
   * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
   *
 - * This file is part of Libav.
 + * This file is part of FFmpeg.
   *
 - * Libav is free software; you can redistribute it and/or
 + * FFmpeg is free software; you can redistribute it and/or
   * modify it under the terms of the GNU Lesser General Public
   * License as published by the Free Software Foundation; either
   * version 2.1 of the License, or (at your option) any later version.
   *
 - * Libav is distributed in the hope that it will be useful,
 + * FFmpeg is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   * Lesser General Public License for more details.
   *
   * You should have received a copy of the GNU Lesser General Public
 - * License along with Libav; if not, write to the Free Software
 + * License along with FFmpeg; if not, write to the Free Software
   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
   */
  
   * @author Michael Niedermayer <michaelni@gmx.at>
   */
  
 +#define UNCHECKED_BITSTREAM_READER 1
 +
  #include "libavutil/imgutils.h"
 +#include "libavutil/opt.h"
  #include "internal.h"
  #include "cabac.h"
  #include "cabac_functions.h"
@@@ -53,17 -50,13 +53,17 @@@ const uint16_t ff_h264_mb_sizes[4] = { 
  static const uint8_t rem6[QP_MAX_NUM + 1] = {
      0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2,
      3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
 -    0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
 +    0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2,
 +    3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
 +    0, 1, 2, 3,
  };
  
  static const uint8_t div6[QP_MAX_NUM + 1] = {
      0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3,  3,  3,
      3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6,  6,  6,
 -    7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
 +    7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10,
 +   10,10,10,11,11,11,11,11,11,12,12,12,12,12,12,13,13,13, 13, 13, 13,
 +   14,14,14,14,
  };
  
  static const enum AVPixelFormat hwaccel_pixfmt_list_h264_jpeg_420[] = {
      AV_PIX_FMT_NONE
  };
  
 +int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
 +{
 +    H264Context *h = avctx->priv_data;
 +    return h ? h->sps.num_reorder_frames : 0;
 +}
 +
  /**
   * Check if the top & left blocks are available if needed and
   * change the dc mode so it only uses the available blocks.
@@@ -233,27 -220,21 +233,27 @@@ const uint8_t *ff_h264_decode_nal(H264C
      }
  #endif
  
 -    if (i >= length - 1) { // no escaped 0
 -        *dst_length = length;
 -        *consumed   = length + 1; // +1 for the header
 -        return src;
 -    }
 -
      // use second escape buffer for inter data
      bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0;
 -    av_fast_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx],
 -                   length + FF_INPUT_BUFFER_PADDING_SIZE);
 +
 +    si = h->rbsp_buffer_size[bufidx];
 +    av_fast_padded_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx], length+MAX_MBPAIR_SIZE);
      dst = h->rbsp_buffer[bufidx];
  
      if (dst == NULL)
          return NULL;
  
 +    if(i>=length-1){ //no escaped 0
 +        *dst_length= length;
 +        *consumed= length+1; //+1 for the header
 +        if(h->s.avctx->flags2 & CODEC_FLAG2_FAST){
 +            return src;
 +        }else{
 +            memcpy(dst, src, length);
 +            return dst;
 +        }
 +    }
 +
      memcpy(dst, src, i);
      si = di = i;
      while (si + 2 < length) {
@@@ -388,7 -369,7 +388,7 @@@ static void await_references(H264Contex
      } else {
          int i;
  
 -        assert(IS_8X8(mb_type));
 +        av_assert2(IS_8X8(mb_type));
  
          for (i = 0; i < 4; i++) {
              const int sub_mb_type = h->sub_mb_type[i];
                                    nrefs);
              } else {
                  int j;
 -                assert(IS_SUB_4X4(sub_mb_type));
 +                av_assert2(IS_SUB_4X4(sub_mb_type));
                  for (j = 0; j < 4; j++) {
                      int sub_y_offset = y_offset + 2 * (j & 2);
                      get_lowest_part_y(h, refs, n + j, 4, sub_y_offset,
@@@ -759,7 -740,9 +759,7 @@@ static av_always_inline void prefetch_m
              s->dsp.prefetch(src[1] + off, s->linesize, 4);
              s->dsp.prefetch(src[2] + off, s->linesize, 4);
          } else {
 -            off = ((mx >> 1) << pixel_shift) +
 -                  ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize +
 -                  (64 << pixel_shift);
 +            off= (((mx>>1)+64)<<pixel_shift) + ((my>>1) + (s->mb_x&7))*s->uvlinesize;
              s->dsp.prefetch(src[1] + off, src[2] - src[1], 2);
          }
      }
@@@ -876,7 -859,7 +876,7 @@@ int ff_h264_alloc_tables(H264Context *h
  {
      MpegEncContext *const s = &h->s;
      const int big_mb_num    = s->mb_stride * (s->mb_height + 1);
 -    const int row_mb_num    = s->mb_stride * 2 * s->avctx->thread_count;
 +    const int row_mb_num    = 2*s->mb_stride*FFMAX(s->avctx->thread_count, 1);
      int x, y;
  
      FF_ALLOCZ_OR_GOTO(h->s.avctx, h->intra4x4_pred_mode,
@@@ -983,18 -966,12 +983,18 @@@ static av_cold void common_init(H264Con
      s->height   = s->avctx->height;
      s->codec_id = s->avctx->codec->id;
  
 -    ff_h264dsp_init(&h->h264dsp, 8, 1);
 -    ff_h264_pred_init(&h->hpc, s->codec_id, 8, 1);
 +    s->avctx->bits_per_raw_sample = 8;
 +    h->cur_chroma_format_idc = 1;
 +
 +    ff_h264dsp_init(&h->h264dsp,
 +                    s->avctx->bits_per_raw_sample, h->cur_chroma_format_idc);
 +    ff_h264_pred_init(&h->hpc, s->codec_id,
 +                      s->avctx->bits_per_raw_sample, h->cur_chroma_format_idc);
  
      h->dequant_coeff_pps = -1;
      s->unrestricted_mv   = 1;
  
 +    s->dsp.dct_bits = 16;
      /* needed so that IDCT permutation is known early */
      ff_dsputil_init(&s->dsp, s->avctx);
  
      memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t));
  }
  
 -int ff_h264_decode_extradata(H264Context *h)
 +static int ff_h264_decode_extradata_internal(H264Context *h, const uint8_t *buf, int size)
  {
      AVCodecContext *avctx = h->s.avctx;
  
 -    if (avctx->extradata[0] == 1) {
 +    if (!buf || size <= 0)
 +        return -1;
 +
 +    if (buf[0] == 1) {
          int i, cnt, nalsize;
 -        unsigned char *p = avctx->extradata;
 +        const unsigned char *p = buf;
  
          h->is_avc = 1;
  
 -        if (avctx->extradata_size < 7) {
 +        if (size < 7) {
              av_log(avctx, AV_LOG_ERROR, "avcC too short\n");
              return -1;
          }
          p  += 6;
          for (i = 0; i < cnt; i++) {
              nalsize = AV_RB16(p) + 2;
 -            if (p - avctx->extradata + nalsize > avctx->extradata_size)
 +            if(nalsize > size - (p-buf))
                  return -1;
              if (decode_nal_units(h, p, nalsize) < 0) {
                  av_log(avctx, AV_LOG_ERROR,
          cnt = *(p++); // Number of pps
          for (i = 0; i < cnt; i++) {
              nalsize = AV_RB16(p) + 2;
 -            if (p - avctx->extradata + nalsize > avctx->extradata_size)
 +            if(nalsize > size - (p-buf))
                  return -1;
              if (decode_nal_units(h, p, nalsize) < 0) {
                  av_log(avctx, AV_LOG_ERROR,
              p += nalsize;
          }
          // Now store right nal length size, that will be used to parse all other nals
 -        h->nal_length_size = (avctx->extradata[4] & 0x03) + 1;
 +        h->nal_length_size = (buf[4] & 0x03) + 1;
      } else {
          h->is_avc = 0;
 -        if (decode_nal_units(h, avctx->extradata, avctx->extradata_size) < 0)
 +        if (decode_nal_units(h, buf, size) < 0)
              return -1;
      }
 -    return 0;
 +    return size;
 +}
 +
 +int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size)
 +{
 +    int ret;
 +    h->decoding_extradata = 1;
 +    ret = ff_h264_decode_extradata_internal(h, buf, size);
 +    h->decoding_extradata = 0;
 +    return ret;
  }
  
  av_cold int ff_h264_decode_init(AVCodecContext *avctx)
      for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
          h->last_pocs[i] = INT_MIN;
      h->prev_poc_msb = 1 << 16;
 +    h->prev_frame_num = -1;
      h->x264_build   = -1;
      ff_h264_reset_sei(h);
      if (avctx->codec_id == AV_CODEC_ID_H264) {
 -        if (avctx->ticks_per_frame == 1)
 -            s->avctx->time_base.den *= 2;
 +        if (avctx->ticks_per_frame == 1) {
 +            if(s->avctx->time_base.den < INT_MAX/2) {
 +                s->avctx->time_base.den *= 2;
 +            } else
 +                s->avctx->time_base.num /= 2;
 +        }
          avctx->ticks_per_frame = 2;
      }
  
      if (avctx->extradata_size > 0 && avctx->extradata &&
 -        ff_h264_decode_extradata(h))
 +        ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size) < 0) {
 +        ff_h264_free_context(h);
          return -1;
 +    }
  
      if (h->sps.bitstream_restriction_flag &&
          s->avctx->has_b_frames < h->sps.num_reorder_frames) {
          s->low_delay           = 0;
      }
  
 +    ff_init_cabac_states();
 +
      return 0;
  }
  
@@@ -1186,7 -1142,7 +1186,7 @@@ static int decode_update_thread_context
      int inited = s->context_initialized, err;
      int i;
  
 -    if (dst == src || !s1->context_initialized)
 +    if (dst == src)
          return 0;
  
      err = ff_mpeg_update_thread_context(dst, src);
                 sizeof(H264Context) - sizeof(MpegEncContext));
          memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
          memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
 +
 +        if (s1->context_initialized) {
          if (ff_h264_alloc_tables(h) < 0) {
              av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n");
              return AVERROR(ENOMEM);
          }
          context_init(h);
  
 +        /* frame_start may not be called for the next thread (if it's decoding
 +         * a bottom field) so this has to be allocated here */
 +        h->s.obmc_scratchpad = av_malloc(16 * 6 * s->linesize);
 +        }
 +
          for (i = 0; i < 2; i++) {
              h->rbsp_buffer[i]      = NULL;
              h->rbsp_buffer_size[i] = 0;
  
          h->thread_context[0] = h;
  
 -        /* frame_start may not be called for the next thread (if it's decoding
 -         * a bottom field) so this has to be allocated here */
 -        h->s.obmc_scratchpad = av_malloc(16 * 6 * s->linesize);
 -
          s->dsp.clear_blocks(h->mb);
          s->dsp.clear_blocks(h->mb + (24 * 16 << h->pixel_shift));
      }
                         MAX_DELAYED_PIC_COUNT + 2, s, s1);
  
      h->last_slice_type = h1->last_slice_type;
 +    h->sync            = h1->sync;
  
      if (!s->current_picture_ptr)
          return 0;
@@@ -1302,7 -1254,6 +1302,7 @@@ int ff_h264_frame_start(H264Context *h
       * See decode_nal_units().
       */
      s->current_picture_ptr->f.key_frame = 0;
 +    s->current_picture_ptr->sync        = 0;
      s->current_picture_ptr->mmco_reset  = 0;
  
      assert(s->linesize && s->uvlinesize);
@@@ -1365,6 -1316,7 +1365,6 @@@ static void decode_postinit(H264Contex
      Picture *out = s->current_picture_ptr;
      Picture *cur = s->current_picture_ptr;
      int i, pics, out_of_order, out_idx;
 -    int invalid = 0, cnt = 0;
  
      s->current_picture_ptr->f.qscale_type = FF_QSCALE_TYPE_H264;
      s->current_picture_ptr->f.pict_type   = s->pict_type;
          }
      }
  
 +    cur->mmco_reset = h->mmco_reset;
 +    h->mmco_reset = 0;
      // FIXME do something with unavailable reference frames
  
      /* Sort B-frames into display order */
          s->low_delay           = 0;
      }
  
 +    for (i = 0; 1; i++) {
 +        if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
 +            if(i)
 +                h->last_pocs[i-1] = cur->poc;
 +            break;
 +        } else if(i) {
 +            h->last_pocs[i-1]= h->last_pocs[i];
 +        }
 +    }
 +    out_of_order = MAX_DELAYED_PIC_COUNT - i;
 +    if(   cur->f.pict_type == AV_PICTURE_TYPE_B
 +       || (h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > INT_MIN && h->last_pocs[MAX_DELAYED_PIC_COUNT-1] - h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > 2))
 +        out_of_order = FFMAX(out_of_order, 1);
 +    if(s->avctx->has_b_frames < out_of_order && !h->sps.bitstream_restriction_flag){
 +        av_log(s->avctx, AV_LOG_VERBOSE, "Increasing reorder buffer to %d\n", out_of_order);
 +        s->avctx->has_b_frames = out_of_order;
 +        s->low_delay = 0;
 +    }
 +
      pics = 0;
      while (h->delayed_pic[pics])
          pics++;
  
 -    assert(pics <= MAX_DELAYED_PIC_COUNT);
 +    av_assert0(pics <= MAX_DELAYED_PIC_COUNT);
  
      h->delayed_pic[pics++] = cur;
      if (cur->f.reference == 0)
          cur->f.reference = DELAYED_PIC_REF;
  
 -    /* Frame reordering. This code takes pictures from coding order and sorts
 -     * them by their incremental POC value into display order. It supports POC
 -     * gaps, MMCO reset codes and random resets.
 -     * A "display group" can start either with a IDR frame (f.key_frame = 1),
 -     * and/or can be closed down with a MMCO reset code. In sequences where
 -     * there is no delay, we can't detect that (since the frame was already
 -     * output to the user), so we also set h->mmco_reset to detect the MMCO
 -     * reset code.
 -     * FIXME: if we detect insufficient delays (as per s->avctx->has_b_frames),
 -     * we increase the delay between input and output. All frames affected by
 -     * the lag (e.g. those that should have been output before another frame
 -     * that we already returned to the user) will be dropped. This is a bug
 -     * that we will fix later. */
 -    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
 -        cnt     += out->poc < h->last_pocs[i];
 -        invalid += out->poc == INT_MIN;
 -    }
 -    if (!h->mmco_reset && !cur->f.key_frame &&
 -        cnt + invalid == MAX_DELAYED_PIC_COUNT && cnt > 0) {
 -        h->mmco_reset = 2;
 -        if (pics > 1)
 -            h->delayed_pic[pics - 2]->mmco_reset = 2;
 -    }
 -    if (h->mmco_reset || cur->f.key_frame) {
 -        for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
 -            h->last_pocs[i] = INT_MIN;
 -        cnt     = 0;
 -        invalid = MAX_DELAYED_PIC_COUNT;
 -    }
 -    out     = h->delayed_pic[0];
 +    out = h->delayed_pic[0];
      out_idx = 0;
 -    for (i = 1; i < MAX_DELAYED_PIC_COUNT &&
 -                h->delayed_pic[i] &&
 -                !h->delayed_pic[i - 1]->mmco_reset &&
 -                !h->delayed_pic[i]->f.key_frame;
 +    for (i = 1; h->delayed_pic[i] &&
 +                !h->delayed_pic[i]->f.key_frame &&
 +                !h->delayed_pic[i]->mmco_reset;
           i++)
          if (h->delayed_pic[i]->poc < out->poc) {
              out     = h->delayed_pic[i];
              out_idx = i;
          }
      if (s->avctx->has_b_frames == 0 &&
 -        (h->delayed_pic[0]->f.key_frame || h->mmco_reset))
 +        (h->delayed_pic[0]->f.key_frame || h->delayed_pic[0]->mmco_reset))
          h->next_outputed_poc = INT_MIN;
 -    out_of_order = !out->f.key_frame && !h->mmco_reset &&
 -                   (out->poc < h->next_outputed_poc);
 -
 -    if (h->sps.bitstream_restriction_flag &&
 -        s->avctx->has_b_frames >= h->sps.num_reorder_frames) {
 -    } else if (out_of_order && pics - 1 == s->avctx->has_b_frames &&
 -               s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) {
 -        if (invalid + cnt < MAX_DELAYED_PIC_COUNT) {
 -            s->avctx->has_b_frames = FFMAX(s->avctx->has_b_frames, cnt);
 -        }
 -        s->low_delay = 0;
 -    } else if (s->low_delay &&
 -               ((h->next_outputed_poc != INT_MIN &&
 -                 out->poc > h->next_outputed_poc + 2) ||
 -                cur->f.pict_type == AV_PICTURE_TYPE_B)) {
 -        s->low_delay = 0;
 -        s->avctx->has_b_frames++;
 -    }
 +    out_of_order = out->poc < h->next_outputed_poc;
  
 -    if (pics > s->avctx->has_b_frames) {
 +    if (out_of_order || pics > s->avctx->has_b_frames) {
          out->f.reference &= ~DELAYED_PIC_REF;
          // for frame threading, the owner must be the second field's thread or
          // else the first thread can release the picture and reuse it unsafely
          for (i = out_idx; h->delayed_pic[i]; i++)
              h->delayed_pic[i] = h->delayed_pic[i + 1];
      }
 -    memmove(h->last_pocs, &h->last_pocs[1],
 -            sizeof(*h->last_pocs) * (MAX_DELAYED_PIC_COUNT - 1));
 -    h->last_pocs[MAX_DELAYED_PIC_COUNT - 1] = cur->poc;
      if (!out_of_order && pics > s->avctx->has_b_frames) {
          h->next_output_pic = out;
 -        if (out->mmco_reset) {
 -            if (out_idx > 0) {
 -                h->next_outputed_poc                    = out->poc;
 -                h->delayed_pic[out_idx - 1]->mmco_reset = out->mmco_reset;
 -            } else {
 -                h->next_outputed_poc = INT_MIN;
 -            }
 -        } else {
 -            if (out_idx == 0 && pics > 1 && h->delayed_pic[0]->f.key_frame) {
 -                h->next_outputed_poc = INT_MIN;
 -            } else {
 -                h->next_outputed_poc = out->poc;
 -            }
 -        }
 -        h->mmco_reset = 0;
 +        if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f.key_frame || h->delayed_pic[0]->mmco_reset)) {
 +            h->next_outputed_poc = INT_MIN;
 +        } else
 +            h->next_outputed_poc = out->poc;
      } else {
 -        av_log(s->avctx, AV_LOG_DEBUG, "no picture\n");
 +        av_log(s->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
 +    }
 +
 +    if (h->next_output_pic && h->next_output_pic->sync) {
 +        h->sync |= 2;
      }
  
      if (setup_finished)
@@@ -1802,7 -1790,7 +1802,7 @@@ static av_always_inline void hl_decode_
                          uint64_t tr_high;
                          if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
                              const int topright_avail = (h->topright_samples_available << i) & 0x8000;
 -                            assert(s->mb_y || linesize <= block_offset[i]);
 +                            av_assert2(s->mb_y || linesize <= block_offset[i]);
                              if (!topright_avail) {
                                  if (pixel_shift) {
                                      tr_high  = ((uint16_t *)ptr)[3 - linesize / 2] * 0x0001000100010001ULL;
@@@ -2096,14 -2084,11 +2096,14 @@@ static void implicit_weight_table(H264C
   */
  static void idr(H264Context *h)
  {
 +    int i;
      ff_h264_remove_all_refs(h);
      h->prev_frame_num        = 0;
      h->prev_frame_num_offset = 0;
 -    h->prev_poc_msb          =
 +    h->prev_poc_msb          = 1<<16;
      h->prev_poc_lsb          = 0;
 +    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
 +        h->last_pocs[i] = INT_MIN;
  }
  
  /* forget old pics after a seek */
@@@ -2111,22 -2096,21 +2111,22 @@@ static void flush_dpb(AVCodecContext *a
  {
      H264Context *h = avctx->priv_data;
      int i;
 -    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
 +    for (i=0; i<=MAX_DELAYED_PIC_COUNT; i++) {
          if (h->delayed_pic[i])
              h->delayed_pic[i]->f.reference = 0;
          h->delayed_pic[i] = NULL;
      }
 -    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
 -        h->last_pocs[i] = INT_MIN;
      h->outputed_poc = h->next_outputed_poc = INT_MIN;
      h->prev_interlaced_frame = 1;
      idr(h);
 +    h->prev_frame_num = -1;
      if (h->s.current_picture_ptr)
          h->s.current_picture_ptr->f.reference = 0;
      h->s.first_field = 0;
      ff_h264_reset_sei(h);
      ff_mpeg_flush(avctx);
 +    h->recovery_frame= -1;
 +    h->sync= 0;
  }
  
  static int init_poc(H264Context *h)
@@@ -2228,19 -2212,19 +2228,19 @@@ static void init_scan_tables(H264Contex
  #undef T
      }
      if (h->sps.transform_bypass) { // FIXME same ugly
 -        h->zigzag_scan_q0          = zigzag_scan;
 -        h->zigzag_scan8x8_q0       = ff_zigzag_direct;
 -        h->zigzag_scan8x8_cavlc_q0 = zigzag_scan8x8_cavlc;
 -        h->field_scan_q0           = field_scan;
 -        h->field_scan8x8_q0        = field_scan8x8;
 -        h->field_scan8x8_cavlc_q0  = field_scan8x8_cavlc;
 +        memcpy(h->zigzag_scan_q0          , zigzag_scan             , sizeof(h->zigzag_scan_q0         ));
 +        memcpy(h->zigzag_scan8x8_q0       , ff_zigzag_direct        , sizeof(h->zigzag_scan8x8_q0      ));
 +        memcpy(h->zigzag_scan8x8_cavlc_q0 , zigzag_scan8x8_cavlc    , sizeof(h->zigzag_scan8x8_cavlc_q0));
 +        memcpy(h->field_scan_q0           , field_scan              , sizeof(h->field_scan_q0          ));
 +        memcpy(h->field_scan8x8_q0        , field_scan8x8           , sizeof(h->field_scan8x8_q0       ));
 +        memcpy(h->field_scan8x8_cavlc_q0  , field_scan8x8_cavlc     , sizeof(h->field_scan8x8_cavlc_q0 ));
      } else {
 -        h->zigzag_scan_q0          = h->zigzag_scan;
 -        h->zigzag_scan8x8_q0       = h->zigzag_scan8x8;
 -        h->zigzag_scan8x8_cavlc_q0 = h->zigzag_scan8x8_cavlc;
 -        h->field_scan_q0           = h->field_scan;
 -        h->field_scan8x8_q0        = h->field_scan8x8;
 -        h->field_scan8x8_cavlc_q0  = h->field_scan8x8_cavlc;
 +        memcpy(h->zigzag_scan_q0          , h->zigzag_scan          , sizeof(h->zigzag_scan_q0         ));
 +        memcpy(h->zigzag_scan8x8_q0       , h->zigzag_scan8x8       , sizeof(h->zigzag_scan8x8_q0      ));
 +        memcpy(h->zigzag_scan8x8_cavlc_q0 , h->zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
 +        memcpy(h->field_scan_q0           , h->field_scan           , sizeof(h->field_scan_q0          ));
 +        memcpy(h->field_scan8x8_q0        , h->field_scan8x8        , sizeof(h->field_scan8x8_q0       ));
 +        memcpy(h->field_scan8x8_cavlc_q0  , h->field_scan8x8_cavlc  , sizeof(h->field_scan8x8_cavlc_q0 ));
      }
  }
  
@@@ -2372,11 -2356,10 +2372,11 @@@ static int decode_slice_header(H264Cont
      MpegEncContext *const s0 = &h0->s;
      unsigned int first_mb_in_slice;
      unsigned int pps_id;
 -    int num_ref_idx_active_override_flag, max_refs;
 +    int num_ref_idx_active_override_flag;
      unsigned int slice_type, tmp, i, j;
      int default_ref_list_done = 0;
      int last_pic_structure, last_pic_dropable;
 +    int must_reinit;
  
      /* FIXME: 2tap qpel isn't implemented for high bit depth. */
      if ((s->avctx->flags2 & CODEC_FLAG2_FAST) &&
          s->me.qpel_avg = s->dsp.avg_h264_qpel_pixels_tab;
      }
  
 -    first_mb_in_slice = get_ue_golomb(&s->gb);
 +    first_mb_in_slice = get_ue_golomb_long(&s->gb);
  
      if (first_mb_in_slice == 0) { // FIXME better field boundary detection
          if (h0->current_slice && FIELD_PICTURE) {
      if (slice_type > 9) {
          av_log(h->s.avctx, AV_LOG_ERROR,
                 "slice type too large (%d) at %d %d\n",
 -               h->slice_type, s->mb_x, s->mb_y);
 +               slice_type, s->mb_x, s->mb_y);
          return -1;
      }
      if (slice_type > 4) {
  
      pps_id = get_ue_golomb(&s->gb);
      if (pps_id >= MAX_PPS_COUNT) {
 -        av_log(h->s.avctx, AV_LOG_ERROR, "pps_id out of range\n");
 +        av_log(h->s.avctx, AV_LOG_ERROR, "pps_id %d out of range\n", pps_id);
          return -1;
      }
      if (!h0->pps_buffers[pps_id]) {
      s->avctx->level   = h->sps.level_idc;
      s->avctx->refs    = h->sps.ref_frame_count;
  
 +    must_reinit = (s->context_initialized &&
 +                    (   16*h->sps.mb_width != s->avctx->coded_width
 +                     || 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != s->avctx->coded_height
 +                     || s->avctx->bits_per_raw_sample != h->sps.bit_depth_luma
 +                     || h->cur_chroma_format_idc != h->sps.chroma_format_idc
 +                     || av_cmp_q(h->sps.sar, s->avctx->sample_aspect_ratio)));
 +
 +    if(must_reinit && (h != h0 || (s->avctx->active_thread_type & FF_THREAD_FRAME))) {
 +        av_log_missing_feature(s->avctx,
 +                                "Width/height/bit depth/chroma idc changing with threads", 0);
 +        return AVERROR_PATCHWELCOME;   // width / height changed during parallelized decoding
 +    }
 +
      s->mb_width  = h->sps.mb_width;
      s->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
  
  
      s->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p
  
 -    s->width = 16 * s->mb_width - (2 >> CHROMA444) * FFMIN(h->sps.crop_right, (8 << CHROMA444) - 1);
 -    if (h->sps.frame_mbs_only_flag)
 -        s->height = 16 * s->mb_height - (1 << s->chroma_y_shift) * FFMIN(h->sps.crop_bottom, (16 >> s->chroma_y_shift) - 1);
 -    else
 -        s->height = 16 * s->mb_height - (2 << s->chroma_y_shift) * FFMIN(h->sps.crop_bottom, (16 >> s->chroma_y_shift) - 1);
 +    s->width  = 16 * s->mb_width;
 +    s->height = 16 * s->mb_height;
  
 -    if (FFALIGN(s->avctx->width,  16) == s->width &&
 -        FFALIGN(s->avctx->height, 16) == s->height) {
 -        s->width  = s->avctx->width;
 -        s->height = s->avctx->height;
 -    }
 -
 -    if (s->context_initialized &&
 -        (s->width != s->avctx->width || s->height != s->avctx->height ||
 -         av_cmp_q(h->sps.sar, s->avctx->sample_aspect_ratio))) {
 -        if (h != h0 || (HAVE_THREADS && h->s.avctx->active_thread_type & FF_THREAD_FRAME)) {
 -            av_log_missing_feature(s->avctx,
 -                                   "Width/height changing with threads", 0);
 -            return AVERROR_PATCHWELCOME;   // width / height changed during parallelized decoding
 -        }
 +    if(must_reinit) {
          free_tables(h, 0);
          flush_dpb(s->avctx);
          ff_MPV_common_end(s);
 +        h->list_count = 0;
 +        h->current_slice = 0;
      }
      if (!s->context_initialized) {
          if (h != h0) {
                     "Cannot (re-)initialize context during parallel decoding.\n");
              return -1;
          }
 -
 -        avcodec_set_dimensions(s->avctx, s->width, s->height);
 +        if(   FFALIGN(s->avctx->width , 16                                 ) == s->width
 +           && FFALIGN(s->avctx->height, 16*(2 - h->sps.frame_mbs_only_flag)) == s->height
 +           && !h->sps.crop_right && !h->sps.crop_bottom
 +           && (s->avctx->width != s->width || s->avctx->height && s->height)
 +        ) {
 +            av_log(h->s.avctx, AV_LOG_DEBUG, "Using externally provided dimensions\n");
 +            s->avctx->coded_width  = s->width;
 +            s->avctx->coded_height = s->height;
 +        } else{
 +            avcodec_set_dimensions(s->avctx, s->width, s->height);
 +            s->avctx->width  -= (2>>CHROMA444)*FFMIN(h->sps.crop_right, (8<<CHROMA444)-1);
 +            s->avctx->height -= (1<<s->chroma_y_shift)*FFMIN(h->sps.crop_bottom, (16>>s->chroma_y_shift)-1) * (2 - h->sps.frame_mbs_only_flag);
 +        }
          s->avctx->sample_aspect_ratio = h->sps.sar;
          av_assert0(s->avctx->sample_aspect_ratio.den);
  
 +        if (s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU
 +            && (h->sps.bit_depth_luma != 8 ||
 +                h->sps.chroma_format_idc > 1)) {
 +            av_log(s->avctx, AV_LOG_ERROR,
 +                   "VDPAU decoding does not support video "
 +                   "colorspace\n");
 +            return -1;
 +        }
 +
 +        if (s->avctx->bits_per_raw_sample != h->sps.bit_depth_luma ||
 +            h->cur_chroma_format_idc != h->sps.chroma_format_idc) {
 +            if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 14 && h->sps.bit_depth_luma != 11 && h->sps.bit_depth_luma != 13 &&
 +                (h->sps.bit_depth_luma != 9 || !CHROMA422)) {
 +                s->avctx->bits_per_raw_sample = h->sps.bit_depth_luma;
 +                h->cur_chroma_format_idc = h->sps.chroma_format_idc;
 +                h->pixel_shift = h->sps.bit_depth_luma > 8;
 +
 +                ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma, h->sps.chroma_format_idc);
 +                ff_h264_pred_init(&h->hpc, s->codec_id, h->sps.bit_depth_luma, h->sps.chroma_format_idc);
 +                s->dsp.dct_bits = h->sps.bit_depth_luma > 8 ? 32 : 16;
 +                ff_dsputil_init(&s->dsp, s->avctx);
 +            } else {
 +                av_log(s->avctx, AV_LOG_ERROR, "Unsupported bit depth: %d chroma_idc: %d\n",
 +                       h->sps.bit_depth_luma, h->sps.chroma_format_idc);
 +                return -1;
 +            }
 +        }
 +
          if (h->sps.video_signal_type_present_flag) {
 -            s->avctx->color_range = h->sps.full_range ? AVCOL_RANGE_JPEG
 +            s->avctx->color_range = h->sps.full_range>0 ? AVCOL_RANGE_JPEG
                                                        : AVCOL_RANGE_MPEG;
              if (h->sps.colour_description_present_flag) {
                  s->avctx->color_primaries = h->sps.color_primaries;
              else
                  s->avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
              break;
 -        case 8:
 +        case 12:
 +            if (CHROMA444) {
 +                if (s->avctx->colorspace == AVCOL_SPC_RGB) {
 +                    s->avctx->pix_fmt = AV_PIX_FMT_GBRP12;
 +                } else
 +                    s->avctx->pix_fmt = AV_PIX_FMT_YUV444P12;
 +            } else if (CHROMA422)
 +                s->avctx->pix_fmt = AV_PIX_FMT_YUV422P12;
 +            else
 +                s->avctx->pix_fmt = AV_PIX_FMT_YUV420P12;
 +            break;
 +        case 14:
              if (CHROMA444) {
                  if (s->avctx->colorspace == AVCOL_SPC_RGB) {
 -                    s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
 +                    s->avctx->pix_fmt = AV_PIX_FMT_GBRP14;
                  } else
 +                    s->avctx->pix_fmt = AV_PIX_FMT_YUV444P14;
 +            } else if (CHROMA422)
 +                s->avctx->pix_fmt = AV_PIX_FMT_YUV422P14;
 +            else
 +                s->avctx->pix_fmt = AV_PIX_FMT_YUV420P14;
 +            break;
 +        case 8:
 +            if (CHROMA444) {
                      s->avctx->pix_fmt = s->avctx->color_range == AVCOL_RANGE_JPEG ? AV_PIX_FMT_YUVJ444P
                                                                                    : AV_PIX_FMT_YUV444P;
 +                    if (s->avctx->colorspace == AVCOL_SPC_RGB) {
 +                        s->avctx->pix_fmt = AV_PIX_FMT_GBR24P;
 +                        av_log(h->s.avctx, AV_LOG_DEBUG, "Detected GBR colorspace.\n");
 +                    } else if (s->avctx->colorspace == AVCOL_SPC_YCGCO) {
 +                        av_log(h->s.avctx, AV_LOG_WARNING, "Detected unsupported YCgCo colorspace.\n");
 +                    }
              } else if (CHROMA422) {
                  s->avctx->pix_fmt = s->avctx->color_range == AVCOL_RANGE_JPEG ? AV_PIX_FMT_YUVJ422P
                                                                                : AV_PIX_FMT_YUV422P;
                  c->sps         = h->sps;
                  c->pps         = h->pps;
                  c->pixel_shift = h->pixel_shift;
 +                c->cur_chroma_format_idc = h->cur_chroma_format_idc;
                  init_scan_tables(c);
                  clone_tables(c, h, i);
              }
      if (h->sps.frame_mbs_only_flag) {
          s->picture_structure = PICT_FRAME;
      } else {
 +        if (!h->sps.direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
 +            av_log(h->s.avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
 +            return -1;
 +        }
          if (get_bits1(&s->gb)) { // field_pic_flag
              s->picture_structure = PICT_TOP_FIELD + get_bits1(&s->gb); // bottom_field_flag
          } else {
      } else {
          /* Shorten frame num gaps so we don't have to allocate reference
           * frames just to throw them away */
 -        if (h->frame_num != h->prev_frame_num) {
 +        if (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) {
              int unwrap_prev_frame_num = h->prev_frame_num;
              int max_frame_num         = 1 << h->sps.log2_max_frame_num;
  
              }
          }
  
 -        while (h->frame_num != h->prev_frame_num &&
 +        while (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0 &&
                 h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
              Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
              av_log(h->s.avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
                  s0->first_field         = FIELD_PICTURE;
              } else {
                  if (s0->current_picture_ptr->frame_num != h->frame_num) {
 +                    ff_thread_report_progress((AVFrame*)s0->current_picture_ptr, INT_MAX,
 +                                              s0->picture_structure==PICT_BOTTOM_FIELD);
                      /* This and the previous field had different frame_nums.
                       * Consider this field first in pair. Throw away previous
                       * one except for reference purposes. */
  
      s->current_picture_ptr->frame_num = h->frame_num; // FIXME frame_num cleanup
  
 -    assert(s->mb_num == s->mb_width * s->mb_height);
 +    av_assert1(s->mb_num == s->mb_width * s->mb_height);
      if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE >= s->mb_num ||
          first_mb_in_slice >= s->mb_num) {
          av_log(h->s.avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
      s->resync_mb_y = s->mb_y = (first_mb_in_slice / s->mb_width) << FIELD_OR_MBAFF_PICTURE;
      if (s->picture_structure == PICT_BOTTOM_FIELD)
          s->resync_mb_y = s->mb_y = s->mb_y + 1;
 -    assert(s->mb_y < s->mb_height);
 +    av_assert1(s->mb_y < s->mb_height);
  
      if (s->picture_structure == PICT_FRAME) {
          h->curr_pic_num = h->frame_num;
      h->ref_count[1] = h->pps.ref_count[1];
  
      if (h->slice_type_nos != AV_PICTURE_TYPE_I) {
 +        unsigned max[2];
 +        max[0] = max[1] = s->picture_structure == PICT_FRAME ? 15 : 31;
 +
          if (h->slice_type_nos == AV_PICTURE_TYPE_B)
              h->direct_spatial_mv_pred = get_bits1(&s->gb);
          num_ref_idx_active_override_flag = get_bits1(&s->gb);
  
          if (num_ref_idx_active_override_flag) {
              h->ref_count[0] = get_ue_golomb(&s->gb) + 1;
 -            if (h->ref_count[0] < 1)
 -                return AVERROR_INVALIDDATA;
              if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
                  h->ref_count[1] = get_ue_golomb(&s->gb) + 1;
 -                if (h->ref_count[1] < 1)
 -                    return AVERROR_INVALIDDATA;
 -            }
 +            } else
 +                // full range is spec-ok in this case, even for frames
 +                h->ref_count[1] = 1;
 +        }
 +
 +        if (h->ref_count[0]-1 > max[0] || h->ref_count[1]-1 > max[1]){
 +            av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow %u > %u or %u > %u\n", h->ref_count[0]-1, max[0], h->ref_count[1]-1, max[1]);
 +            h->ref_count[0] = h->ref_count[1] = 1;
 +            return AVERROR_INVALIDDATA;
          }
  
          if (h->slice_type_nos == AV_PICTURE_TYPE_B)
          else
              h->list_count = 1;
      } else
 -        h->list_count = 0;
 -
 -    max_refs = s->picture_structure == PICT_FRAME ? 16 : 32;
 -
 -    if (h->ref_count[0] > max_refs || h->ref_count[1] > max_refs) {
 -        av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n");
 -        h->ref_count[0] = h->ref_count[1] = 1;
 -        return AVERROR_INVALIDDATA;
 -    }
 +        h->ref_count[1]= h->ref_count[0]= h->list_count= 0;
  
      if (!default_ref_list_done)
          ff_h264_fill_default_ref_list(h);
  
      if (h->slice_type_nos != AV_PICTURE_TYPE_I) {
          s->last_picture_ptr = &h->ref_list[0][0];
+         s->last_picture_ptr->owner2 = s;
          ff_copy_picture(&s->last_picture, s->last_picture_ptr);
      }
      if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
          s->next_picture_ptr = &h->ref_list[1][0];
+         s->next_picture_ptr->owner2 = s;
          ff_copy_picture(&s->next_picture, s->next_picture_ptr);
      }
  
  
      h0->last_slice_type = slice_type;
      h->slice_num = ++h0->current_slice;
 -    if (h->slice_num >= MAX_SLICES) {
 -        av_log(s->avctx, AV_LOG_ERROR,
 -               "Too many slices, increase MAX_SLICES and recompile\n");
 +
 +    if (h->slice_num)
 +        h0->slice_row[(h->slice_num-1)&(MAX_SLICES-1)]= s->resync_mb_y;
 +    if (   h0->slice_row[h->slice_num&(MAX_SLICES-1)] + 3 >= s->resync_mb_y
 +        && h0->slice_row[h->slice_num&(MAX_SLICES-1)] <= s->resync_mb_y
 +        && h->slice_num >= MAX_SLICES) {
 +        //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
 +        av_log(s->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", h->slice_num, MAX_SLICES);
      }
  
      for (j = 0; j < 2; j++) {
@@@ -3197,7 -3108,7 +3199,7 @@@ static av_always_inline void fill_filte
          if (USES_LIST(top_type, list)) {
              const int b_xy  = h->mb2b_xy[top_xy] + 3 * b_stride;
              const int b8_xy = 4 * top_xy + 2;
 -            int (*ref2frm)[64] = h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
 +            int (*ref2frm)[64] = (void*)(h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2));
              AV_COPY128(mv_dst - 1 * 8, s->current_picture.f.motion_val[list][b_xy + 0]);
              ref_cache[0 - 1 * 8] =
              ref_cache[1 - 1 * 8] = ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 0]];
              if (USES_LIST(left_type[LTOP], list)) {
                  const int b_xy  = h->mb2b_xy[left_xy[LTOP]] + 3;
                  const int b8_xy = 4 * left_xy[LTOP] + 1;
 -                int (*ref2frm)[64] = h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
 +                int (*ref2frm)[64] =(void*)( h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2));
                  AV_COPY32(mv_dst - 1 +  0, s->current_picture.f.motion_val[list][b_xy + b_stride * 0]);
                  AV_COPY32(mv_dst - 1 +  8, s->current_picture.f.motion_val[list][b_xy + b_stride * 1]);
                  AV_COPY32(mv_dst - 1 + 16, s->current_picture.f.motion_val[list][b_xy + b_stride * 2]);
  
      {
          int8_t *ref = &s->current_picture.f.ref_index[list][4 * mb_xy];
 -        int (*ref2frm)[64] = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
 +        int (*ref2frm)[64] = (void*)(h->ref2frm[h->slice_num & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2));
          uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]], ref2frm[list][ref[1]]) & 0x00FF00FF) * 0x0101;
          uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]], ref2frm[list][ref[3]]) & 0x00FF00FF) * 0x0101;
          AV_WN32A(&ref_cache[0 * 8], ref01);
@@@ -3548,6 -3459,7 +3550,6 @@@ static int decode_slice(struct AVCodecC
          align_get_bits(&s->gb);
  
          /* init cabac */
 -        ff_init_cabac_states(&h->cabac);
          ff_init_cabac_decoder(&h->cabac,
                                s->gb.buffer + get_bits_count(&s->gb) / 8,
                                (get_bits_left(&s->gb) + 7) / 8);
                      loop_filter(h, lf_x_start, s->mb_x + 1);
                  return 0;
              }
 -            if (ret < 0 || h->cabac.bytestream > h->cabac.bytestream_end + 2) {
 +            if (h->cabac.bytestream > h->cabac.bytestream_end + 2 )
 +                av_log(h->s.avctx, AV_LOG_DEBUG, "bytestream overread %td\n", h->cabac.bytestream_end - h->cabac.bytestream);
 +            if (ret < 0 || h->cabac.bytestream > h->cabac.bytestream_end + 4) {
                  av_log(h->s.avctx, AV_LOG_ERROR,
                         "error while decoding MB %d %d, bytestream (%td)\n",
                         s->mb_x, s->mb_y,
                      tprintf(s->avctx, "slice end %d %d\n",
                              get_bits_count(&s->gb), s->gb.size_in_bits);
  
 -                    if (get_bits_left(&s->gb) == 0) {
 +                    if (   get_bits_left(&s->gb) == 0
 +                        || get_bits_left(&s->gb) > 0 && !(s->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
                          ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y,
                                          s->mb_x - 1, s->mb_y,
                                          ER_MB_END & part_mask);
                          return 0;
                      } else {
                          ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y,
 -                                        s->mb_x - 1, s->mb_y,
 +                                        s->mb_x, s->mb_y,
                                          ER_MB_END & part_mask);
  
                          return -1;
@@@ -3718,7 -3627,6 +3720,7 @@@ static int execute_decode_slices(H264Co
              hx                    = h->thread_context[i];
              hx->s.err_recognition = avctx->err_recognition;
              hx->s.error_count     = 0;
 +            hx->x264_build        = h->x264_build;
          }
  
          avctx->execute(avctx, decode_slice, h->thread_context,
@@@ -3749,10 -3657,6 +3751,10 @@@ static int decode_nal_units(H264Contex
      int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
      int nal_index;
  
 +    h->nal_unit_type= 0;
 +
 +    if(!s->slice_context_count)
 +         s->slice_context_count= 1;
      h->max_contexts = s->slice_context_count;
      if (!(s->flags2 & CODEC_FLAG2_CHUNKS)) {
          h->current_slice = 0;
                  s->workaround_bugs |= FF_BUG_TRUNCATED;
  
              if (!(s->workaround_bugs & FF_BUG_TRUNCATED))
 -                while (ptr[dst_length - 1] == 0 && dst_length > 0)
 +                while(dst_length > 0 && ptr[dst_length - 1] == 0)
                      dst_length--;
              bit_length = !dst_length ? 0
                                       : (8 * dst_length -
                                          decode_rbsp_trailing(h, ptr + dst_length - 1));
  
              if (s->avctx->debug & FF_DEBUG_STARTCODE)
 -                av_log(h->s.avctx, AV_LOG_DEBUG,
 -                       "NAL %d at %d/%d length %d\n",
 -                       hx->nal_unit_type, buf_index, buf_size, dst_length);
 +                av_log(h->s.avctx, AV_LOG_DEBUG, "NAL %d/%d at %d/%d length %d pass %d\n", hx->nal_unit_type, hx->nal_ref_idc, buf_index, buf_size, dst_length, pass);
  
              if (h->is_avc && (nalsize != consumed) && nalsize)
                  av_log(h->s.avctx, AV_LOG_DEBUG,
  
  again:
              err = 0;
 +
 +            if (h->decoding_extradata) {
 +                switch (hx->nal_unit_type) {
 +                case NAL_IDR_SLICE:
 +                case NAL_SLICE:
 +                case NAL_DPA:
 +                case NAL_DPB:
 +                case NAL_DPC:
 +                case NAL_AUXILIARY_SLICE:
 +                    av_log(h->s.avctx, AV_LOG_WARNING, "Ignoring NAL %d in global header\n", hx->nal_unit_type);
 +                    hx->nal_unit_type = NAL_FILLER_DATA;
 +                }
 +            }
 +
              switch (hx->nal_unit_type) {
              case NAL_IDR_SLICE:
                  if (h->nal_unit_type != NAL_IDR_SLICE) {
                  if ((err = decode_slice_header(hx, h)))
                      break;
  
 +                if (h->sei_recovery_frame_cnt >= 0 && (h->frame_num != h->sei_recovery_frame_cnt || hx->slice_type_nos != AV_PICTURE_TYPE_I))
 +                    h->valid_recovery_point = 1;
 +
 +                if (   h->sei_recovery_frame_cnt >= 0
 +                    && (   h->recovery_frame<0
 +                        || ((h->recovery_frame - h->frame_num) & ((1 << h->sps.log2_max_frame_num)-1)) > h->sei_recovery_frame_cnt)) {
 +                    h->recovery_frame = (h->frame_num + h->sei_recovery_frame_cnt) %
 +                                        (1 << h->sps.log2_max_frame_num);
 +
 +                    if (!h->valid_recovery_point)
 +                        h->recovery_frame = h->frame_num;
 +                }
 +
                  s->current_picture_ptr->f.key_frame |=
 -                    (hx->nal_unit_type == NAL_IDR_SLICE) ||
 -                    (h->sei_recovery_frame_cnt >= 0);
 +                        (hx->nal_unit_type == NAL_IDR_SLICE);
 +
 +                if (h->recovery_frame == h->frame_num) {
 +                    s->current_picture_ptr->sync |= 1;
 +                    h->recovery_frame = -1;
 +                }
 +
 +                h->sync |= !!s->current_picture_ptr->f.key_frame;
 +                h->sync |= 3*!!(s->flags2 & CODEC_FLAG2_SHOW_ALL);
 +                s->current_picture_ptr->sync |= h->sync;
  
                  if (h->current_slice == 1) {
                      if (!(s->flags2 & CODEC_FLAG2_CHUNKS))
                  init_get_bits(&hx->inter_gb, ptr, bit_length);
                  hx->inter_gb_ptr = &hx->inter_gb;
  
 +                av_log(h->s.avctx, AV_LOG_ERROR, "Partitioned H.264 support is incomplete\n");
 +                break;
 +
                  if (hx->redundant_pic_count == 0 &&
                      hx->intra_gb_ptr &&
                      hx->s.data_partitioning &&
                  break;
              case NAL_SPS:
                  init_get_bits(&s->gb, ptr, bit_length);
 -                if (ff_h264_decode_seq_parameter_set(h) < 0 &&
 -                    h->is_avc && (nalsize != consumed) && nalsize) {
 +                if (ff_h264_decode_seq_parameter_set(h) < 0 && (h->is_avc ? (nalsize != consumed) && nalsize : 1)) {
                      av_log(h->s.avctx, AV_LOG_DEBUG,
                             "SPS decoding failure, trying again with the complete NAL\n");
 -                    init_get_bits(&s->gb, buf + buf_index + 1 - consumed,
 -                                  8 * (nalsize - 1));
 +                    if (h->is_avc)
 +                        av_assert0(next_avc - buf_index + consumed == nalsize);
 +                    init_get_bits(&s->gb, &buf[buf_index + 1 - consumed],
 +                                  8*(next_avc - buf_index + consumed - 1));
                      ff_h264_decode_seq_parameter_set(h);
                  }
  
  
                  if (avctx->has_b_frames < 2)
                      avctx->has_b_frames = !s->low_delay;
 -
 -                if (avctx->bits_per_raw_sample != h->sps.bit_depth_luma ||
 -                    h->cur_chroma_format_idc   != h->sps.chroma_format_idc) {
 -                    if (s->avctx->codec &&
 -                        s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU
 -                        && (h->sps.bit_depth_luma != 8 ||
 -                            h->sps.chroma_format_idc > 1)) {
 -                        av_log(avctx, AV_LOG_ERROR,
 -                               "VDPAU decoding does not support video "
 -                               "colorspace\n");
 -                        buf_index = -1;
 -                        goto end;
 -                    }
 -                    if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10) {
 -                        avctx->bits_per_raw_sample = h->sps.bit_depth_luma;
 -                        h->cur_chroma_format_idc   = h->sps.chroma_format_idc;
 -                        h->pixel_shift             = h->sps.bit_depth_luma > 8;
 -
 -                        ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma,
 -                                        h->sps.chroma_format_idc);
 -                        ff_h264_pred_init(&h->hpc, s->codec_id,
 -                                          h->sps.bit_depth_luma,
 -                                          h->sps.chroma_format_idc);
 -                        s->dsp.dct_bits = h->sps.bit_depth_luma > 8 ? 32 : 16;
 -                        ff_dsputil_init(&s->dsp, s->avctx);
 -                    } else {
 -                        av_log(avctx, AV_LOG_ERROR,
 -                               "Unsupported bit depth: %d\n",
 -                               h->sps.bit_depth_luma);
 -                        buf_index = -1;
 -                        goto end;
 -                    }
 -                }
                  break;
              case NAL_PPS:
                  init_get_bits(&s->gb, ptr, bit_length);
@@@ -4090,15 -3990,15 +4092,15 @@@ static int decode_frame(AVCodecContext 
      MpegEncContext *s  = &h->s;
      AVFrame *pict      = data;
      int buf_index      = 0;
 +    Picture *out;
 +    int i, out_idx;
  
      s->flags  = avctx->flags;
      s->flags2 = avctx->flags2;
  
      /* end of stream, output what is still in the buffers */
 -out:
      if (buf_size == 0) {
 -        Picture *out;
 -        int i, out_idx;
 + out:
  
          s->current_picture_ptr = NULL;
  
  
          return buf_index;
      }
 +    if(h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC && (buf[5]&0x1F) && buf[8]==0x67){
 +        int cnt= buf[5]&0x1f;
 +        const uint8_t *p= buf+6;
 +        while(cnt--){
 +            int nalsize= AV_RB16(p) + 2;
 +            if(nalsize > buf_size - (p-buf) || p[2]!=0x67)
 +                goto not_extra;
 +            p += nalsize;
 +        }
 +        cnt = *(p++);
 +        if(!cnt)
 +            goto not_extra;
 +        while(cnt--){
 +            int nalsize= AV_RB16(p) + 2;
 +            if(nalsize > buf_size - (p-buf) || p[2]!=0x68)
 +                goto not_extra;
 +            p += nalsize;
 +        }
 +
 +        return ff_h264_decode_extradata(h, buf, buf_size);
 +    }
 +not_extra:
  
      buf_index = decode_nal_units(h, buf, buf_size);
      if (buf_index < 0)
          return -1;
  
      if (!s->current_picture_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
 -        buf_size = 0;
 +        av_assert0(buf_index <= buf_size);
          goto out;
      }
  
      if (!(s->flags2 & CODEC_FLAG2_CHUNKS) && !s->current_picture_ptr) {
 -        if (avctx->skip_frame >= AVDISCARD_NONREF)
 -            return 0;
 +        if (avctx->skip_frame >= AVDISCARD_NONREF ||
 +            buf_size >= 4 && !memcmp("Q264", buf, 4))
 +            return buf_size;
          av_log(avctx, AV_LOG_ERROR, "no frame!\n");
          return -1;
      }
  
          field_end(h, 0);
  
 -        if (!h->next_output_pic) {
 -            /* Wait for second field. */
 -            *data_size = 0;
 -        } else {
 +        /* Wait for second field. */
 +        *data_size = 0;
 +        if (h->next_output_pic && (h->next_output_pic->sync || h->sync>1)) {
              *data_size = sizeof(AVFrame);
              *pict      = h->next_output_pic->f;
          }
@@@ -4204,7 -4082,6 +4206,7 @@@ static av_cold int h264_decode_end(AVCo
      H264Context *h    = avctx->priv_data;
      MpegEncContext *s = &h->s;
  
 +    ff_h264_remove_all_refs(h);
      ff_h264_free_context(h);
  
      ff_MPV_common_end(s);
@@@ -4231,26 -4108,6 +4233,26 @@@ static const AVProfile profiles[] = 
      { FF_PROFILE_UNKNOWN },
  };
  
 +static const AVOption h264_options[] = {
 +    {"is_avc", "is avc", offsetof(H264Context, is_avc), FF_OPT_TYPE_INT, {.i64 = 0}, 0, 1, 0},
 +    {"nal_length_size", "nal_length_size", offsetof(H264Context, nal_length_size), FF_OPT_TYPE_INT, {.i64 = 0}, 0, 4, 0},
 +    {NULL}
 +};
 +
 +static const AVClass h264_class = {
 +    .class_name = "H264 Decoder",
 +    .item_name  = av_default_item_name,
 +    .option     = h264_options,
 +    .version    = LIBAVUTIL_VERSION_INT,
 +};
 +
 +static const AVClass h264_vdpau_class = {
 +    .class_name = "H264 VDPAU Decoder",
 +    .item_name  = av_default_item_name,
 +    .option     = h264_options,
 +    .version    = LIBAVUTIL_VERSION_INT,
 +};
 +
  AVCodec ff_h264_decoder = {
      .name                  = "h264",
      .type                  = AVMEDIA_TYPE_VIDEO,
      .init_thread_copy      = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
      .update_thread_context = ONLY_IF_THREADS_ENABLED(decode_update_thread_context),
      .profiles              = NULL_IF_CONFIG_SMALL(profiles),
 +    .priv_class            = &h264_class,
  };
  
  #if CONFIG_H264_VDPAU_DECODER
@@@ -4285,6 -4141,5 +4287,6 @@@ AVCodec ff_h264_vdpau_decoder = 
      .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264,
                                                     AV_PIX_FMT_NONE},
      .profiles       = NULL_IF_CONFIG_SMALL(profiles),
 +    .priv_class     = &h264_vdpau_class,
  };
  #endif
diff --combined libavcodec/mpegvideo.c
@@@ -5,20 -5,20 +5,20 @@@
   *
   * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
   *
 - * This file is part of Libav.
 + * This file is part of FFmpeg.
   *
 - * Libav is free software; you can redistribute it and/or
 + * FFmpeg is free software; you can redistribute it and/or
   * modify it under the terms of the GNU Lesser General Public
   * License as published by the Free Software Foundation; either
   * version 2.1 of the License, or (at your option) any later version.
   *
 - * Libav is distributed in the hope that it will be useful,
 + * FFmpeg is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   * Lesser General Public License for more details.
   *
   * You should have received a copy of the GNU Lesser General Public
 - * License along with Libav; if not, write to the Free Software
 + * License along with FFmpeg; if not, write to the Free Software
   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
   */
  
@@@ -138,9 -138,9 +138,9 @@@ const enum AVPixelFormat ff_hwaccel_pix
      AV_PIX_FMT_NONE
  };
  
 -const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
 +const uint8_t *avpriv_mpv_find_start_code(const uint8_t *av_restrict p,
                                            const uint8_t *end,
 -                                          uint32_t * restrict state)
 +                                          uint32_t *av_restrict state)
  {
      int i;
  
@@@ -422,12 -422,12 +422,12 @@@ static int init_duplicate_context(MpegE
      // edge emu needs blocksize + filter length - 1
      // (= 17x17 for  halfpel / 21x21 for  h264)
      FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
 -                      (s->width + 64) * 2 * 21 * 2, fail);    // (width + edge + align)*interlaced*MBsize*tolerance
 +                      (s->width + 95) * 2 * 21 * 4, fail);    // (width + edge + align)*interlaced*MBsize*tolerance
  
      // FIXME should be linesize instead of s->width * 2
      // but that is not known before get_buffer()
      FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
 -                      (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
 +                      (s->width + 95) * 4 * 16 * 2 * sizeof(uint8_t), fail)
      s->me.temp         = s->me.scratchpad;
      s->rd_scratchpad   = s->me.scratchpad;
      s->b_scratchpad    = s->me.scratchpad;
@@@ -530,32 -530,27 +530,32 @@@ int ff_mpeg_update_thread_context(AVCod
                                    const AVCodecContext *src)
  {
      int i;
 +    int err;
      MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
  
 -    if (dst == src || !s1->context_initialized)
 +    if (dst == src)
          return 0;
  
 +    av_assert0(s != s1);
 +
      // FIXME can parameters change on I-frames?
      // in that case dst may need a reinit
      if (!s->context_initialized) {
          memcpy(s, s1, sizeof(MpegEncContext));
  
          s->avctx                 = dst;
 -        s->picture_range_start  += MAX_PICTURE_COUNT;
 -        s->picture_range_end    += MAX_PICTURE_COUNT;
          s->bitstream_buffer      = NULL;
          s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
  
 -        ff_MPV_common_init(s);
 +        if (s1->context_initialized){
 +            s->picture_range_start  += MAX_PICTURE_COUNT;
 +            s->picture_range_end    += MAX_PICTURE_COUNT;
 +            if((err = ff_MPV_common_init(s)) < 0)
 +                return err;
 +        }
      }
  
      if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
 -        int err;
          s->context_reinit = 0;
          s->height = s1->height;
          s->width  = s1->width;
      s->picture_number       = s1->picture_number;
      s->input_picture_number = s1->input_picture_number;
  
 +    av_assert0(!s->picture || s->picture != s1->picture);
      memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
      memcpy(&s->last_picture, &s1->last_picture,
             (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
      // Error/bug resilience
      s->next_p_frame_damaged = s1->next_p_frame_damaged;
      s->workaround_bugs      = s1->workaround_bugs;
 +    s->padding_bug_score    = s1->padding_bug_score;
  
      // MPEG4 timing info
      memcpy(&s->time_increment_bits, &s1->time_increment_bits,
@@@ -704,32 -697,44 +704,32 @@@ static int init_context_frame(MpegEncCo
      c_size  = s->mb_stride * (s->mb_height + 1);
      yc_size = y_size + 2   * c_size;
  
 -    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
 -                      fail); // error ressilience code looks cleaner with this
 +    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error resilience code looks cleaner with this
      for (y = 0; y < s->mb_height; y++)
          for (x = 0; x < s->mb_width; x++)
              s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
  
 -    s->mb_index2xy[s->mb_height * s->mb_width] =
 -        (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
 +    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
  
      if (s->encoding) {
          /* Allocate MV tables */
 -        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
 -                          mv_table_size * 2 * sizeof(int16_t), fail);
 -        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
 -                          mv_table_size * 2 * sizeof(int16_t), fail);
 -        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
 -                          mv_table_size * 2 * sizeof(int16_t), fail);
 -        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
 -                          mv_table_size * 2 * sizeof(int16_t), fail);
 -        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
 -                          mv_table_size * 2 * sizeof(int16_t), fail);
 -        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
 -                          mv_table_size * 2 * sizeof(int16_t), fail);
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,                 mv_table_size * 2 * sizeof(int16_t), fail)
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,            mv_table_size * 2 * sizeof(int16_t), fail)
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,            mv_table_size * 2 * sizeof(int16_t), fail)
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,      mv_table_size * 2 * sizeof(int16_t), fail)
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,      mv_table_size * 2 * sizeof(int16_t), fail)
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,          mv_table_size * 2 * sizeof(int16_t), fail)
          s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
          s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
          s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
 -        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
 -                                   s->mb_stride + 1;
 -        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
 -                                   s->mb_stride + 1;
 +        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
 +        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
          s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;
  
          /* Allocate MB type table */
 -        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
 -                          sizeof(uint16_t), fail); // needed for encoding
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
  
 -        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
 -                          sizeof(int), fail);
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
  
          FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                           mb_array_size * sizeof(float), fail);
      FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
                        mb_array_size * sizeof(uint8_t), fail);
  
 -    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
 -        (s->flags & CODEC_FLAG_INTERLACED_ME)) {
 +    if (s->codec_id == AV_CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)) {
          /* interlaced direct mode decoding tables */
          for (i = 0; i < 2; i++) {
              int j, k;
                      s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                     s->mb_stride + 1;
                  }
 -                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
 -                                  mb_array_size * 2 * sizeof(uint8_t), fail);
 -                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
 -                                  mv_table_size * 2 * sizeof(int16_t), fail);
 -                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
 -                                            + s->mb_stride + 1;
 +                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
 +                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
 +                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
              }
 -            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
 -                              mb_array_size * 2 * sizeof(uint8_t), fail);
 +            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
          }
      }
      if (s->out_format == FMT_H263) {
          s->coded_block = s->coded_block_base + s->b8_stride + 1;
  
          /* cbp, ac_pred, pred_dir */
 -        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
 -                          mb_array_size * sizeof(uint8_t), fail);
 -        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
 -                          mb_array_size * sizeof(uint8_t), fail);
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail);
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
      }
  
      if (s->h263_pred || s->h263_plus || !s->encoding) {
          /* dc values */
          // MN: we need these for  error resilience of intra-frames
 -        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
 -                          yc_size * sizeof(int16_t), fail);
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
          s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
          s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
          s->dc_val[2] = s->dc_val[1] + c_size;
      FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
      // Note the + 1 is for  a quicker mpeg4 slice_end detection
  
 -    if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
 -        s->avctx->debug_mv) {
 -        s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
 -                    2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
 -        s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
 -                    2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
 -        s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
 -                    2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
 -    }
 -
      return 0;
  fail:
      return AVERROR(ENOMEM);
@@@ -842,34 -865,44 +842,34 @@@ av_cold int ff_MPV_common_init(MpegEncC
      s->flags  = s->avctx->flags;
      s->flags2 = s->avctx->flags2;
  
 -    if (s->width && s->height) {
 -        /* set chroma shifts */
 -        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
 -                                         &s->chroma_x_shift,
 -                                         &s->chroma_y_shift);
 +    /* set chroma shifts */
 +    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
  
 -        /* convert fourcc to upper case */
 -        s->codec_tag          = avpriv_toupper4(s->avctx->codec_tag);
 +    /* convert fourcc to upper case */
 +    s->codec_tag        = avpriv_toupper4(s->avctx->codec_tag);
 +    s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
  
 -        s->stream_codec_tag   = avpriv_toupper4(s->avctx->stream_codec_tag);
 +    s->avctx->coded_frame = &s->current_picture.f;
 +
 +    if (s->encoding) {
 +        if (s->msmpeg4_version) {
 +            FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
 +                                2 * 2 * (MAX_LEVEL + 1) *
 +                                (MAX_RUN + 1) * 2 * sizeof(int), fail);
 +        }
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
  
 -        s->avctx->coded_frame = &s->current_picture.f;
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,          64 * 32   * sizeof(int), fail)
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix,   64 * 32   * sizeof(int), fail)
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,          64 * 32   * sizeof(int), fail)
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,        64 * 32 * 2 * sizeof(uint16_t), fail)
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,        64 * 32 * 2 * sizeof(uint16_t), fail)
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,           MAX_PICTURE_COUNT * sizeof(Picture *), fail)
 +        FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
  
 -        if (s->encoding) {
 -            if (s->msmpeg4_version) {
 -                FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
 -                                  2 * 2 * (MAX_LEVEL + 1) *
 -                                  (MAX_RUN + 1) * 2 * sizeof(int), fail);
 -            }
 -            FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
 -
 -            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
 -                              64 * 32   * sizeof(int), fail);
 -            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
 -                              64 * 32   * sizeof(int), fail);
 -            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
 -                              64 * 32 * 2 * sizeof(uint16_t), fail);
 -            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
 -                              64 * 32 * 2 * sizeof(uint16_t), fail);
 -            FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
 -                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);
 -            FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
 -                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);
 -
 -            if (s->avctx->noise_reduction) {
 -                FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
 -                                  2 * 64 * sizeof(uint16_t), fail);
 -            }
 +        if (s->avctx->noise_reduction) {
 +            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
          }
      }
  
          avcodec_get_frame_defaults(&s->picture[i].f);
      }
  
 -    if (s->width && s->height) {
          if (init_context_frame(s))
              goto fail;
  
          s->parse_context.state = -1;
 -    }
  
 -    s->context_initialized = 1;
 -    s->thread_context[0]   = s;
 +        s->context_initialized = 1;
 +        s->thread_context[0]   = s;
  
 -    if (s->width && s->height) {
 +//     if (s->width && s->height) {
          if (nb_slices > 1) {
              for (i = 1; i < nb_slices; i++) {
                  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
              s->end_mb_y   = s->mb_height;
          }
          s->slice_context_count = nb_slices;
 -    }
 +//     }
  
      return 0;
   fail:
@@@ -965,7 -1000,6 +965,7 @@@ static int free_context_frame(MpegEncCo
      av_freep(&s->er_temp_buffer);
      av_freep(&s->mb_index2xy);
      av_freep(&s->lambda_table);
 +
      av_freep(&s->cplx_tab);
      av_freep(&s->bits_tab);
  
@@@ -1075,10 -1109,6 +1075,10 @@@ void ff_MPV_common_end(MpegEncContext *
      av_freep(&s->avctx->stats_out);
      av_freep(&s->ac_stats);
  
 +    if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
 +    if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
 +    s->q_chroma_intra_matrix=   NULL;
 +    s->q_chroma_intra_matrix16= NULL;
      av_freep(&s->q_intra_matrix);
      av_freep(&s->q_inter_matrix);
      av_freep(&s->q_intra_matrix16);
@@@ -1240,21 -1270,7 +1240,21 @@@ static int find_unused_picture(MpegEncC
          }
      }
  
 -    return AVERROR_INVALIDDATA;
 +    av_log(s->avctx, AV_LOG_FATAL,
 +           "Internal error, picture buffer overflow\n");
 +    /* We could return -1, but the codec would crash trying to draw into a
 +     * non-existing frame anyway. This is safer than waiting for a random crash.
 +     * Also the return of this is never useful, an encoder must only allocate
 +     * as much as allowed in the specification. This has no relationship to how
 +     * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
 +     * enough for such valid streams).
 +     * Plus, a decoder has to check stream validity and remove frames if too
 +     * many reference frames are around. Waiting for "OOM" is not correct at
 +     * all. Similarly, missing reference frames have to be replaced by
 +     * interpolated/MC frames, anything else is a bug in the codec ...
 +     */
 +    abort();
 +    return -1;
  }
  
  int ff_find_unused_picture(MpegEncContext *s, int shared)
@@@ -1302,11 -1318,6 +1302,11 @@@ int ff_MPV_frame_start(MpegEncContext *
      Picture *pic;
      s->mb_skipped = 0;
  
 +    if (!ff_thread_can_start_frame(avctx)) {
 +        av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
 +        return -1;
 +    }
 +
      /* mark & release old frames */
      if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
          if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
                  return i;
              }
              s->last_picture_ptr = &s->picture[i];
 +            s->last_picture_ptr->f.key_frame = 0;
              if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
                  s->last_picture_ptr = NULL;
                  return -1;
              }
 +
 +            if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
 +                for(i=0; i<avctx->height; i++)
 +                    memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
 +            }
 +
              ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
              ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
              s->last_picture_ptr->f.reference = 3;
                  return i;
              }
              s->next_picture_ptr = &s->picture[i];
 +            s->next_picture_ptr->f.key_frame = 0;
              if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
                  s->next_picture_ptr = NULL;
                  return -1;
      if (s->next_picture_ptr)
          ff_copy_picture(&s->next_picture, s->next_picture_ptr);
  
-     if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
-         (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3)) {
+     if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME)) {
          if (s->next_picture_ptr)
              s->next_picture_ptr->owner2 = s;
          if (s->last_picture_ptr)
@@@ -1516,30 -1518,28 +1515,30 @@@ void ff_MPV_frame_end(MpegEncContext *s
      // just to make sure that all data is rendered.
      if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
          ff_xvmc_field_end(s);
 -   } else if ((s->error_count || s->encoding) &&
 +   } else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
                !s->avctx->hwaccel &&
                !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
                s->unrestricted_mv &&
                s->current_picture.f.reference &&
                !s->intra_only &&
 -              !(s->flags & CODEC_FLAG_EMU_EDGE)) {
 -       const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
 -       int hshift = desc->log2_chroma_w;
 -       int vshift = desc->log2_chroma_h;
 -       s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
 -                         s->h_edge_pos, s->v_edge_pos,
 -                         EDGE_WIDTH, EDGE_WIDTH,
 -                         EDGE_TOP | EDGE_BOTTOM);
 -       s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
 -                         s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
 -                         EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
 -                         EDGE_TOP | EDGE_BOTTOM);
 -       s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
 -                         s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
 -                         EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
 -                         EDGE_TOP | EDGE_BOTTOM);
 +              !(s->flags & CODEC_FLAG_EMU_EDGE) &&
 +              !s->avctx->lowres
 +            ) {
 +        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
 +        int hshift = desc->log2_chroma_w;
 +        int vshift = desc->log2_chroma_h;
 +        s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
 +                          s->h_edge_pos, s->v_edge_pos,
 +                          EDGE_WIDTH, EDGE_WIDTH,
 +                          EDGE_TOP | EDGE_BOTTOM);
 +        s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
 +                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
 +                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
 +                          EDGE_TOP | EDGE_BOTTOM);
 +        s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
 +                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
 +                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
 +                          EDGE_TOP | EDGE_BOTTOM);
      }
  
      emms_c();
@@@ -1609,11 -1609,11 +1608,11 @@@ static void draw_line(uint8_t *buf, in
          buf += sx + sy * stride;
          ex  -= sx;
          f    = ((ey - sy) << 16) / ex;
 -        for (x = 0; x = ex; x++) {
 +        for(x= 0; x <= ex; x++){
              y  = (x * f) >> 16;
              fr = (x * f) & 0xFFFF;
              buf[y * stride + x]       += (color * (0x10000 - fr)) >> 16;
 -            buf[(y + 1) * stride + x] += (color *            fr ) >> 16;
 +            if(fr) buf[(y + 1) * stride + x] += (color *            fr ) >> 16;
          }
      } else {
          if (sy > ey) {
          buf += sx + sy * stride;
          ey  -= sy;
          if (ey)
 -            f  = ((ex - sx) << 16) / ey;
 +            f = ((ex - sx) << 16) / ey;
          else
              f = 0;
 -        for (y = 0; y = ey; y++) {
 -            x  = (y * f) >> 16;
 -            fr = (y * f) & 0xFFFF;
 +        for(y= 0; y <= ey; y++){
 +            x  = (y*f) >> 16;
 +            fr = (y*f) & 0xFFFF;
              buf[y * stride + x]     += (color * (0x10000 - fr)) >> 16;
 -            buf[y * stride + x + 1] += (color *            fr ) >> 16;
 +            if(fr) buf[y * stride + x + 1] += (color *            fr ) >> 16;
          }
      }
  }
@@@ -1675,16 -1675,33 +1674,16 @@@ static void draw_arrow(uint8_t *buf, in
   */
  void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
  {
 -    if (s->avctx->hwaccel || !pict || !pict->mb_type)
 +    if (   s->avctx->hwaccel || !pict || !pict->mb_type
 +        || (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
          return;
  
 +
      if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
          int x,y;
  
 -        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
 -        switch (pict->pict_type) {
 -        case AV_PICTURE_TYPE_I:
 -            av_log(s->avctx,AV_LOG_DEBUG,"I\n");
 -            break;
 -        case AV_PICTURE_TYPE_P:
 -            av_log(s->avctx,AV_LOG_DEBUG,"P\n");
 -            break;
 -        case AV_PICTURE_TYPE_B:
 -            av_log(s->avctx,AV_LOG_DEBUG,"B\n");
 -            break;
 -        case AV_PICTURE_TYPE_S:
 -            av_log(s->avctx,AV_LOG_DEBUG,"S\n");
 -            break;
 -        case AV_PICTURE_TYPE_SI:
 -            av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
 -            break;
 -        case AV_PICTURE_TYPE_SP:
 -            av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
 -            break;
 -        }
 +        av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
 +               av_get_picture_type_char(pict->pict_type));
          for (y = 0; y < s->mb_height; y++) {
              for (x = 0; x < s->mb_width; x++) {
                  if (s->avctx->debug & FF_DEBUG_SKIP) {
                      else if (!USES_LIST(mb_type, 0))
                          av_log(s->avctx, AV_LOG_DEBUG, "<");
                      else {
 -                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
 +                        av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                          av_log(s->avctx, AV_LOG_DEBUG, "X");
                      }
  
                                     (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
          s->low_delay = 0; // needed to see the vectors without trashing the buffers
  
 -        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
 -                                         &h_chroma_shift, &v_chroma_shift);
 +        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
 +
          for (i = 0; i < 3; i++) {
 -            memcpy(s->visualization_buffer[i], pict->data[i],
 -                   (i == 0) ? pict->linesize[i] * height:
 -                              pict->linesize[i] * height >> v_chroma_shift);
 +            size_t size= (i == 0) ? pict->linesize[i] * FFALIGN(height, 16):
 +                         pict->linesize[i] * FFALIGN(height, 16) >> v_chroma_shift;
 +            s->visualization_buffer[i]= av_realloc(s->visualization_buffer[i], size);
 +            memcpy(s->visualization_buffer[i], pict->data[i], size);
              pict->data[i] = s->visualization_buffer[i];
          }
          pict->type   = FF_BUFFER_TYPE_COPY;
 +        pict->opaque= NULL;
          ptr          = pict->data[0];
          block_height = 16 >> v_chroma_shift;
  
              int mb_x;
              for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                  const int mb_index = mb_x + mb_y * s->mb_stride;
 -                if ((s->avctx->debug_mv) && pict->motion_val) {
 +                if ((s->avctx->debug_mv) && pict->motion_val[0]) {
                      int type;
                      for (type = 0; type < 3; type++) {
                          int direction = 0;
                                             height, s->linesize, 100);
                              }
                          } else {
 -                              int sx = mb_x * 16 + 8;
 -                              int sy = mb_y * 16 + 8;
 -                              int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
 -                              int mx = pict->motion_val[direction][xy][0] >> shift + sx;
 -                              int my = pict->motion_val[direction][xy][1] >> shift + sy;
 +                              int sx= mb_x * 16 + 8;
 +                              int sy= mb_y * 16 + 8;
 +                              int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
 +                              int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
 +                              int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                                draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                          }
                      }
                  }
 -                if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
 +                if ((s->avctx->debug & FF_DEBUG_VIS_QP)) {
                      uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
                                   0x0101010101010101ULL;
                      int y;
                      }
                  }
                  if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
 -                    pict->motion_val) {
 +                    pict->motion_val[0]) {
                      int mb_type = pict->mb_type[mb_index];
                      uint64_t u,v;
                      int y;
                      } else if (!USES_LIST(mb_type, 0)) {
                          COLOR(0, 48)
                      } else {
 -                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
 +                        av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                          COLOR(300,48)
                      }
  
      }
  }
  
 +static inline int hpel_motion_lowres(MpegEncContext *s,
 +                                     uint8_t *dest, uint8_t *src,
 +                                     int field_based, int field_select,
 +                                     int src_x, int src_y,
 +                                     int width, int height, int stride,
 +                                     int h_edge_pos, int v_edge_pos,
 +                                     int w, int h, h264_chroma_mc_func *pix_op,
 +                                     int motion_x, int motion_y)
 +{
 +    const int lowres   = s->avctx->lowres;
 +    const int op_index = FFMIN(lowres, 2);
 +    const int s_mask   = (2 << lowres) - 1;
 +    int emu = 0;
 +    int sx, sy;
 +
 +    if (s->quarter_sample) {
 +        motion_x /= 2;
 +        motion_y /= 2;
 +    }
 +
 +    sx = motion_x & s_mask;
 +    sy = motion_y & s_mask;
 +    src_x += motion_x >> lowres + 1;
 +    src_y += motion_y >> lowres + 1;
 +
 +    src   += src_y * stride + src_x;
 +
 +    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w,                 0) ||
 +        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
 +        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
 +                                (h + 1) << field_based, src_x,
 +                                src_y   << field_based,
 +                                h_edge_pos,
 +                                v_edge_pos);
 +        src = s->edge_emu_buffer;
 +        emu = 1;
 +    }
 +
 +    sx = (sx << 2) >> lowres;
 +    sy = (sy << 2) >> lowres;
 +    if (field_select)
 +        src += s->linesize;
 +    pix_op[op_index](dest, src, stride, h, sx, sy);
 +    return emu;
 +}
 +
 +/* apply one mpeg motion vector to the three components */
 +static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
 +                                                uint8_t *dest_y,
 +                                                uint8_t *dest_cb,
 +                                                uint8_t *dest_cr,
 +                                                int field_based,
 +                                                int bottom_field,
 +                                                int field_select,
 +                                                uint8_t **ref_picture,
 +                                                h264_chroma_mc_func *pix_op,
 +                                                int motion_x, int motion_y,
 +                                                int h, int mb_y)
 +{
 +    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
 +    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
 +        uvsx, uvsy;
 +    const int lowres     = s->avctx->lowres;
 +    const int op_index   = FFMIN(lowres-1+s->chroma_x_shift, 2);
 +    const int block_s    = 8>>lowres;
 +    const int s_mask     = (2 << lowres) - 1;
 +    const int h_edge_pos = s->h_edge_pos >> lowres;
 +    const int v_edge_pos = s->v_edge_pos >> lowres;
 +    linesize   = s->current_picture.f.linesize[0] << field_based;
 +    uvlinesize = s->current_picture.f.linesize[1] << field_based;
 +
 +    // FIXME obviously not perfect but qpel will not work in lowres anyway
 +    if (s->quarter_sample) {
 +        motion_x /= 2;
 +        motion_y /= 2;
 +    }
 +
 +    if(field_based){
 +        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
 +    }
 +
 +    sx = motion_x & s_mask;
 +    sy = motion_y & s_mask;
 +    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
 +    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
 +
 +    if (s->out_format == FMT_H263) {
 +        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
 +        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
 +        uvsrc_x = src_x >> 1;
 +        uvsrc_y = src_y >> 1;
 +    } else if (s->out_format == FMT_H261) {
 +        // even chroma mv's are full pel in H261
 +        mx      = motion_x / 4;
 +        my      = motion_y / 4;
 +        uvsx    = (2 * mx) & s_mask;
 +        uvsy    = (2 * my) & s_mask;
 +        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
 +        uvsrc_y =    mb_y * block_s + (my >> lowres);
 +    } else {
 +        if(s->chroma_y_shift){
 +            mx      = motion_x / 2;
 +            my      = motion_y / 2;
 +            uvsx    = mx & s_mask;
 +            uvsy    = my & s_mask;
 +            uvsrc_x = s->mb_x * block_s                 + (mx >> lowres + 1);
 +            uvsrc_y =   (mb_y * block_s >> field_based) + (my >> lowres + 1);
 +        } else {
 +            if(s->chroma_x_shift){
 +            //Chroma422
 +                mx = motion_x / 2;
 +                uvsx = mx & s_mask;
 +                uvsy = motion_y & s_mask;
 +                uvsrc_y = src_y;
 +                uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
 +            } else {
 +            //Chroma444
 +                uvsx = motion_x & s_mask;
 +                uvsy = motion_y & s_mask;
 +                uvsrc_x = src_x;
 +                uvsrc_y = src_y;
 +            }
 +        }
 +    }
 +
 +    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
 +    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
 +    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
 +
 +    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s,       0) ||
 +        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
 +        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
 +                                linesize >> field_based, 17, 17 + field_based,
 +                                src_x, src_y << field_based, h_edge_pos,
 +                                v_edge_pos);
 +        ptr_y = s->edge_emu_buffer;
 +        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
 +            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
 +            s->dsp.emulated_edge_mc(uvbuf , ptr_cb, uvlinesize >> field_based, 9,
 +                                    9 + field_based,
 +                                    uvsrc_x, uvsrc_y << field_based,
 +                                    h_edge_pos >> 1, v_edge_pos >> 1);
 +            s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, uvlinesize >> field_based, 9,
 +                                    9 + field_based,
 +                                    uvsrc_x, uvsrc_y << field_based,
 +                                    h_edge_pos >> 1, v_edge_pos >> 1);
 +            ptr_cb = uvbuf;
 +            ptr_cr = uvbuf + 16;
 +        }
 +    }
 +
 +    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
 +    if (bottom_field) {
 +        dest_y  += s->linesize;
 +        dest_cb += s->uvlinesize;
 +        dest_cr += s->uvlinesize;
 +    }
 +
 +    if (field_select) {
 +        ptr_y   += s->linesize;
 +        ptr_cb  += s->uvlinesize;
 +        ptr_cr  += s->uvlinesize;
 +    }
 +
 +    sx = (sx << 2) >> lowres;
 +    sy = (sy << 2) >> lowres;
 +    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
 +
 +    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
 +        uvsx = (uvsx << 2) >> lowres;
 +        uvsy = (uvsy << 2) >> lowres;
 +        if (h >> s->chroma_y_shift) {
 +            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
 +            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
 +        }
 +    }
 +    // FIXME h261 lowres loop filter
 +}
 +
 +static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
 +                                            uint8_t *dest_cb, uint8_t *dest_cr,
 +                                            uint8_t **ref_picture,
 +                                            h264_chroma_mc_func * pix_op,
 +                                            int mx, int my)
 +{
 +    const int lowres     = s->avctx->lowres;
 +    const int op_index   = FFMIN(lowres, 2);
 +    const int block_s    = 8 >> lowres;
 +    const int s_mask     = (2 << lowres) - 1;
 +    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
 +    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
 +    int emu = 0, src_x, src_y, offset, sx, sy;
 +    uint8_t *ptr;
 +
 +    if (s->quarter_sample) {
 +        mx /= 2;
 +        my /= 2;
 +    }
 +
 +    /* In case of 8X8, we construct a single chroma motion vector
 +       with a special rounding */
 +    mx = ff_h263_round_chroma(mx);
 +    my = ff_h263_round_chroma(my);
 +
 +    sx = mx & s_mask;
 +    sy = my & s_mask;
 +    src_x = s->mb_x * block_s + (mx >> lowres + 1);
 +    src_y = s->mb_y * block_s + (my >> lowres + 1);
 +
 +    offset = src_y * s->uvlinesize + src_x;
 +    ptr = ref_picture[1] + offset;
 +    if (s->flags & CODEC_FLAG_EMU_EDGE) {
 +        if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
 +            (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
 +            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
 +                                    9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
 +            ptr = s->edge_emu_buffer;
 +            emu = 1;
 +        }
 +    }
 +    sx = (sx << 2) >> lowres;
 +    sy = (sy << 2) >> lowres;
 +    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
 +
 +    ptr = ref_picture[2] + offset;
 +    if (emu) {
 +        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
 +                                src_x, src_y, h_edge_pos, v_edge_pos);
 +        ptr = s->edge_emu_buffer;
 +    }
 +    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
 +}
 +
 +/**
 + * motion compensation of a single macroblock
 + * @param s context
 + * @param dest_y luma destination pointer
 + * @param dest_cb chroma cb/u destination pointer
 + * @param dest_cr chroma cr/v destination pointer
 + * @param dir direction (0->forward, 1->backward)
 + * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 + * @param pix_op halfpel motion compensation function (average or put normally)
 + * the motion vectors are taken from s->mv and the MV type from s->mv_type
 + */
 +static inline void MPV_motion_lowres(MpegEncContext *s,
 +                                     uint8_t *dest_y, uint8_t *dest_cb,
 +                                     uint8_t *dest_cr,
 +                                     int dir, uint8_t **ref_picture,
 +                                     h264_chroma_mc_func *pix_op)
 +{
 +    int mx, my;
 +    int mb_x, mb_y, i;
 +    const int lowres  = s->avctx->lowres;
 +    const int block_s = 8 >>lowres;
 +
 +    mb_x = s->mb_x;
 +    mb_y = s->mb_y;
 +
 +    switch (s->mv_type) {
 +    case MV_TYPE_16X16:
 +        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
 +                           0, 0, 0,
 +                           ref_picture, pix_op,
 +                           s->mv[dir][0][0], s->mv[dir][0][1],
 +                           2 * block_s, mb_y);
 +        break;
 +    case MV_TYPE_8X8:
 +        mx = 0;
 +        my = 0;
 +        for (i = 0; i < 4; i++) {
 +            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
 +                               s->linesize) * block_s,
 +                               ref_picture[0], 0, 0,
 +                               (2 * mb_x + (i & 1)) * block_s,
 +                               (2 * mb_y + (i >> 1)) * block_s,
 +                               s->width, s->height, s->linesize,
 +                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
 +                               block_s, block_s, pix_op,
 +                               s->mv[dir][i][0], s->mv[dir][i][1]);
 +
 +            mx += s->mv[dir][i][0];
 +            my += s->mv[dir][i][1];
 +        }
 +
 +        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
 +            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
 +                                     pix_op, mx, my);
 +        break;
 +    case MV_TYPE_FIELD:
 +        if (s->picture_structure == PICT_FRAME) {
 +            /* top field */
 +            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
 +                               1, 0, s->field_select[dir][0],
 +                               ref_picture, pix_op,
 +                               s->mv[dir][0][0], s->mv[dir][0][1],
 +                               block_s, mb_y);
 +            /* bottom field */
 +            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
 +                               1, 1, s->field_select[dir][1],
 +                               ref_picture, pix_op,
 +                               s->mv[dir][1][0], s->mv[dir][1][1],
 +                               block_s, mb_y);
 +        } else {
 +            if (s->picture_structure != s->field_select[dir][0] + 1 &&
 +                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
 +                ref_picture = s->current_picture_ptr->f.data;
 +
 +            }
 +            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
 +                               0, 0, s->field_select[dir][0],
 +                               ref_picture, pix_op,
 +                               s->mv[dir][0][0],
 +                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
 +            }
 +        break;
 +    case MV_TYPE_16X8:
 +        for (i = 0; i < 2; i++) {
 +            uint8_t **ref2picture;
 +
 +            if (s->picture_structure == s->field_select[dir][i] + 1 ||
 +                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
 +                ref2picture = ref_picture;
 +            } else {
 +                ref2picture = s->current_picture_ptr->f.data;
 +            }
 +
 +            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
 +                               0, 0, s->field_select[dir][i],
 +                               ref2picture, pix_op,
 +                               s->mv[dir][i][0], s->mv[dir][i][1] +
 +                               2 * block_s * i, block_s, mb_y >> 1);
 +
 +            dest_y  +=  2 * block_s *  s->linesize;
 +            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
 +            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
 +        }
 +        break;
 +    case MV_TYPE_DMV:
 +        if (s->picture_structure == PICT_FRAME) {
 +            for (i = 0; i < 2; i++) {
 +                int j;
 +                for (j = 0; j < 2; j++) {
 +                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
 +                                       1, j, j ^ i,
 +                                       ref_picture, pix_op,
 +                                       s->mv[dir][2 * i + j][0],
 +                                       s->mv[dir][2 * i + j][1],
 +                                       block_s, mb_y);
 +                }
 +                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
 +            }
 +        } else {
 +            for (i = 0; i < 2; i++) {
 +                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
 +                                   0, 0, s->picture_structure != i + 1,
 +                                   ref_picture, pix_op,
 +                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
 +                                   2 * block_s, mb_y >> 1);
 +
 +                // after put we make avg of the same block
 +                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
 +
 +                // opposite parity is always in the same
 +                // frame if this is second field
 +                if (!s->first_field) {
 +                    ref_picture = s->current_picture_ptr->f.data;
 +                }
 +            }
 +        }
 +        break;
 +    default:
 +        av_assert2(0);
 +    }
 +}
 +
  /**
   * find the lowest MB row referenced in the MVs
   */
@@@ -2447,7 -2087,7 +2446,7 @@@ void ff_clean_intra_table_entries(MpegE
   */
  static av_always_inline
  void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
 -                            int is_mpeg12)
 +                            int lowres_flag, int is_mpeg12)
  {
      const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
      if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
          qpel_mc_func (*op_qpix)[16];
          const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
          const int uvlinesize = s->current_picture.f.linesize[1];
 -        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
 -        const int block_size = 8;
 +        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
 +        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
  
          /* avoid copy if macroblock skipped in last frame too */
          /* skip only during decoding as we might trash the buffers during encoding a bit */
  
              if (s->mb_skipped) {
                  s->mb_skipped= 0;
 -                assert(s->pict_type!=AV_PICTURE_TYPE_I);
 +                av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
                  *mbskip_ptr = 1;
              } else if(!s->current_picture.f.reference) {
                  *mbskip_ptr = 1;
                      }
                  }
  
 -                op_qpix= s->me.qpel_put;
 -                if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
 -                    op_pix = s->dsp.put_pixels_tab;
 +                if(lowres_flag){
 +                    h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
 +
 +                    if (s->mv_dir & MV_DIR_FORWARD) {
 +                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
 +                        op_pix = s->dsp.avg_h264_chroma_pixels_tab;
 +                    }
 +                    if (s->mv_dir & MV_DIR_BACKWARD) {
 +                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
 +                    }
                  }else{
 -                    op_pix = s->dsp.put_no_rnd_pixels_tab;
 -                }
 -                if (s->mv_dir & MV_DIR_FORWARD) {
 -                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
 -                    op_pix = s->dsp.avg_pixels_tab;
 -                    op_qpix= s->me.qpel_avg;
 -                }
 -                if (s->mv_dir & MV_DIR_BACKWARD) {
 -                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
 +                    op_qpix= s->me.qpel_put;
 +                    if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
 +                        op_pix = s->dsp.put_pixels_tab;
 +                    }else{
 +                        op_pix = s->dsp.put_no_rnd_pixels_tab;
 +                    }
 +                    if (s->mv_dir & MV_DIR_FORWARD) {
 +                        ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
 +                        op_pix = s->dsp.avg_pixels_tab;
 +                        op_qpix= s->me.qpel_avg;
 +                    }
 +                    if (s->mv_dir & MV_DIR_BACKWARD) {
 +                        ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
 +                    }
                  }
              }
  
                      }else{
                          //chroma422
                          dct_linesize = uvlinesize << s->interlaced_dct;
 -                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
 +                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
  
                          add_dct(s, block[4], 4, dest_cb, dct_linesize);
                          add_dct(s, block[5], 5, dest_cr, dct_linesize);
                          add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                          add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                          if(!s->chroma_x_shift){//Chroma444
 -                            add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
 -                            add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
 -                            add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
 -                            add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
 +                            add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
 +                            add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
 +                            add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
 +                            add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
                          }
                      }
                  }//fi gray
                      }else{
  
                          dct_linesize = uvlinesize << s->interlaced_dct;
 -                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
 +                        dct_offset   = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
  
                          s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                          s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                          s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                          s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                          if(!s->chroma_x_shift){//Chroma444
 -                            s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
 -                            s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
 -                            s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
 -                            s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
 +                            s->dsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
 +                            s->dsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
 +                            s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
 +                            s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
                          }
                      }
                  }//gray
@@@ -2692,12 -2320,10 +2691,12 @@@ skip_idct
  void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
  #if !CONFIG_SMALL
      if(s->out_format == FMT_MPEG1) {
 -        MPV_decode_mb_internal(s, block, 1);
 +        if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
 +        else                 MPV_decode_mb_internal(s, block, 0, 1);
      } else
  #endif
 -        MPV_decode_mb_internal(s, block, 0);
 +    if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
 +    else                  MPV_decode_mb_internal(s, block, 0, 0);
  }
  
  /**
@@@ -2773,7 -2399,7 +2772,7 @@@ void ff_draw_horiz_band(MpegEncContext 
  void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
      const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
      const int uvlinesize = s->current_picture.f.linesize[1];
 -    const int mb_size= 4;
 +    const int mb_size= 4 - s->avctx->lowres;
  
      s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
      s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
              s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
              s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
              s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
 -            assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
 +            av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
          }
      }
  }
@@@ -2818,7 -2444,6 +2817,7 @@@ void ff_mpeg_flush(AVCodecContext *avct
      s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
  
      s->mb_x= s->mb_y= 0;
 +    s->closed_gop= 0;
  
      s->parse_context.state= -1;
      s->parse_context.frame_start_found= 0;
@@@ -2838,7 -2463,10 +2837,7 @@@ static void dct_unquantize_mpeg1_intra_
  
      nCoeffs= s->block_last_index[n];
  
 -    if (n < 4)
 -        block[0] = block[0] * s->y_dc_scale;
 -    else
 -        block[0] = block[0] * s->c_dc_scale;
 +    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
      /* XXX: only mpeg1 */
      quant_matrix = s->intra_matrix;
      for(i=1;i<=nCoeffs;i++) {
@@@ -2897,7 -2525,10 +2896,7 @@@ static void dct_unquantize_mpeg2_intra_
      if(s->alternate_scan) nCoeffs= 63;
      else nCoeffs= s->block_last_index[n];
  
 -    if (n < 4)
 -        block[0] = block[0] * s->y_dc_scale;
 -    else
 -        block[0] = block[0] * s->c_dc_scale;
 +    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
      quant_matrix = s->intra_matrix;
      for(i=1;i<=nCoeffs;i++) {
          int j= s->intra_scantable.permutated[i];
@@@ -2925,8 -2556,10 +2924,8 @@@ static void dct_unquantize_mpeg2_intra_
      if(s->alternate_scan) nCoeffs= 63;
      else nCoeffs= s->block_last_index[n];
  
 -    if (n < 4)
 -        block[0] = block[0] * s->y_dc_scale;
 -    else
 -        block[0] = block[0] * s->c_dc_scale;
 +    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
 +    sum += block[0];
      quant_matrix = s->intra_matrix;
      for(i=1;i<=nCoeffs;i++) {
          int j= s->intra_scantable.permutated[i];
@@@ -2988,7 -2621,10 +2987,7 @@@ static void dct_unquantize_h263_intra_c
      qmul = qscale << 1;
  
      if (!s->h263_aic) {
 -        if (n < 4)
 -            block[0] = block[0] * s->y_dc_scale;
 -        else
 -            block[0] = block[0] * s->c_dc_scale;
 +        block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
          qadd = (qscale - 1) | 1;
      }else{
          qadd = 0;