int16_t position[3][2];
} AVPanScan;
-#define FF_COMMON_FRAME \
- /**\
- * pointer to the picture planes.\
- * This might be different from the first allocated byte\
- * - encoding: \
- * - decoding: \
- */\
- uint8_t *data[4];\
- int linesize[4];\
- /**\
- * pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer.\
- * This isn't used by libavcodec unless the default get/release_buffer() is used.\
- * - encoding: \
- * - decoding: \
- */\
- uint8_t *base[4];\
- /**\
- * 1 -> keyframe, 0-> not\
- * - encoding: Set by libavcodec.\
- * - decoding: Set by libavcodec.\
- */\
- int key_frame;\
-\
- /**\
- * Picture type of the frame, see ?_TYPE below.\
- * - encoding: Set by libavcodec. for coded_picture (and set by user for input).\
- * - decoding: Set by libavcodec.\
- */\
- enum AVPictureType pict_type;\
-\
- /**\
- * presentation timestamp in time_base units (time when frame should be shown to user)\
- * If AV_NOPTS_VALUE then frame_rate = 1/time_base will be assumed.\
- * - encoding: MUST be set by user.\
- * - decoding: Set by libavcodec.\
- */\
- int64_t pts;\
-\
- /**\
- * picture number in bitstream order\
- * - encoding: set by\
- * - decoding: Set by libavcodec.\
- */\
- int coded_picture_number;\
- /**\
- * picture number in display order\
- * - encoding: set by\
- * - decoding: Set by libavcodec.\
- */\
- int display_picture_number;\
-\
- /**\
- * quality (between 1 (good) and FF_LAMBDA_MAX (bad)) \
- * - encoding: Set by libavcodec. for coded_picture (and set by user for input).\
- * - decoding: Set by libavcodec.\
- */\
- int quality; \
-\
- /**\
- * buffer age (1->was last buffer and dint change, 2->..., ...).\
- * Set to INT_MAX if the buffer has not been used yet.\
- * - encoding: unused\
- * - decoding: MUST be set by get_buffer().\
- */\
- int age;\
-\
- /**\
- * is this picture used as reference\
- * The values for this are the same as the MpegEncContext.picture_structure\
- * variable, that is 1->top field, 2->bottom field, 3->frame/both fields.\
- * Set to 4 for delayed, non-reference frames.\
- * - encoding: unused\
- * - decoding: Set by libavcodec. (before get_buffer() call)).\
- */\
- int reference;\
-\
- /**\
- * QP table\
- * - encoding: unused\
- * - decoding: Set by libavcodec.\
- */\
- int8_t *qscale_table;\
- /**\
- * QP store stride\
- * - encoding: unused\
- * - decoding: Set by libavcodec.\
- */\
- int qstride;\
-\
- /**\
- * mbskip_table[mb]>=1 if MB didn't change\
- * stride= mb_width = (width+15)>>4\
- * - encoding: unused\
- * - decoding: Set by libavcodec.\
- */\
- uint8_t *mbskip_table;\
-\
- /**\
- * motion vector table\
- * @code\
- * example:\
- * int mv_sample_log2= 4 - motion_subsample_log2;\
- * int mb_width= (width+15)>>4;\
- * int mv_stride= (mb_width << mv_sample_log2) + 1;\
- * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];\
- * @endcode\
- * - encoding: Set by user.\
- * - decoding: Set by libavcodec.\
- */\
- int16_t (*motion_val[2])[2];\
-\
- /**\
- * macroblock type table\
- * mb_type_base + mb_width + 2\
- * - encoding: Set by user.\
- * - decoding: Set by libavcodec.\
- */\
- uint32_t *mb_type;\
-\
- /**\
- * log2 of the size of the block which a single vector in motion_val represents: \
- * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)\
- * - encoding: unused\
- * - decoding: Set by libavcodec.\
- */\
- uint8_t motion_subsample_log2;\
-\
- /**\
- * for some private data of the user\
- * - encoding: unused\
- * - decoding: Set by user.\
- */\
- void *opaque;\
-\
- /**\
- * error\
- * - encoding: Set by libavcodec. if flags&CODEC_FLAG_PSNR.\
- * - decoding: unused\
- */\
- uint64_t error[4];\
-\
- /**\
- * type of the buffer (to keep track of who has to deallocate data[*])\
- * - encoding: Set by the one who allocates it.\
- * - decoding: Set by the one who allocates it.\
- * Note: User allocated (direct rendering) & internal buffers cannot coexist currently.\
- */\
- int type;\
- \
- /**\
- * When decoding, this signals how much the picture must be delayed.\
- * extra_delay = repeat_pict / (2*fps)\
- * - encoding: unused\
- * - decoding: Set by libavcodec.\
- */\
- int repeat_pict;\
- \
- /**\
- * \
- */\
- int qscale_type;\
- \
- /**\
- * The content of the picture is interlaced.\
- * - encoding: Set by user.\
- * - decoding: Set by libavcodec. (default 0)\
- */\
- int interlaced_frame;\
- \
- /**\
- * If the content is interlaced, is top field displayed first.\
- * - encoding: Set by user.\
- * - decoding: Set by libavcodec.\
- */\
- int top_field_first;\
- \
- /**\
- * Pan scan.\
- * - encoding: Set by user.\
- * - decoding: Set by libavcodec.\
- */\
- AVPanScan *pan_scan;\
- \
- /**\
- * Tell user application that palette has changed from previous frame.\
- * - encoding: ??? (no palette-enabled encoder yet)\
- * - decoding: Set by libavcodec. (default 0).\
- */\
- int palette_has_changed;\
- \
- /**\
- * codec suggestion on buffer type if != 0\
- * - encoding: unused\
- * - decoding: Set by libavcodec. (before get_buffer() call)).\
- */\
- int buffer_hints;\
-\
- /**\
- * DCT coefficients\
- * - encoding: unused\
- * - decoding: Set by libavcodec.\
- */\
- short *dct_coeff;\
-\
- /**\
- * motion reference frame index\
- * the order in which these are stored can depend on the codec.\
- * - encoding: Set by user.\
- * - decoding: Set by libavcodec.\
- */\
- int8_t *ref_index[2];\
-\
- /**\
- * reordered opaque 64bit (generally an integer or a double precision float\
- * PTS but can be anything). \
- * The user sets AVCodecContext.reordered_opaque to represent the input at\
- * that time,\
- * the decoder reorders values as needed and sets AVFrame.reordered_opaque\
- * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque \
- * @deprecated in favor of pkt_pts\
- * - encoding: unused\
- * - decoding: Read by user.\
- */\
- int64_t reordered_opaque;\
-\
- /**\
- * hardware accelerator private data (Libav-allocated)\
- * - encoding: unused\
- * - decoding: Set by libavcodec\
- */\
- void *hwaccel_picture_private;\
-\
- /**\
- * reordered pts from the last AVPacket that has been input into the decoder\
- * - encoding: unused\
- * - decoding: Read by user.\
- */\
- int64_t pkt_pts;\
-\
- /**\
- * dts from the last AVPacket that has been input into the decoder\
- * - encoding: unused\
- * - decoding: Read by user.\
- */\
- int64_t pkt_dts;\
-\
- /**\
- * the AVCodecContext which ff_thread_get_buffer() was last called on\
- * - encoding: Set by libavcodec.\
- * - decoding: Set by libavcodec.\
- */\
- struct AVCodecContext *owner;\
-\
- /**\
- * used by multithreading to store frame-specific info\
- * - encoding: Set by libavcodec.\
- * - decoding: Set by libavcodec.\
- */\
- void *thread_opaque;\
-
#define FF_QSCALE_TYPE_MPEG1 0
#define FF_QSCALE_TYPE_MPEG2 1
#define FF_QSCALE_TYPE_H264 2
/**
* Audio Video Frame.
- * New fields can be added to the end of FF_COMMON_FRAME with minor version
- * bumps.
- * Removal, reordering and changes to existing fields require a major
- * version bump. No fields should be added into AVFrame before or after
- * FF_COMMON_FRAME!
+ * New fields can be added to the end of AVFrame with minor version
+ * bumps. Removal, reordering and changes to existing fields require
+ * a major version bump.
* sizeof(AVFrame) must not be used outside libav*.
*/
typedef struct AVFrame {
- FF_COMMON_FRAME
+ /**
+ * pointer to the picture planes.
+ * This might be different from the first allocated byte
+ * - encoding:
+ * - decoding:
+ */
+ uint8_t *data[4];
+ int linesize[4];
+ /**
+ * pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer.
+ * This isn't used by libavcodec unless the default get/release_buffer() is used.
+ * - encoding:
+ * - decoding:
+ */
+ uint8_t *base[4];
+ /**
+ * 1 -> keyframe, 0-> not
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int key_frame;
+
+ /**
+ * Picture type of the frame, see ?_TYPE below.
+ * - encoding: Set by libavcodec for coded_picture (and set by user for input).
+ * - decoding: Set by libavcodec.
+ */
+ enum AVPictureType pict_type;
+
+ /**
+ * presentation timestamp in time_base units (time when frame should be shown to user)
+ * If AV_NOPTS_VALUE then frame_rate = 1/time_base will be assumed.
+ * - encoding: MUST be set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int64_t pts;
+
+ /**
+ * picture number in bitstream order
+ * - encoding: set by
+ * - decoding: Set by libavcodec.
+ */
+ int coded_picture_number;
+ /**
+ * picture number in display order
+ * - encoding: set by
+ * - decoding: Set by libavcodec.
+ */
+ int display_picture_number;
+
+ /**
+ * quality (between 1 (good) and FF_LAMBDA_MAX (bad))
+ * - encoding: Set by libavcodec for coded_picture (and set by user for input).
+ * - decoding: Set by libavcodec.
+ */
+ int quality;
+
+ /**
+ * buffer age (1->was last buffer and didn't change, 2->..., ...).
+ * Set to INT_MAX if the buffer has not been used yet.
+ * - encoding: unused
+ * - decoding: MUST be set by get_buffer().
+ */
+ int age;
+
+ /**
+ * is this picture used as reference
+ * The values for this are the same as the MpegEncContext.picture_structure
+ * variable, that is 1->top field, 2->bottom field, 3->frame/both fields.
+ * Set to 4 for delayed, non-reference frames.
+ * - encoding: unused
+ * - decoding: Set by libavcodec. (before get_buffer() call).
+ */
+ int reference;
+
+ /**
+ * QP table
+ * - encoding: unused
+ * - decoding: Set by libavcodec.
+ */
+ int8_t *qscale_table;
+ /**
+ * QP store stride
+ * - encoding: unused
+ * - decoding: Set by libavcodec.
+ */
+ int qstride;
+
+ /**
+ * mbskip_table[mb]>=1 if MB didn't change
+ * stride= mb_width = (width+15)>>4
+ * - encoding: unused
+ * - decoding: Set by libavcodec.
+ */
+ uint8_t *mbskip_table;
+
+ /**
+ * motion vector table
+ * @code
+ * example:
+ * int mv_sample_log2= 4 - motion_subsample_log2;
+ * int mb_width= (width+15)>>4;
+ * int mv_stride= (mb_width << mv_sample_log2) + 1;
+ * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];
+ * @endcode
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int16_t (*motion_val[2])[2];
+
+ /**
+ * macroblock type table
+ * mb_type_base + mb_width + 2
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ uint32_t *mb_type;
+
+ /**
+ * log2 of the size of the block which a single vector in motion_val represents:
+ * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)
+ * - encoding: unused
+ * - decoding: Set by libavcodec.
+ */
+ uint8_t motion_subsample_log2;
+
+ /**
+ * for some private data of the user
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ void *opaque;
+
+ /**
+ * error
+ * - encoding: Set by libavcodec. if flags&CODEC_FLAG_PSNR.
+ * - decoding: unused
+ */
+ uint64_t error[4];
+
+ /**
+ * type of the buffer (to keep track of who has to deallocate data[*])
+ * - encoding: Set by the one who allocates it.
+ * - decoding: Set by the one who allocates it.
+ * Note: User allocated (direct rendering) & internal buffers cannot coexist currently.
+ */
+ int type;
+
+ /**
+ * When decoding, this signals how much the picture must be delayed.
+ * extra_delay = repeat_pict / (2*fps)
+ * - encoding: unused
+ * - decoding: Set by libavcodec.
+ */
+ int repeat_pict;
+
+ /**
+ * QP table type: one of the FF_QSCALE_TYPE_* values defined above.
+ */
+ int qscale_type;
+
+ /**
+ * The content of the picture is interlaced.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec. (default 0)
+ */
+ int interlaced_frame;
+
+ /**
+ * If the content is interlaced, is top field displayed first.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int top_field_first;
+
+ /**
+ * Pan scan.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVPanScan *pan_scan;
+
+ /**
+ * Tell user application that palette has changed from previous frame.
+ * - encoding: ??? (no palette-enabled encoder yet)
+ * - decoding: Set by libavcodec. (default 0).
+ */
+ int palette_has_changed;
+
+ /**
+ * codec suggestion on buffer type if != 0
+ * - encoding: unused
+ * - decoding: Set by libavcodec. (before get_buffer() call).
+ */
+ int buffer_hints;
+
+ /**
+ * DCT coefficients
+ * - encoding: unused
+ * - decoding: Set by libavcodec.
+ */
+ short *dct_coeff;
+
+ /**
+ * motion reference frame index
+ * the order in which these are stored can depend on the codec.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int8_t *ref_index[2];
+
+ /**
+ * reordered opaque 64bit (generally an integer or a double precision float
+ * PTS but can be anything).
+ * The user sets AVCodecContext.reordered_opaque to represent the input at
+ * that time,
+ * the decoder reorders values as needed and sets AVFrame.reordered_opaque
+ * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque
+ * @deprecated in favor of pkt_pts
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int64_t reordered_opaque;
+
+ /**
+ * hardware accelerator private data (Libav-allocated)
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ void *hwaccel_picture_private;
+
+ /**
+ * reordered pts from the last AVPacket that has been input into the decoder
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int64_t pkt_pts;
+
+ /**
+ * dts from the last AVPacket that has been input into the decoder
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int64_t pkt_dts;
+
+ /**
+ * the AVCodecContext which ff_thread_get_buffer() was last called on
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ struct AVCodecContext *owner;
+
+ /**
+ * used by multithreading to store frame-specific info
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ void *thread_opaque;
} AVFrame;
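/* A minimal sketch (not part of this patch) of how the data[]/linesize[]
 * fields above are consumed: plane p at row y starts at
 * data[p] + y * linesize[p], and linesize may exceed the visible width
 * because of alignment padding. */
#include <stdint.h>
#include <string.h>

static void fill_plane(uint8_t *plane, int linesize, int width, int height,
                       uint8_t value)
{
    for (int y = 0; y < height; y++)
        memset(plane + y * linesize, value, width); /* one padded row at a time */
}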
const int mx= mv->x + src_x_offset*8;
const int my= mv->y + src_y_offset*8;
const int luma_xy= (mx&3) + ((my&3)<<2);
- uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*h->l_stride;
- uint8_t * src_cb= pic->data[1] + (mx>>3) + (my>>3)*h->c_stride;
- uint8_t * src_cr= pic->data[2] + (mx>>3) + (my>>3)*h->c_stride;
+ uint8_t * src_y = pic->f.data[0] + (mx >> 2) + (my >> 2) * h->l_stride;
+ uint8_t * src_cb = pic->f.data[1] + (mx >> 3) + (my >> 3) * h->c_stride;
+ uint8_t * src_cr = pic->f.data[2] + (mx >> 3) + (my >> 3) * h->c_stride;
int extra_width= 0; //(s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16;
int extra_height= extra_width;
int emu=0;
const int pic_width = 16*h->mb_width;
const int pic_height = 16*h->mb_height;
- if(!pic->data[0])
+ if(!pic->f.data[0])
return;
if(mx&7) extra_width -= 3;
if(my&7) extra_height -= 3;
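/* A simplified sketch (assumed layout, not the full definition) of why the
 * pic->data accesses become pic->f.data in this and the following hunks:
 * Picture now embeds an AVFrame named f as its first member, which also
 * keeps the (AVFrame *)&pic casts seen below valid. */
typedef struct Picture {
    struct AVFrame f;   /* per-frame fields formerly inlined via FF_COMMON_FRAME */
    /* codec-internal bookkeeping (motion_val_base, field_picture, ...) follows */
} Picture;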
h->mbx = 0;
h->mby++;
/* re-calculate sample pointers */
- h->cy = h->picture.data[0] + h->mby*16*h->l_stride;
- h->cu = h->picture.data[1] + h->mby*8*h->c_stride;
- h->cv = h->picture.data[2] + h->mby*8*h->c_stride;
+ h->cy = h->picture.f.data[0] + h->mby * 16 * h->l_stride;
+ h->cu = h->picture.f.data[1] + h->mby * 8 * h->c_stride;
+ h->cv = h->picture.f.data[2] + h->mby * 8 * h->c_stride;
if(h->mby == h->mb_height) { //frame end
return 0;
}
h->mv[MV_FWD_X0] = ff_cavs_dir_mv;
set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
- h->cy = h->picture.data[0];
- h->cu = h->picture.data[1];
- h->cv = h->picture.data[2];
- h->l_stride = h->picture.linesize[0];
- h->c_stride = h->picture.linesize[1];
+ h->cy = h->picture.f.data[0];
+ h->cu = h->picture.f.data[1];
+ h->cv = h->picture.f.data[2];
+ h->l_stride = h->picture.f.linesize[0];
+ h->c_stride = h->picture.f.linesize[1];
h->luma_scan[2] = 8*h->l_stride;
h->luma_scan[3] = 8*h->l_stride+8;
h->mbx = h->mby = h->mbidx = 0;
return -1;
}
/* make sure we have the reference frames we need */
- if(!h->DPB[0].data[0] ||
- (!h->DPB[1].data[0] && h->pic_type == AV_PICTURE_TYPE_B))
+ if(!h->DPB[0].f.data[0] ||
+ (!h->DPB[1].f.data[0] && h->pic_type == AV_PICTURE_TYPE_B))
return -1;
} else {
h->pic_type = AV_PICTURE_TYPE_I;
skip_bits(&s->gb,1); //marker_bit
}
/* release last B frame */
- if(h->picture.data[0])
+ if(h->picture.f.data[0])
s->avctx->release_buffer(s->avctx, (AVFrame *)&h->picture);
s->avctx->get_buffer(s->avctx, (AVFrame *)&h->picture);
} while(ff_cavs_next_mb(h));
}
if(h->pic_type != AV_PICTURE_TYPE_B) {
- if(h->DPB[1].data[0])
+ if(h->DPB[1].f.data[0])
s->avctx->release_buffer(s->avctx, (AVFrame *)&h->DPB[1]);
h->DPB[1] = h->DPB[0];
h->DPB[0] = h->picture;
s->avctx = avctx;
if (buf_size == 0) {
- if(!s->low_delay && h->DPB[0].data[0]) {
+ if (!s->low_delay && h->DPB[0].f.data[0]) {
*data_size = sizeof(AVPicture);
*picture = *(AVFrame *) &h->DPB[0];
}
break;
case PIC_I_START_CODE:
if(!h->got_keyframe) {
- if(h->DPB[0].data[0])
+ if(h->DPB[0].f.data[0])
avctx->release_buffer(avctx, (AVFrame *)&h->DPB[0]);
- if(h->DPB[1].data[0])
+ if(h->DPB[1].f.data[0])
avctx->release_buffer(avctx, (AVFrame *)&h->DPB[1]);
h->got_keyframe = 1;
}
break;
*data_size = sizeof(AVPicture);
if(h->pic_type != AV_PICTURE_TYPE_B) {
- if(h->DPB[1].data[0]) {
+ if(h->DPB[1].f.data[0]) {
*picture = *(AVFrame *) &h->DPB[1];
} else {
*data_size = 0;
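/* A sketch (names simplified, not part of the patch) of the two-slot
 * reference rotation visible above: after a non-B picture is decoded the
 * oldest reference is released, DPB[0] ages into DPB[1], and the new
 * picture becomes DPB[0]. */
typedef struct RefSlot { void *data0; } RefSlot;  /* stand-in for Picture */

static void dpb_rotate(RefSlot dpb[2], RefSlot *cur,
                       void (*release)(RefSlot *))
{
    if (dpb[1].data0)
        release(&dpb[1]); /* drop the oldest reference */
    dpb[1] = dpb[0];      /* previous picture becomes the older reference */
    dpb[0] = *cur;        /* freshly decoded picture is the newest */
}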
#undef mb_intra
static void decode_mb(MpegEncContext *s, int ref){
- s->dest[0] = s->current_picture.data[0] + (s->mb_y * 16* s->linesize ) + s->mb_x * 16;
- s->dest[1] = s->current_picture.data[1] + (s->mb_y * (16>>s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16>>s->chroma_x_shift);
- s->dest[2] = s->current_picture.data[2] + (s->mb_y * (16>>s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16>>s->chroma_x_shift);
+ s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
+ s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
+ s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
if(CONFIG_H264_DECODER && s->codec_id == CODEC_ID_H264){
H264Context *h= (void*)s;
assert(ref>=0);
    if(ref >= h->ref_count[0]) //FIXME it is possible albeit uncommon that slice references differ between slices; we take the easy approach and ignore it for now. If this turns out to have any relevance in practice then correct remapping should be added
ref=0;
- fill_rectangle(&s->current_picture.ref_index[0][4*h->mb_xy], 2, 2, 2, ref, 1);
+ fill_rectangle(&s->current_picture.f.ref_index[0][4*h->mb_xy], 2, 2, 2, ref, 1);
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
fill_rectangle(h->mv_cache[0][ scan8[0] ], 4, 4, 8, pack16to32(s->mv[0][0][0],s->mv[0][0][1]), 4);
assert(!FRAME_MBAFF);
error= s->error_status_table[mb_index];
- if(IS_INTER(s->current_picture.mb_type[mb_index])) continue; //inter
+ if(IS_INTER(s->current_picture.f.mb_type[mb_index])) continue; //inter
if(!(error&DC_ERROR)) continue; //dc-ok
/* right block */
for(j=b_x+1; j<w; j++){
int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride;
int error_j= s->error_status_table[mb_index_j];
- int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+ int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
if(intra_j==0 || !(error_j&DC_ERROR)){
color[0]= dc[j + b_y*stride];
distance[0]= j-b_x;
for(j=b_x-1; j>=0; j--){
int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride;
int error_j= s->error_status_table[mb_index_j];
- int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+ int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
if(intra_j==0 || !(error_j&DC_ERROR)){
color[1]= dc[j + b_y*stride];
distance[1]= b_x-j;
for(j=b_y+1; j<h; j++){
int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride;
int error_j= s->error_status_table[mb_index_j];
- int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+ int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
if(intra_j==0 || !(error_j&DC_ERROR)){
color[2]= dc[b_x + j*stride];
distance[2]= j-b_y;
for(j=b_y-1; j>=0; j--){
int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride;
int error_j= s->error_status_table[mb_index_j];
- int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+ int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
if(intra_j==0 || !(error_j&DC_ERROR)){
color[3]= dc[b_x + j*stride];
distance[3]= b_y-j;
int y;
int left_status = s->error_status_table[( b_x >>is_luma) + (b_y>>is_luma)*s->mb_stride];
int right_status= s->error_status_table[((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride];
- int left_intra= IS_INTRA(s->current_picture.mb_type [( b_x >>is_luma) + (b_y>>is_luma)*s->mb_stride]);
- int right_intra= IS_INTRA(s->current_picture.mb_type [((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride]);
+ int left_intra = IS_INTRA(s->current_picture.f.mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
+ int right_intra = IS_INTRA(s->current_picture.f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
int left_damage = left_status&(DC_ERROR|AC_ERROR|MV_ERROR);
int right_damage= right_status&(DC_ERROR|AC_ERROR|MV_ERROR);
int offset= b_x*8 + b_y*stride*8;
- int16_t *left_mv= s->current_picture.motion_val[0][mvy_stride*b_y + mvx_stride* b_x ];
- int16_t *right_mv= s->current_picture.motion_val[0][mvy_stride*b_y + mvx_stride*(b_x+1)];
+ int16_t *left_mv= s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride* b_x ];
+ int16_t *right_mv= s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride*(b_x+1)];
if(!(left_damage||right_damage)) continue; // both undamaged
int x;
int top_status = s->error_status_table[(b_x>>is_luma) + ( b_y >>is_luma)*s->mb_stride];
int bottom_status= s->error_status_table[(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride];
- int top_intra= IS_INTRA(s->current_picture.mb_type [(b_x>>is_luma) + ( b_y >>is_luma)*s->mb_stride]);
- int bottom_intra= IS_INTRA(s->current_picture.mb_type [(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride]);
+ int top_intra = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]);
+ int bottom_intra = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
int top_damage = top_status&(DC_ERROR|AC_ERROR|MV_ERROR);
int bottom_damage= bottom_status&(DC_ERROR|AC_ERROR|MV_ERROR);
int offset= b_x*8 + b_y*stride*8;
- int16_t *top_mv= s->current_picture.motion_val[0][mvy_stride* b_y + mvx_stride*b_x];
- int16_t *bottom_mv= s->current_picture.motion_val[0][mvy_stride*(b_y+1) + mvx_stride*b_x];
+ int16_t *top_mv = s->current_picture.f.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
+ int16_t *bottom_mv = s->current_picture.f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
if(!(top_damage||bottom_damage)) continue; // both undamaged
int f=0;
int error= s->error_status_table[mb_xy];
- if(IS_INTRA(s->current_picture.mb_type[mb_xy])) f=MV_FROZEN; //intra //FIXME check
+ if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) f=MV_FROZEN; //intra //FIXME check
if(!(error&MV_ERROR)) f=MV_FROZEN; //inter with undamaged MV
fixed[mb_xy]= f;
for(mb_x=0; mb_x<s->mb_width; mb_x++){
const int mb_xy= mb_x + mb_y*s->mb_stride;
- if(IS_INTRA(s->current_picture.mb_type[mb_xy])) continue;
+ if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) continue;
if(!(s->error_status_table[mb_xy]&MV_ERROR)) continue;
- s->mv_dir = s->last_picture.data[0] ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
+ s->mv_dir = s->last_picture.f.data[0] ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
s->mb_intra=0;
s->mv_type = MV_TYPE_16X16;
s->mb_skipped=0;
if((mb_x^mb_y^pass)&1) continue;
if(fixed[mb_xy]==MV_FROZEN) continue;
- assert(!IS_INTRA(s->current_picture.mb_type[mb_xy]));
- assert(s->last_picture_ptr && s->last_picture_ptr->data[0]);
+ assert(!IS_INTRA(s->current_picture.f.mb_type[mb_xy]));
+ assert(s->last_picture_ptr && s->last_picture_ptr->f.data[0]);
j=0;
if(mb_x>0 && fixed[mb_xy-1 ]==MV_FROZEN) j=1;
none_left=0;
if(mb_x>0 && fixed[mb_xy-1]){
- mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index - mot_step][0];
- mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index - mot_step][1];
- ref [pred_count] = s->current_picture.ref_index[0][4*(mb_xy-1)];
+ mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_step][0];
+ mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_step][1];
+ ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy-1)];
pred_count++;
}
if(mb_x+1<mb_width && fixed[mb_xy+1]){
- mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index + mot_step][0];
- mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index + mot_step][1];
- ref [pred_count] = s->current_picture.ref_index[0][4*(mb_xy+1)];
+ mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_step][0];
+ mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_step][1];
+ ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy+1)];
pred_count++;
}
if(mb_y>0 && fixed[mb_xy-mb_stride]){
- mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index - mot_stride*mot_step][0];
- mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index - mot_stride*mot_step][1];
- ref [pred_count] = s->current_picture.ref_index[0][4*(mb_xy-s->mb_stride)];
+ mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][0];
+ mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][1];
+ ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy-s->mb_stride)];
pred_count++;
}
if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
- mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index + mot_stride*mot_step][0];
- mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index + mot_stride*mot_step][1];
- ref [pred_count] = s->current_picture.ref_index[0][4*(mb_xy+s->mb_stride)];
+ mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][0];
+ mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][1];
+ ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy+s->mb_stride)];
pred_count++;
}
if(pred_count==0) continue;
ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
mb_y, 0);
}
- if (!s->last_picture.motion_val[0] ||
- !s->last_picture.ref_index[0])
+ if (!s->last_picture.f.motion_val[0] ||
+ !s->last_picture.f.ref_index[0])
goto skip_last_mv;
- prev_x = s->last_picture.motion_val[0][mot_index][0];
- prev_y = s->last_picture.motion_val[0][mot_index][1];
- prev_ref = s->last_picture.ref_index[0][4*mb_xy];
+ prev_x = s->last_picture.f.motion_val[0][mot_index][0];
+ prev_y = s->last_picture.f.motion_val[0][mot_index][1];
+ prev_ref = s->last_picture.f.ref_index[0][4*mb_xy];
} else {
- prev_x = s->current_picture.motion_val[0][mot_index][0];
- prev_y = s->current_picture.motion_val[0][mot_index][1];
- prev_ref = s->current_picture.ref_index[0][4*mb_xy];
+ prev_x = s->current_picture.f.motion_val[0][mot_index][0];
+ prev_y = s->current_picture.f.motion_val[0][mot_index][1];
+ prev_ref = s->current_picture.f.ref_index[0][4*mb_xy];
}
/* last MV */
for(j=0; j<pred_count; j++){
int score=0;
- uint8_t *src= s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
+ uint8_t *src = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize;
- s->current_picture.motion_val[0][mot_index][0]= s->mv[0][0][0]= mv_predictor[j][0];
- s->current_picture.motion_val[0][mot_index][1]= s->mv[0][0][1]= mv_predictor[j][1];
+ s->current_picture.f.motion_val[0][mot_index][0] = s->mv[0][0][0] = mv_predictor[j][0];
+ s->current_picture.f.motion_val[0][mot_index][1] = s->mv[0][0][1] = mv_predictor[j][1];
if(ref[j]<0) //predictor intra or otherwise not available
continue;
for(i=0; i<mot_step; i++)
for(j=0; j<mot_step; j++){
- s->current_picture.motion_val[0][mot_index+i+j*mot_stride][0]= s->mv[0][0][0];
- s->current_picture.motion_val[0][mot_index+i+j*mot_stride][1]= s->mv[0][0][1];
+ s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
+ s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
}
decode_mb(s, ref[best_pred]);
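/* A sketch (hypothetical helper, not in the patch) of the selection loop
 * above: every candidate MV gathered from already-repaired neighbours is
 * tried in turn and scored (boundary mismatch in the real code); the
 * lowest-scoring candidate index is what decode_mb() is called with. */
#include <limits.h>
#include <stdint.h>

static int pick_best_predictor(const int16_t (*mv_predictor)[2], int pred_count,
                               int (*score_mv)(const int16_t mv[2], void *ctx),
                               void *ctx)
{
    int best_pred = 0, best_score = INT_MAX;
    for (int j = 0; j < pred_count; j++) {
        int score = score_mv(mv_predictor[j], ctx);
        if (score < best_score) {
            best_score = score;
            best_pred  = j;
        }
    }
    return best_pred;
}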
static int is_intra_more_likely(MpegEncContext *s){
int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;
- if(!s->last_picture_ptr || !s->last_picture_ptr->data[0]) return 1; //no previous frame available -> use spatial prediction
+ if (!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) return 1; //no previous frame available -> use spatial prediction
undamaged_count=0;
for(i=0; i<s->mb_num; i++){
if(s->codec_id == CODEC_ID_H264){
H264Context *h= (void*)s;
- if(h->ref_count[0] <= 0 || !h->ref_list[0][0].data[0])
+ if (h->ref_count[0] <= 0 || !h->ref_list[0][0].f.data[0])
return 1;
}
if((j%skip_amount) != 0) continue; //skip a few to speed things up
if(s->pict_type==AV_PICTURE_TYPE_I){
- uint8_t *mb_ptr = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
- uint8_t *last_mb_ptr= s->last_picture.data [0] + mb_x*16 + mb_y*16*s->linesize;
+ uint8_t *mb_ptr = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize;
+ uint8_t *last_mb_ptr= s->last_picture.f.data [0] + mb_x*16 + mb_y*16*s->linesize;
if (s->avctx->codec_id == CODEC_ID_H264) {
// FIXME
is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr , s->linesize, 16);
is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize, 16);
}else{
- if(IS_INTRA(s->current_picture.mb_type[mb_xy]))
+ if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
is_intra_likely++;
else
is_intra_likely--;
       s->picture_structure != PICT_FRAME || // we don't support ER of field pictures yet, though it should not crash if enabled
s->error_count==3*s->mb_width*(s->avctx->skip_top + s->avctx->skip_bottom)) return;
- if(s->current_picture.motion_val[0] == NULL){
+ if (s->current_picture.f.motion_val[0] == NULL) {
av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
for(i=0; i<2; i++){
- pic->ref_index[i]= av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
+ pic->f.ref_index[i] = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
pic->motion_val_base[i]= av_mallocz((size+4) * 2 * sizeof(uint16_t));
- pic->motion_val[i]= pic->motion_val_base[i]+4;
+ pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
}
- pic->motion_subsample_log2= 3;
+ pic->f.motion_subsample_log2 = 3;
s->current_picture= *s->current_picture_ptr;
}
continue;
if(is_intra_likely)
- s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA4x4;
+ s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
else
- s->current_picture.mb_type[mb_xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
}
// change inter to intra blocks if no reference frames are available
- if (!s->last_picture.data[0] && !s->next_picture.data[0])
+ if (!s->last_picture.f.data[0] && !s->next_picture.f.data[0])
for(i=0; i<s->mb_num; i++){
const int mb_xy= s->mb_index2xy[i];
- if(!IS_INTRA(s->current_picture.mb_type[mb_xy]))
- s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA4x4;
+ if (!IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
+ s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
}
/* handle inter blocks with damaged AC */
for(mb_y=0; mb_y<s->mb_height; mb_y++){
for(mb_x=0; mb_x<s->mb_width; mb_x++){
const int mb_xy= mb_x + mb_y * s->mb_stride;
- const int mb_type= s->current_picture.mb_type[mb_xy];
- int dir = !s->last_picture.data[0];
+ const int mb_type= s->current_picture.f.mb_type[mb_xy];
+ int dir = !s->last_picture.f.data[0];
error= s->error_status_table[mb_xy];
if(IS_INTRA(mb_type)) continue; //intra
int j;
s->mv_type = MV_TYPE_8X8;
for(j=0; j<4; j++){
- s->mv[0][j][0] = s->current_picture.motion_val[dir][ mb_index + (j&1) + (j>>1)*s->b8_stride ][0];
- s->mv[0][j][1] = s->current_picture.motion_val[dir][ mb_index + (j&1) + (j>>1)*s->b8_stride ][1];
+ s->mv[0][j][0] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
+ s->mv[0][j][1] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
}
}else{
s->mv_type = MV_TYPE_16X16;
- s->mv[0][0][0] = s->current_picture.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][0];
- s->mv[0][0][1] = s->current_picture.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][1];
+ s->mv[0][0][0] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][0];
+ s->mv[0][0][1] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][1];
}
s->dsp.clear_blocks(s->block[0]);
for(mb_x=0; mb_x<s->mb_width; mb_x++){
int xy= mb_x*2 + mb_y*2*s->b8_stride;
const int mb_xy= mb_x + mb_y * s->mb_stride;
- const int mb_type= s->current_picture.mb_type[mb_xy];
+ const int mb_type= s->current_picture.f.mb_type[mb_xy];
error= s->error_status_table[mb_xy];
if(IS_INTRA(mb_type)) continue;
if(!(error&AC_ERROR)) continue; //undamaged inter
s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD;
- if(!s->last_picture.data[0]) s->mv_dir &= ~MV_DIR_FORWARD;
- if(!s->next_picture.data[0]) s->mv_dir &= ~MV_DIR_BACKWARD;
+ if(!s->last_picture.f.data[0]) s->mv_dir &= ~MV_DIR_FORWARD;
+ if(!s->next_picture.f.data[0]) s->mv_dir &= ~MV_DIR_BACKWARD;
s->mb_intra=0;
s->mv_type = MV_TYPE_16X16;
s->mb_skipped=0;
ff_thread_await_progress((AVFrame *) s->next_picture_ptr,
mb_y, 0);
}
- s->mv[0][0][0] = s->next_picture.motion_val[0][xy][0]*time_pb/time_pp;
- s->mv[0][0][1] = s->next_picture.motion_val[0][xy][1]*time_pb/time_pp;
- s->mv[1][0][0] = s->next_picture.motion_val[0][xy][0]*(time_pb - time_pp)/time_pp;
- s->mv[1][0][1] = s->next_picture.motion_val[0][xy][1]*(time_pb - time_pp)/time_pp;
+ s->mv[0][0][0] = s->next_picture.f.motion_val[0][xy][0] * time_pb / time_pp;
+ s->mv[0][0][1] = s->next_picture.f.motion_val[0][xy][1] * time_pb / time_pp;
+ s->mv[1][0][0] = s->next_picture.f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
+ s->mv[1][0][1] = s->next_picture.f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
}else{
s->mv[0][0][0]= 0;
s->mv[0][0][1]= 0;
int16_t *dc_ptr;
uint8_t *dest_y, *dest_cb, *dest_cr;
const int mb_xy= mb_x + mb_y * s->mb_stride;
- const int mb_type= s->current_picture.mb_type[mb_xy];
+ const int mb_type = s->current_picture.f.mb_type[mb_xy];
error= s->error_status_table[mb_xy];
if(IS_INTRA(mb_type) && s->partitioned_frame) continue;
// if(error&MV_ERROR) continue; //inter data damaged FIXME is this good?
- dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
- dest_cb= s->current_picture.data[1] + mb_x*8 + mb_y*8 *s->uvlinesize;
- dest_cr= s->current_picture.data[2] + mb_x*8 + mb_y*8 *s->uvlinesize;
+ dest_y = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
+ dest_cb = s->current_picture.f.data[1] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
+ dest_cr = s->current_picture.f.data[2] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
dc_ptr= &s->dc_val[0][mb_x*2 + mb_y*2*s->b8_stride];
for(n=0; n<4; n++){
for(mb_x=0; mb_x<s->mb_width; mb_x++){
uint8_t *dest_y, *dest_cb, *dest_cr;
const int mb_xy= mb_x + mb_y * s->mb_stride;
- const int mb_type= s->current_picture.mb_type[mb_xy];
+ const int mb_type = s->current_picture.f.mb_type[mb_xy];
error= s->error_status_table[mb_xy];
if(IS_INTER(mb_type)) continue;
if(!(error&AC_ERROR)) continue; //undamaged
- dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
- dest_cb= s->current_picture.data[1] + mb_x*8 + mb_y*8 *s->uvlinesize;
- dest_cr= s->current_picture.data[2] + mb_x*8 + mb_y*8 *s->uvlinesize;
+ dest_y = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
+ dest_cb = s->current_picture.f.data[1] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
+ dest_cr = s->current_picture.f.data[2] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
}
if(s->avctx->error_concealment&FF_EC_DEBLOCK){
/* filter horizontal block boundaries */
- h_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
- h_block_filter(s, s->current_picture.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
- h_block_filter(s, s->current_picture.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
+ h_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
+ h_block_filter(s, s->current_picture.f.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
+ h_block_filter(s, s->current_picture.f.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
/* filter vertical block boundaries */
- v_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
- v_block_filter(s, s->current_picture.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
- v_block_filter(s, s->current_picture.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
+ v_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
+ v_block_filter(s, s->current_picture.f.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
+ v_block_filter(s, s->current_picture.f.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
}
ec_clean:
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
- s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
s->mb_skipped = 1;
}
if(s->mb_intra){
- s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
goto intra;
}
//set motion vectors
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
- s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = h->current_mv_x * 2;//gets divided by 2 in motion compensation
s->mv[0][0][1] = h->current_mv_y * 2;
s->picture_number = (s->picture_number&~31) + i;
s->avctx->time_base= (AVRational){1001, 30000};
- s->current_picture.pts= s->picture_number;
+ s->current_picture.f.pts = s->picture_number;
/* PTYPE starts here */
}
    // we need to set current_picture_ptr before reading the header, otherwise we cannot store anything in there
- if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
+ if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
int i= ff_find_unused_picture(s, 0);
s->current_picture_ptr= &s->picture[i];
}
}
// for skipping the frame
- s->current_picture.pict_type= s->pict_type;
- s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
+ s->current_picture.f.pict_type = s->pict_type;
+ s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
const int wrap = s->b8_stride;
const int xy = s->block_index[0];
- s->current_picture.mbskip_table[mb_xy]= s->mb_skipped;
+ s->current_picture.f.mbskip_table[mb_xy] = s->mb_skipped;
if(s->mv_type != MV_TYPE_8X8){
int motion_x, motion_y;
s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
}
- s->current_picture.ref_index[0][4*mb_xy ]=
- s->current_picture.ref_index[0][4*mb_xy + 1]= s->field_select[0][0];
- s->current_picture.ref_index[0][4*mb_xy + 2]=
- s->current_picture.ref_index[0][4*mb_xy + 3]= s->field_select[0][1];
+ s->current_picture.f.ref_index[0][4*mb_xy ] =
+ s->current_picture.f.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
+ s->current_picture.f.ref_index[0][4*mb_xy + 2] =
+ s->current_picture.f.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
}
/* no update if 8X8 because it has been done during parsing */
- s->current_picture.motion_val[0][xy][0] = motion_x;
- s->current_picture.motion_val[0][xy][1] = motion_y;
- s->current_picture.motion_val[0][xy + 1][0] = motion_x;
- s->current_picture.motion_val[0][xy + 1][1] = motion_y;
- s->current_picture.motion_val[0][xy + wrap][0] = motion_x;
- s->current_picture.motion_val[0][xy + wrap][1] = motion_y;
- s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
- s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
+ s->current_picture.f.motion_val[0][xy][0] = motion_x;
+ s->current_picture.f.motion_val[0][xy][1] = motion_y;
+ s->current_picture.f.motion_val[0][xy + 1][0] = motion_x;
+ s->current_picture.f.motion_val[0][xy + 1][1] = motion_y;
+ s->current_picture.f.motion_val[0][xy + wrap][0] = motion_x;
+ s->current_picture.f.motion_val[0][xy + wrap][1] = motion_y;
+ s->current_picture.f.motion_val[0][xy + 1 + wrap][0] = motion_x;
+ s->current_picture.f.motion_val[0][xy + 1 + wrap][1] = motion_y;
}
if(s->encoding){ //FIXME encoding MUST be cleaned up
if (s->mv_type == MV_TYPE_8X8)
- s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8;
+ s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
else if(s->mb_intra)
- s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA;
+ s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA;
else
- s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16;
+ s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
}
}
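/* A sketch (hypothetical helper, not in the patch) of the motion_val
 * replication in the hunk above: a 16x16 macroblock covers a 2x2 cell of
 * the 8x8-block motion grid (row stride wrap = b8_stride), so the same MV
 * is written to all four cells. */
#include <stdint.h>

static void set_mv_2x2(int16_t (*motion_val)[2], int xy, int wrap,
                       int motion_x, int motion_y)
{
    for (int j = 0; j < 2; j++)
        for (int i = 0; i < 2; i++) {
            motion_val[xy + i + j * wrap][0] = motion_x;
            motion_val[xy + i + j * wrap][1] = motion_y;
        }
}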
Diag Top
Left Center
*/
- if(!IS_SKIP(s->current_picture.mb_type[xy])){
+ if (!IS_SKIP(s->current_picture.f.mb_type[xy])) {
qp_c= s->qscale;
s->dsp.h263_v_loop_filter(dest_y+8*linesize , linesize, qp_c);
s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
if(s->mb_y){
int qp_dt, qp_tt, qp_tc;
- if(IS_SKIP(s->current_picture.mb_type[xy-s->mb_stride]))
+ if (IS_SKIP(s->current_picture.f.mb_type[xy - s->mb_stride]))
qp_tt=0;
else
- qp_tt= s->current_picture.qscale_table[xy-s->mb_stride];
+ qp_tt = s->current_picture.f.qscale_table[xy - s->mb_stride];
if(qp_c)
qp_tc= qp_c;
s->dsp.h263_h_loop_filter(dest_y-8*linesize+8 , linesize, qp_tt);
if(s->mb_x){
- if(qp_tt || IS_SKIP(s->current_picture.mb_type[xy-1-s->mb_stride]))
+ if (qp_tt || IS_SKIP(s->current_picture.f.mb_type[xy - 1 - s->mb_stride]))
qp_dt= qp_tt;
else
- qp_dt= s->current_picture.qscale_table[xy-1-s->mb_stride];
+ qp_dt = s->current_picture.f.qscale_table[xy - 1 - s->mb_stride];
if(qp_dt){
const int chroma_qp= s->chroma_qscale_table[qp_dt];
if(s->mb_x){
int qp_lc;
- if(qp_c || IS_SKIP(s->current_picture.mb_type[xy-1]))
+ if (qp_c || IS_SKIP(s->current_picture.f.mb_type[xy - 1]))
qp_lc= qp_c;
else
- qp_lc= s->current_picture.qscale_table[xy-1];
+ qp_lc = s->current_picture.f.qscale_table[xy - 1];
if(qp_lc){
s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
static const int off[4]= {2, 1, 1, -1};
wrap = s->b8_stride;
- mot_val = s->current_picture.motion_val[dir] + s->block_index[block];
+ mot_val = s->current_picture.f.motion_val[dir] + s->block_index[block];
A = mot_val[ - 1];
/* special case for first (slice) line */
/* We need to set current_picture_ptr before reading the header,
 * otherwise we cannot store anything in there */
- if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
+ if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
int i= ff_find_unused_picture(s, 0);
s->current_picture_ptr= &s->picture[i];
}
s->gob_index = ff_h263_get_gob_height(s);
// for skipping the frame
- s->current_picture.pict_type= s->pict_type;
- s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
+ s->current_picture.f.pict_type = s->pict_type;
+ s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
/* skip B-frames if we don't have reference frames */
if(s->last_picture_ptr==NULL && (s->pict_type==AV_PICTURE_TYPE_B || s->dropable)) return get_consumed_bytes(s, buf_size);
// Error resilience puts the current picture in the ref list.
// Don't try to wait on these as it will cause a deadlock.
// Fields can wait on each other, though.
- if(ref->thread_opaque != s->current_picture.thread_opaque ||
- (ref->reference&3) != s->picture_structure) {
+ if (ref->f.thread_opaque != s->current_picture.f.thread_opaque ||
+ (ref->f.reference & 3) != s->picture_structure) {
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0);
if (refs[0][ref_n] < 0) nrefs[0] += 1;
refs[0][ref_n] = FFMAX(refs[0][ref_n], my);
int ref_n = h->ref_cache[1][ scan8[n] ];
Picture *ref= &h->ref_list[1][ref_n];
- if(ref->thread_opaque != s->current_picture.thread_opaque ||
- (ref->reference&3) != s->picture_structure) {
+ if (ref->f.thread_opaque != s->current_picture.f.thread_opaque ||
+ (ref->f.reference & 3) != s->picture_structure) {
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1);
if (refs[1][ref_n] < 0) nrefs[1] += 1;
refs[1][ref_n] = FFMAX(refs[1][ref_n], my);
static void await_references(H264Context *h){
MpegEncContext * const s = &h->s;
const int mb_xy= h->mb_xy;
- const int mb_type= s->current_picture.mb_type[mb_xy];
+ const int mb_type = s->current_picture.f.mb_type[mb_xy];
int refs[2][48];
int nrefs[2] = {0};
int ref, list;
int row = refs[list][ref];
if(row >= 0){
Picture *ref_pic = &h->ref_list[list][ref];
- int ref_field = ref_pic->reference - 1;
+ int ref_field = ref_pic->f.reference - 1;
int ref_field_picture = ref_pic->field_picture;
int pic_height = 16*s->mb_height >> ref_field_picture;
int my= h->mv_cache[list][ scan8[n] ][1] + src_y_offset*8;
const int luma_xy= (mx&3) + ((my&3)<<2);
int offset = ((mx>>2) << pixel_shift) + (my>>2)*h->mb_linesize;
- uint8_t * src_y = pic->data[0] + offset;
+ uint8_t * src_y = pic->f.data[0] + offset;
uint8_t * src_cb, * src_cr;
int extra_width= h->emu_edge_width;
int extra_height= h->emu_edge_height;
if(CONFIG_GRAY && s->flags&CODEC_FLAG_GRAY) return;
if(chroma444){
- src_cb = pic->data[1] + offset;
+ src_cb = pic->f.data[1] + offset;
if(emu){
s->dsp.emulated_edge_mc(s->edge_emu_buffer, src_cb - (2 << pixel_shift) - 2*h->mb_linesize, h->mb_linesize,
16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height);
qpix_op[luma_xy](dest_cb + delta, src_cb + delta, h->mb_linesize);
}
- src_cr = pic->data[2] + offset;
+ src_cr = pic->f.data[2] + offset;
if(emu){
s->dsp.emulated_edge_mc(s->edge_emu_buffer, src_cr - (2 << pixel_shift) - 2*h->mb_linesize, h->mb_linesize,
16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height);
if(MB_FIELD){
// chroma offset when predicting from a field of opposite parity
- my += 2 * ((s->mb_y & 1) - (pic->reference - 1));
+ my += 2 * ((s->mb_y & 1) - (pic->f.reference - 1));
emu |= (my>>3) < 0 || (my>>3) + 8 >= (pic_height>>1);
}
- src_cb= pic->data[1] + ((mx>>3) << pixel_shift) + (my>>3)*h->mb_uvlinesize;
- src_cr= pic->data[2] + ((mx>>3) << pixel_shift) + (my>>3)*h->mb_uvlinesize;
+ src_cb = pic->f.data[1] + ((mx >> 3) << pixel_shift) + (my >> 3) * h->mb_uvlinesize;
+ src_cr = pic->f.data[2] + ((mx >> 3) << pixel_shift) + (my >> 3) * h->mb_uvlinesize;
if(emu){
s->dsp.emulated_edge_mc(s->edge_emu_buffer, src_cb, h->mb_uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
if(refn >= 0){
const int mx= (h->mv_cache[list][scan8[0]][0]>>2) + 16*s->mb_x + 8;
const int my= (h->mv_cache[list][scan8[0]][1]>>2) + 16*s->mb_y;
- uint8_t **src= h->ref_list[list][refn].data;
+ uint8_t **src = h->ref_list[list][refn].f.data;
int off= (mx << pixel_shift) + (my + (s->mb_x&3)*4)*h->mb_linesize + (64 << pixel_shift);
s->dsp.prefetch(src[0]+off, s->linesize, 4);
if(chroma444){
int pixel_shift, int chroma444){
MpegEncContext * const s = &h->s;
const int mb_xy= h->mb_xy;
- const int mb_type= s->current_picture.mb_type[mb_xy];
+ const int mb_type = s->current_picture.f.mb_type[mb_xy];
assert(IS_INTER(mb_type));
* Zero here; IDR markings per slice in frame or fields are ORed in later.
* See decode_nal_units().
*/
- s->current_picture_ptr->key_frame= 0;
+ s->current_picture_ptr->f.key_frame = 0;
s->current_picture_ptr->mmco_reset= 0;
assert(s->linesize && s->uvlinesize);
/* some macroblocks can be accessed before they're available in case of lost slices, mbaff or threading*/
memset(h->slice_table, -1, (s->mb_height*s->mb_stride-1) * sizeof(*h->slice_table));
-// s->decode= (s->flags&CODEC_FLAG_PSNR) || !s->encoding || s->current_picture.reference /*|| h->contains_intra*/ || 1;
+// s->decode = (s->flags & CODEC_FLAG_PSNR) || !s->encoding || s->current_picture.f.reference /*|| h->contains_intra*/ || 1;
// We mark the current picture as non-reference after allocating it, so
// that if we break out due to an error it can be released automatically
// get released even with set reference, besides SVQ3 and others do not
// mark frames as reference later "naturally".
if(s->codec_id != CODEC_ID_SVQ3)
- s->current_picture_ptr->reference= 0;
+ s->current_picture_ptr->f.reference = 0;
s->current_picture_ptr->field_poc[0]=
s->current_picture_ptr->field_poc[1]= INT_MAX;
Picture *cur = s->current_picture_ptr;
int i, pics, out_of_order, out_idx;
- s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_H264;
- s->current_picture_ptr->pict_type= s->pict_type;
+ s->current_picture_ptr->f.qscale_type = FF_QSCALE_TYPE_H264;
+ s->current_picture_ptr->f.pict_type = s->pict_type;
if (h->next_output_pic) return;
return;
}
- cur->interlaced_frame = 0;
- cur->repeat_pict = 0;
+ cur->f.interlaced_frame = 0;
+ cur->f.repeat_pict = 0;
/* Signal interlacing information externally. */
/* Prioritize picture timing SEI information over used decoding process if it exists. */
break;
case SEI_PIC_STRUCT_TOP_FIELD:
case SEI_PIC_STRUCT_BOTTOM_FIELD:
- cur->interlaced_frame = 1;
+ cur->f.interlaced_frame = 1;
break;
case SEI_PIC_STRUCT_TOP_BOTTOM:
case SEI_PIC_STRUCT_BOTTOM_TOP:
if (FIELD_OR_MBAFF_PICTURE)
- cur->interlaced_frame = 1;
+ cur->f.interlaced_frame = 1;
else
// try to flag soft telecine progressive
- cur->interlaced_frame = h->prev_interlaced_frame;
+ cur->f.interlaced_frame = h->prev_interlaced_frame;
break;
case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
// Signal the possibility of telecined film externally (pic_struct 5,6)
// From these hints, let the applications decide if they apply deinterlacing.
- cur->repeat_pict = 1;
+ cur->f.repeat_pict = 1;
break;
case SEI_PIC_STRUCT_FRAME_DOUBLING:
        // Force progressive here, as doubling an interlaced frame is a bad idea.
- cur->repeat_pict = 2;
+ cur->f.repeat_pict = 2;
break;
case SEI_PIC_STRUCT_FRAME_TRIPLING:
- cur->repeat_pict = 4;
+ cur->f.repeat_pict = 4;
break;
}
if ((h->sei_ct_type & 3) && h->sei_pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP)
- cur->interlaced_frame = (h->sei_ct_type & (1<<1)) != 0;
+ cur->f.interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
}else{
/* Derive interlacing flag from used decoding process. */
- cur->interlaced_frame = FIELD_OR_MBAFF_PICTURE;
+ cur->f.interlaced_frame = FIELD_OR_MBAFF_PICTURE;
}
- h->prev_interlaced_frame = cur->interlaced_frame;
+ h->prev_interlaced_frame = cur->f.interlaced_frame;
if (cur->field_poc[0] != cur->field_poc[1]){
/* Derive top_field_first from field pocs. */
- cur->top_field_first = cur->field_poc[0] < cur->field_poc[1];
+ cur->f.top_field_first = cur->field_poc[0] < cur->field_poc[1];
}else{
- if(cur->interlaced_frame || h->sps.pic_struct_present_flag){
+ if (cur->f.interlaced_frame || h->sps.pic_struct_present_flag) {
            /* Use picture timing SEI information. Even if it is information from a past frame, it is better than nothing. */
if(h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM
|| h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
- cur->top_field_first = 1;
+ cur->f.top_field_first = 1;
else
- cur->top_field_first = 0;
+ cur->f.top_field_first = 0;
}else{
/* Most likely progressive */
- cur->top_field_first = 0;
+ cur->f.top_field_first = 0;
}
}
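/* A worked example (hypothetical helper, not in the patch) of the
 * repeat_pict semantics documented earlier: extra_delay = repeat_pict /
 * (2*fps). With fps = 30, repeat_pict = 1 (soft telecine) shows the frame
 * for 1/30 + 1/60 s, and repeat_pict = 2 (frame doubling) for 2/30 s. */
static double display_duration(int repeat_pict, double fps)
{
    return 1.0 / fps + repeat_pict / (2.0 * fps);
}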
assert(pics <= MAX_DELAYED_PIC_COUNT);
h->delayed_pic[pics++] = cur;
- if(cur->reference == 0)
- cur->reference = DELAYED_PIC_REF;
+ if (cur->f.reference == 0)
+ cur->f.reference = DELAYED_PIC_REF;
out = h->delayed_pic[0];
out_idx = 0;
- for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame && !h->delayed_pic[i]->mmco_reset; i++)
+ for (i = 1; h->delayed_pic[i] && !h->delayed_pic[i]->f.key_frame && !h->delayed_pic[i]->mmco_reset; i++)
if(h->delayed_pic[i]->poc < out->poc){
out = h->delayed_pic[i];
out_idx = i;
}
- if(s->avctx->has_b_frames == 0 && (h->delayed_pic[0]->key_frame || h->delayed_pic[0]->mmco_reset))
+ if (s->avctx->has_b_frames == 0 && (h->delayed_pic[0]->f.key_frame || h->delayed_pic[0]->mmco_reset))
h->next_outputed_poc= INT_MIN;
out_of_order = out->poc < h->next_outputed_poc;
else if((out_of_order && pics-1 == s->avctx->has_b_frames && s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT)
|| (s->low_delay &&
((h->next_outputed_poc != INT_MIN && out->poc > h->next_outputed_poc + 2)
- || cur->pict_type == AV_PICTURE_TYPE_B)))
+ || cur->f.pict_type == AV_PICTURE_TYPE_B)))
{
s->low_delay = 0;
s->avctx->has_b_frames++;
}
if(out_of_order || pics > s->avctx->has_b_frames){
- out->reference &= ~DELAYED_PIC_REF;
+ out->f.reference &= ~DELAYED_PIC_REF;
out->owner2 = s; // for frame threading, the owner must be the second field's thread
// or else the first thread can release the picture and reuse it unsafely
for(i=out_idx; h->delayed_pic[i]; i++)
}
if(!out_of_order && pics > s->avctx->has_b_frames){
h->next_output_pic = out;
- if(out_idx==0 && h->delayed_pic[0] && (h->delayed_pic[0]->key_frame || h->delayed_pic[0]->mmco_reset)) {
+ if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f.key_frame || h->delayed_pic[0]->mmco_reset)) {
h->next_outputed_poc = INT_MIN;
} else
h->next_outputed_poc = out->poc;
const int mb_x= s->mb_x;
const int mb_y= s->mb_y;
const int mb_xy= h->mb_xy;
- const int mb_type= s->current_picture.mb_type[mb_xy];
+ const int mb_type = s->current_picture.f.mb_type[mb_xy];
uint8_t *dest_y, *dest_cb, *dest_cr;
int linesize, uvlinesize /*dct_offset*/;
int i, j;
const int is_h264 = !CONFIG_SVQ3_DECODER || simple || s->codec_id == CODEC_ID_H264;
void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride);
- dest_y = s->current_picture.data[0] + ((mb_x << pixel_shift) + mb_y * s->linesize ) * 16;
- dest_cb = s->current_picture.data[1] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8;
- dest_cr = s->current_picture.data[2] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8;
+ dest_y = s->current_picture.f.data[0] + ((mb_x << pixel_shift) + mb_y * s->linesize ) * 16;
+ dest_cb = s->current_picture.f.data[1] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8;
+ dest_cr = s->current_picture.f.data[2] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8;
s->dsp.prefetch(dest_y + (s->mb_x&3)*4*s->linesize + (64 << pixel_shift), s->linesize, 4);
s->dsp.prefetch(dest_cb + (s->mb_x&7)*s->uvlinesize + (64 << pixel_shift), dest_cr - dest_cb, 2);
const int mb_x= s->mb_x;
const int mb_y= s->mb_y;
const int mb_xy= h->mb_xy;
- const int mb_type= s->current_picture.mb_type[mb_xy];
+ const int mb_type = s->current_picture.f.mb_type[mb_xy];
uint8_t *dest[3];
int linesize;
int i, j, p;
for (p = 0; p < plane_count; p++)
{
- dest[p] = s->current_picture.data[p] + ((mb_x << pixel_shift) + mb_y * s->linesize) * 16;
+ dest[p] = s->current_picture.f.data[p] + ((mb_x << pixel_shift) + mb_y * s->linesize) * 16;
s->dsp.prefetch(dest[p] + (s->mb_x&3)*4*s->linesize + (64 << pixel_shift), s->linesize, 4);
}
void ff_h264_hl_decode_mb(H264Context *h){
MpegEncContext * const s = &h->s;
const int mb_xy= h->mb_xy;
- const int mb_type= s->current_picture.mb_type[mb_xy];
+ const int mb_type = s->current_picture.f.mb_type[mb_xy];
int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || s->qscale == 0;
if (CHROMA444) {
int i;
for(i=0; i<MAX_DELAYED_PIC_COUNT; i++) {
if(h->delayed_pic[i])
- h->delayed_pic[i]->reference= 0;
+ h->delayed_pic[i]->f.reference = 0;
h->delayed_pic[i]= NULL;
}
h->outputed_poc=h->next_outputed_poc= INT_MIN;
h->prev_interlaced_frame = 1;
idr(h);
if(h->s.current_picture_ptr)
- h->s.current_picture_ptr->reference= 0;
+ h->s.current_picture_ptr->f.reference = 0;
h->s.first_field= 0;
ff_h264_reset_sei(h);
ff_mpeg_flush(avctx);
* be fixed. */
if (h->short_ref_count) {
if (prev) {
- av_image_copy(h->short_ref[0]->data, h->short_ref[0]->linesize,
- (const uint8_t**)prev->data, prev->linesize,
+ av_image_copy(h->short_ref[0]->f.data, h->short_ref[0]->f.linesize,
+ (const uint8_t**)prev->f.data, prev->f.linesize,
s->avctx->pix_fmt, s->mb_width*16, s->mb_height*16);
h->short_ref[0]->poc = prev->poc+2;
}
/* See if we have a decoded first field looking for a pair... */
if (s0->first_field) {
assert(s0->current_picture_ptr);
- assert(s0->current_picture_ptr->data[0]);
+ assert(s0->current_picture_ptr->f.data[0]);
assert(s0->current_picture_ptr->reference != DELAYED_PIC_REF);
/* figure out if we have a complementary field pair */
} else {
if (h->nal_ref_idc &&
- s0->current_picture_ptr->reference &&
+ s0->current_picture_ptr->f.reference &&
s0->current_picture_ptr->frame_num != h->frame_num) {
/*
* This and previous field were reference, but had
int *ref2frm= h->ref2frm[h->slice_num&(MAX_SLICES-1)][j];
for(i=0; i<16; i++){
id_list[i]= 60;
- if(h->ref_list[j][i].data[0]){
+ if (h->ref_list[j][i].f.data[0]) {
int k;
- uint8_t *base= h->ref_list[j][i].base[0];
+ uint8_t *base = h->ref_list[j][i].f.base[0];
for(k=0; k<h->short_ref_count; k++)
- if(h->short_ref[k]->base[0] == base){
+ if (h->short_ref[k]->f.base[0] == base) {
id_list[i]= k;
break;
}
for(k=0; k<h->long_ref_count; k++)
- if(h->long_ref[k] && h->long_ref[k]->base[0] == base){
+ if (h->long_ref[k] && h->long_ref[k]->f.base[0] == base) {
id_list[i]= h->short_ref_count + k;
break;
}
ref2frm[1]= -1;
for(i=0; i<16; i++)
ref2frm[i+2]= 4*id_list[i]
- +(h->ref_list[j][i].reference&3);
+ + (h->ref_list[j][i].f.reference & 3);
ref2frm[18+0]=
ref2frm[18+1]= -1;
for(i=16; i<48; i++)
ref2frm[i+4]= 4*id_list[(i-16)>>1]
- +(h->ref_list[j][i].reference&3);
+ + (h->ref_list[j][i].f.reference & 3);
}
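/* Illustrative only: each ref2frm[] entry packs a picture id with the
 * field parity taken from f.reference, which holds PICT_TOP_FIELD (1),
 * PICT_BOTTOM_FIELD (2) or PICT_FRAME (3); the low two bits are enough
 * to tell apart the two fields of one reference frame. */
static inline int pack_ref2frm(int pic_id, int reference)
{
    return 4 * pic_id + (reference & 3);   /* same packing as above */
}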
//FIXME: fix draw_edges+PAFF+frame threads
const int b_xy= h->mb2b_xy[top_xy] + 3*b_stride;
const int b8_xy= 4*top_xy + 2;
int (*ref2frm)[64] = h->ref2frm[ h->slice_table[top_xy]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2);
- AV_COPY128(mv_dst - 1*8, s->current_picture.motion_val[list][b_xy + 0]);
+ AV_COPY128(mv_dst - 1*8, s->current_picture.f.motion_val[list][b_xy + 0]);
ref_cache[0 - 1*8]=
- ref_cache[1 - 1*8]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 0]];
+ ref_cache[1 - 1*8]= ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 0]];
ref_cache[2 - 1*8]=
- ref_cache[3 - 1*8]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 1]];
+ ref_cache[3 - 1*8]= ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 1]];
}else{
AV_ZERO128(mv_dst - 1*8);
AV_WN32A(&ref_cache[0 - 1*8], ((LIST_NOT_USED)&0xFF)*0x01010101u);
const int b_xy= h->mb2b_xy[left_xy[LTOP]] + 3;
const int b8_xy= 4*left_xy[LTOP] + 1;
int (*ref2frm)[64] = h->ref2frm[ h->slice_table[left_xy[LTOP]]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2);
- AV_COPY32(mv_dst - 1 + 0, s->current_picture.motion_val[list][b_xy + b_stride*0]);
- AV_COPY32(mv_dst - 1 + 8, s->current_picture.motion_val[list][b_xy + b_stride*1]);
- AV_COPY32(mv_dst - 1 +16, s->current_picture.motion_val[list][b_xy + b_stride*2]);
- AV_COPY32(mv_dst - 1 +24, s->current_picture.motion_val[list][b_xy + b_stride*3]);
+ AV_COPY32(mv_dst - 1 + 0, s->current_picture.f.motion_val[list][b_xy + b_stride*0]);
+ AV_COPY32(mv_dst - 1 + 8, s->current_picture.f.motion_val[list][b_xy + b_stride*1]);
+ AV_COPY32(mv_dst - 1 + 16, s->current_picture.f.motion_val[list][b_xy + b_stride*2]);
+ AV_COPY32(mv_dst - 1 + 24, s->current_picture.f.motion_val[list][b_xy + b_stride*3]);
ref_cache[-1 + 0]=
- ref_cache[-1 + 8]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 2*0]];
+ ref_cache[-1 + 8]= ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 2*0]];
ref_cache[-1 + 16]=
- ref_cache[-1 + 24]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 2*1]];
+ ref_cache[-1 + 24]= ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 2*1]];
}else{
AV_ZERO32(mv_dst - 1 + 0);
AV_ZERO32(mv_dst - 1 + 8);
}
{
- int8_t *ref = &s->current_picture.ref_index[list][4*mb_xy];
+ int8_t *ref = &s->current_picture.f.ref_index[list][4*mb_xy];
int (*ref2frm)[64] = h->ref2frm[ h->slice_num&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2);
uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101;
uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]],ref2frm[list][ref[3]])&0x00FF00FF)*0x0101;
}
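/* Illustrative only: the "* 0x0101" above is a byte-replication trick.
 * On little endian, pack16to32() puts the two 8-bit reference indices at
 * bits 0 and 16; masking with 0x00FF00FF and multiplying by 0x0101 then
 * copies each byte into its neighbour, giving AA AA BB BB in one go. */
#include <stdint.h>

static inline uint32_t replicate_ref_pair(uint8_t a, uint8_t b)
{
    uint32_t packed = (uint32_t)a | ((uint32_t)b << 16); /* 0x00BB00AA */
    return (packed & 0x00FF00FF) * 0x0101;               /* 0xBBBBAAAA */
}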
{
- int16_t (*mv_src)[2] = &s->current_picture.motion_val[list][4*s->mb_x + 4*s->mb_y*b_stride];
+ int16_t (*mv_src)[2] = &s->current_picture.f.motion_val[list][4*s->mb_x + 4*s->mb_y*b_stride];
AV_COPY128(mv_dst + 8*0, mv_src + 0*b_stride);
AV_COPY128(mv_dst + 8*1, mv_src + 1*b_stride);
AV_COPY128(mv_dst + 8*2, mv_src + 2*b_stride);
left_xy[LBOT] = left_xy[LTOP] = mb_xy-1;
if(FRAME_MBAFF){
- const int left_mb_field_flag = IS_INTERLACED(s->current_picture.mb_type[mb_xy-1]);
+ const int left_mb_field_flag = IS_INTERLACED(s->current_picture.f.mb_type[mb_xy - 1]);
const int curr_mb_field_flag = IS_INTERLACED(mb_type);
if(s->mb_y&1){
if (left_mb_field_flag != curr_mb_field_flag) {
}
}else{
if(curr_mb_field_flag){
- top_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy ]>>7)&1)-1);
+ top_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy] >> 7) & 1) - 1);
}
if (left_mb_field_flag != curr_mb_field_flag) {
left_xy[LBOT] += s->mb_stride;
//for sufficiently low qp, filtering wouldn't do anything
//this is a conservative estimate: could also check beta_offset and more accurate chroma_qp
int qp_thresh = h->qp_thresh; //FIXME strictly we should store qp_thresh for each mb of a slice
- int qp = s->current_picture.qscale_table[mb_xy];
+ int qp = s->current_picture.f.qscale_table[mb_xy];
if(qp <= qp_thresh
- && (left_xy[LTOP]<0 || ((qp + s->current_picture.qscale_table[left_xy[LTOP]] + 1)>>1) <= qp_thresh)
- && (top_xy <0 || ((qp + s->current_picture.qscale_table[top_xy ] + 1)>>1) <= qp_thresh)){
+ && (left_xy[LTOP] < 0 || ((qp + s->current_picture.f.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh)
+ && (top_xy < 0 || ((qp + s->current_picture.f.qscale_table[top_xy ] + 1) >> 1) <= qp_thresh)) {
if(!FRAME_MBAFF)
return 1;
- if( (left_xy[LTOP]< 0 || ((qp + s->current_picture.qscale_table[left_xy[LBOT] ] + 1)>>1) <= qp_thresh)
- && (top_xy < s->mb_stride || ((qp + s->current_picture.qscale_table[top_xy -s->mb_stride] + 1)>>1) <= qp_thresh))
+ if ((left_xy[LTOP] < 0 || ((qp + s->current_picture.f.qscale_table[left_xy[LBOT] ] + 1) >> 1) <= qp_thresh) &&
+ (top_xy < s->mb_stride || ((qp + s->current_picture.f.qscale_table[top_xy - s->mb_stride] + 1) >> 1) <= qp_thresh))
return 1;
}
}
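/* Illustrative only: deblocking strength across an edge depends on the
 * rounded average of the two macroblocks' quantizers, which is why the
 * early exit above tests that average, not the neighbour's QP alone. */
static inline int edge_qp(int qp_cur, int qp_neighbour)
{
    return (qp_cur + qp_neighbour + 1) >> 1;   /* as in the tests above */
}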
- top_type = s->current_picture.mb_type[top_xy];
- left_type[LTOP] = s->current_picture.mb_type[left_xy[LTOP]];
- left_type[LBOT] = s->current_picture.mb_type[left_xy[LBOT]];
+ top_type = s->current_picture.f.mb_type[top_xy];
+ left_type[LTOP] = s->current_picture.f.mb_type[left_xy[LTOP]];
+ left_type[LBOT] = s->current_picture.f.mb_type[left_xy[LBOT]];
if(h->deblocking_filter == 2){
if(h->slice_table[top_xy ] != h->slice_num) top_type= 0;
if(h->slice_table[left_xy[LBOT]] != h->slice_num) left_type[LTOP]= left_type[LBOT]= 0;
int mb_xy, mb_type;
mb_xy = h->mb_xy = mb_x + mb_y*s->mb_stride;
h->slice_num= h->slice_table[mb_xy];
- mb_type= s->current_picture.mb_type[mb_xy];
+ mb_type = s->current_picture.f.mb_type[mb_xy];
h->list_count= h->list_counts[mb_xy];
if(FRAME_MBAFF)
s->mb_x= mb_x;
s->mb_y= mb_y;
- dest_y = s->current_picture.data[0] + ((mb_x << pixel_shift) + mb_y * s->linesize ) * 16;
- dest_cb = s->current_picture.data[1] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * (8 << CHROMA444);
- dest_cr = s->current_picture.data[2] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * (8 << CHROMA444);
+ dest_y = s->current_picture.f.data[0] + ((mb_x << pixel_shift) + mb_y * s->linesize ) * 16;
+ dest_cb = s->current_picture.f.data[1] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * (8 << CHROMA444);
+ dest_cr = s->current_picture.f.data[2] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * (8 << CHROMA444);
//FIXME simplify above
if (MB_FIELD) {
backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, CHROMA444, 0);
if(fill_filter_caches(h, mb_type))
continue;
- h->chroma_qp[0] = get_chroma_qp(h, 0, s->current_picture.qscale_table[mb_xy]);
- h->chroma_qp[1] = get_chroma_qp(h, 1, s->current_picture.qscale_table[mb_xy]);
+ h->chroma_qp[0] = get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mb_xy]);
+ h->chroma_qp[1] = get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mb_xy]);
if (FRAME_MBAFF) {
ff_h264_filter_mb (h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
MpegEncContext * const s = &h->s;
const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
int mb_type = (h->slice_table[mb_xy-1] == h->slice_num)
- ? s->current_picture.mb_type[mb_xy-1]
+ ? s->current_picture.f.mb_type[mb_xy - 1]
: (h->slice_table[mb_xy-s->mb_stride] == h->slice_num)
- ? s->current_picture.mb_type[mb_xy-s->mb_stride]
+ ? s->current_picture.f.mb_type[mb_xy - s->mb_stride]
: 0;
h->mb_mbaff = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
}
if((err = decode_slice_header(hx, h)))
break;
- s->current_picture_ptr->key_frame |=
+ s->current_picture_ptr->f.key_frame |=
(hx->nal_unit_type == NAL_IDR_SLICE) ||
(h->sei_recovery_frame_cnt >= 0);
//FIXME factorize this with the output code below
out = h->delayed_pic[0];
out_idx = 0;
- for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame && !h->delayed_pic[i]->mmco_reset; i++)
+ for (i = 1; h->delayed_pic[i] && !h->delayed_pic[i]->f.key_frame && !h->delayed_pic[i]->mmco_reset; i++)
if(h->delayed_pic[i]->poc < out->poc){
out = h->delayed_pic[i];
out_idx = i;
left_xy[LBOT] = left_xy[LTOP] = mb_xy-1;
h->left_block = left_block_options[0];
if(FRAME_MBAFF){
- const int left_mb_field_flag = IS_INTERLACED(s->current_picture.mb_type[mb_xy-1]);
- const int curr_mb_field_flag = IS_INTERLACED(mb_type);
+ const int left_mb_field_flag = IS_INTERLACED(s->current_picture.f.mb_type[mb_xy - 1]);
+ const int curr_mb_field_flag = IS_INTERLACED(mb_type);
if(s->mb_y&1){
if (left_mb_field_flag != curr_mb_field_flag) {
left_xy[LBOT] = left_xy[LTOP] = mb_xy - s->mb_stride - 1;
}
}else{
if(curr_mb_field_flag){
- topleft_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy - 1]>>7)&1)-1);
- topright_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy + 1]>>7)&1)-1);
- top_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy ]>>7)&1)-1);
+ topleft_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy - 1] >> 7) & 1) - 1);
+ topright_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy + 1] >> 7) & 1) - 1);
+ top_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy ] >> 7) & 1) - 1);
}
if (left_mb_field_flag != curr_mb_field_flag) {
if (curr_mb_field_flag) {
h->left_mb_xy[LBOT] = left_xy[LBOT];
//FIXME do we need all in the context?
- h->topleft_type = s->current_picture.mb_type[topleft_xy] ;
- h->top_type = s->current_picture.mb_type[top_xy] ;
- h->topright_type= s->current_picture.mb_type[topright_xy];
- h->left_type[LTOP] = s->current_picture.mb_type[left_xy[LTOP]] ;
- h->left_type[LBOT] = s->current_picture.mb_type[left_xy[LBOT]] ;
+ h->topleft_type = s->current_picture.f.mb_type[topleft_xy];
+ h->top_type = s->current_picture.f.mb_type[top_xy];
+ h->topright_type = s->current_picture.f.mb_type[topright_xy];
+ h->left_type[LTOP] = s->current_picture.f.mb_type[left_xy[LTOP]];
+ h->left_type[LBOT] = s->current_picture.f.mb_type[left_xy[LBOT]];
if(FMO){
if(h->slice_table[topleft_xy ] != h->slice_num) h->topleft_type = 0;
h->left_samples_available&= 0xFF5F;
}
}else{
- int left_typei = s->current_picture.mb_type[left_xy[LTOP] + s->mb_stride];
+ int left_typei = s->current_picture.f.mb_type[left_xy[LTOP] + s->mb_stride];
assert(left_xy[LTOP] == left_xy[LBOT]);
if(!((left_typei & type_mask) && (left_type[LTOP] & type_mask))){
int b_stride = h->b_stride;
for(list=0; list<h->list_count; list++){
int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
- int8_t *ref = s->current_picture.ref_index[list];
+ int8_t *ref = s->current_picture.f.ref_index[list];
int16_t (*mv_cache)[2] = &h->mv_cache[list][scan8[0]];
- int16_t (*mv)[2] = s->current_picture.motion_val[list];
+ int16_t (*mv)[2] = s->current_picture.f.motion_val[list];
if(!USES_LIST(mb_type, list)){
continue;
}
static av_always_inline void write_back_motion_list(H264Context *h, MpegEncContext * const s, int b_stride,
int b_xy, int b8_xy, int mb_type, int list )
{
- int16_t (*mv_dst)[2] = &s->current_picture.motion_val[list][b_xy];
+ int16_t (*mv_dst)[2] = &s->current_picture.f.motion_val[list][b_xy];
int16_t (*mv_src)[2] = &h->mv_cache[list][scan8[0]];
AV_COPY128(mv_dst + 0*b_stride, mv_src + 8*0);
AV_COPY128(mv_dst + 1*b_stride, mv_src + 8*1);
}
{
- int8_t *ref_index = &s->current_picture.ref_index[list][b8_xy];
+ int8_t *ref_index = &s->current_picture.f.ref_index[list][b8_xy];
int8_t *ref_cache = h->ref_cache[list];
ref_index[0+0*2]= ref_cache[scan8[0]];
ref_index[1+0*2]= ref_cache[scan8[4]];
if(USES_LIST(mb_type, 0)){
write_back_motion_list(h, s, b_stride, b_xy, b8_xy, mb_type, 0);
}else{
- fill_rectangle(&s->current_picture.ref_index[0][b8_xy], 2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
+ fill_rectangle(&s->current_picture.f.ref_index[0][b8_xy],
+ 2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
}
if(USES_LIST(mb_type, 1)){
write_back_motion_list(h, s, b_stride, b_xy, b8_xy, mb_type, 1);
}
write_back_motion(h, mb_type);
- s->current_picture.mb_type[mb_xy]= mb_type;
- s->current_picture.qscale_table[mb_xy]= s->qscale;
+ s->current_picture.f.mb_type[mb_xy] = mb_type;
+ s->current_picture.f.qscale_table[mb_xy] = s->qscale;
h->slice_table[ mb_xy ]= h->slice_num;
h->prev_mb_skipped= 1;
}
unsigned long ctx = 0;
- ctx += h->mb_field_decoding_flag & !!s->mb_x; //for FMO:(s->current_picture.mb_type[mba_xy]>>7)&(h->slice_table[mba_xy] == h->slice_num);
- ctx += (s->current_picture.mb_type[mbb_xy]>>7)&(h->slice_table[mbb_xy] == h->slice_num);
+ ctx += h->mb_field_decoding_flag & !!s->mb_x; //for FMO:(s->current_picture.f.mb_type[mba_xy] >> 7) & (h->slice_table[mba_xy] == h->slice_num);
+ ctx += (s->current_picture.f.mb_type[mbb_xy] >> 7) & (h->slice_table[mbb_xy] == h->slice_num);
return get_cabac_noinline( &h->cabac, &(h->cabac_state+70)[ctx] );
}
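/* Illustrative only, assuming MB_TYPE_INTERLACED is the 0x80 bit of
 * mb_type: "(mb_type >> 7) & 1" reads a neighbour's field-coded flag
 * straight from the mb_type table, and subtracting 1 from that flag
 * yields the all-ones/all-zeroes mask used for the branchless
 * "top_xy += s->mb_stride & mask" adjustments elsewhere in this patch. */
#include <stdint.h>

static inline int mb_is_field_coded(uint32_t mb_type)
{
    return (mb_type >> 7) & 1;   /* == !!IS_INTERLACED(mb_type) */
}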
mba_xy = mb_xy - 1;
if( (mb_y&1)
&& h->slice_table[mba_xy] == h->slice_num
- && MB_FIELD == !!IS_INTERLACED( s->current_picture.mb_type[mba_xy] ) )
+ && MB_FIELD == !!IS_INTERLACED( s->current_picture.f.mb_type[mba_xy] ) )
mba_xy += s->mb_stride;
if( MB_FIELD ){
mbb_xy = mb_xy - s->mb_stride;
if( !(mb_y&1)
&& h->slice_table[mbb_xy] == h->slice_num
- && IS_INTERLACED( s->current_picture.mb_type[mbb_xy] ) )
+ && IS_INTERLACED( s->current_picture.f.mb_type[mbb_xy] ) )
mbb_xy -= s->mb_stride;
}else
mbb_xy = mb_x + (mb_y-1)*s->mb_stride;
mbb_xy = mb_xy - (s->mb_stride << FIELD_PICTURE);
}
- if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mba_xy] ))
+ if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP( s->current_picture.f.mb_type[mba_xy] ))
ctx++;
- if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mbb_xy] ))
+ if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.f.mb_type[mbb_xy] ))
ctx++;
if( h->slice_type_nos == AV_PICTURE_TYPE_B )
/* read skip flags */
if( skip ) {
if( FRAME_MBAFF && (s->mb_y&1)==0 ){
- s->current_picture.mb_type[mb_xy] = MB_TYPE_SKIP;
+ s->current_picture.f.mb_type[mb_xy] = MB_TYPE_SKIP;
h->next_mb_skipped = decode_cabac_mb_skip( h, s->mb_x, s->mb_y+1 );
if(!h->next_mb_skipped)
h->mb_mbaff = h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h);
h->cbp_table[mb_xy] = 0xf7ef;
h->chroma_pred_mode_table[mb_xy] = 0;
// In deblocking, the quantizer is 0
- s->current_picture.qscale_table[mb_xy]= 0;
+ s->current_picture.f.qscale_table[mb_xy] = 0;
// All coeffs are present
memset(h->non_zero_count[mb_xy], 16, 48);
- s->current_picture.mb_type[mb_xy]= mb_type;
+ s->current_picture.f.mb_type[mb_xy] = mb_type;
h->last_qscale_diff = 0;
return 0;
}
AV_WN32A(&nnz_cache[4+8*10], top_empty);
}
}
- s->current_picture.mb_type[mb_xy]= mb_type;
+ s->current_picture.f.mb_type[mb_xy] = mb_type;
if( cbp || IS_INTRA16x16( mb_type ) ) {
const uint8_t *scan, *scan8x8;
h->last_qscale_diff = 0;
}
- s->current_picture.qscale_table[mb_xy]= s->qscale;
+ s->current_picture.f.qscale_table[mb_xy] = s->qscale;
write_back_non_zero_count(h);
if(MB_MBAFF){
}
// In deblocking, the quantizer is 0
- s->current_picture.qscale_table[mb_xy]= 0;
+ s->current_picture.f.qscale_table[mb_xy] = 0;
// All coeffs are present
memset(h->non_zero_count[mb_xy], 16, 48);
- s->current_picture.mb_type[mb_xy]= mb_type;
+ s->current_picture.f.mb_type[mb_xy] = mb_type;
return 0;
}
}
h->cbp=
h->cbp_table[mb_xy]= cbp;
- s->current_picture.mb_type[mb_xy]= mb_type;
+ s->current_picture.f.mb_type[mb_xy] = mb_type;
if(cbp || IS_INTRA16x16(mb_type)){
int i4x4, chroma_idx;
fill_rectangle(&h->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1);
fill_rectangle(&h->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1);
}
- s->current_picture.qscale_table[mb_xy]= s->qscale;
+ s->current_picture.f.qscale_table[mb_xy] = s->qscale;
write_back_non_zero_count(h);
if(MB_MBAFF){
poc= (poc&~3) + rfield + 1;
for(j=start; j<end; j++){
- if(4*h->ref_list[0][j].frame_num + (h->ref_list[0][j].reference&3) == poc){
+ if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].f.reference & 3) == poc) {
int cur_ref= mbafi ? (j-16)^field : j;
map[list][2*old_ref + (rfield^field) + 16] = cur_ref;
if(rfield == field || !interl)
Picture * const cur = s->current_picture_ptr;
int list, j, field;
int sidx= (s->picture_structure&1)^1;
- int ref1sidx= (ref1->reference&1)^1;
+ int ref1sidx = (ref1->f.reference & 1) ^ 1;
for(list=0; list<2; list++){
cur->ref_count[sidx][list] = h->ref_count[list];
for(j=0; j<h->ref_count[list]; j++)
- cur->ref_poc[sidx][list][j] = 4*h->ref_list[list][j].frame_num + (h->ref_list[list][j].reference&3);
+ cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num + (h->ref_list[list][j].f.reference & 3);
}
if(s->picture_structure == PICT_FRAME){
int *col_poc = h->ref_list[1]->field_poc;
h->col_parity= (FFABS(col_poc[0] - cur_poc) >= FFABS(col_poc[1] - cur_poc));
ref1sidx=sidx= h->col_parity;
- }else if(!(s->picture_structure & h->ref_list[1][0].reference) && !h->ref_list[1][0].mbaff){ // FL -> FL & differ parity
- h->col_fieldoff= 2*(h->ref_list[1][0].reference) - 3;
+ } else if (!(s->picture_structure & h->ref_list[1][0].f.reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity
+ h->col_fieldoff = 2 * h->ref_list[1][0].f.reference - 3;
}
- if(cur->pict_type != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
+ if (cur->f.pict_type != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
return;
for(list=0; list<2; list++){
static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y)
{
- int ref_field = ref->reference - 1;
+ int ref_field = ref->f.reference - 1;
int ref_field_picture = ref->field_picture;
int ref_height = 16*h->s.mb_height >> ref_field_picture;
return;
}
- if(IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])){ // AFL/AFR/FR/FL -> AFL/FL
- if(!IS_INTERLACED(*mb_type)){ // AFR/FR -> AFL/FL
+ if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
+ if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
mb_y = (s->mb_y&~1) + h->col_parity;
mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride;
b8_stride = 0;
if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
mb_y = s->mb_y&~1;
mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride;
- mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
- mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + s->mb_stride];
+ mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
+ mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
b8_stride = 2+4*s->mb_stride;
b4_stride *= 6;
}else{ // AFR/FR -> AFR/FR
single_col:
mb_type_col[0] =
- mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];
+ mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];
sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
await_reference_mb_row(h, &h->ref_list[1][0], mb_y);
- l1mv0 = &h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
- l1mv1 = &h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
- l1ref0 = &h->ref_list[1][0].ref_index [0][4*mb_xy];
- l1ref1 = &h->ref_list[1][0].ref_index [1][4*mb_xy];
+ l1mv0 = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
+ l1mv1 = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
+ l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
+ l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
if(!b8_stride){
if(s->mb_y&1){
l1ref0 += 2;
await_reference_mb_row(h, &h->ref_list[1][0], s->mb_y + !!IS_INTERLACED(*mb_type));
- if(IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])){ // AFL/AFR/FR/FL -> AFL/FL
- if(!IS_INTERLACED(*mb_type)){ // AFR/FR -> AFL/FL
+ if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
+ if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
mb_y = (s->mb_y&~1) + h->col_parity;
mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride;
b8_stride = 0;
if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
mb_y = s->mb_y&~1;
mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride;
- mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
- mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + s->mb_stride];
+ mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
+ mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
b8_stride = 2+4*s->mb_stride;
b4_stride *= 6;
}else{ // AFR/FR -> AFR/FR
single_col:
mb_type_col[0] =
- mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];
+ mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];
sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
await_reference_mb_row(h, &h->ref_list[1][0], mb_y);
- l1mv0 = &h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
- l1mv1 = &h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
- l1ref0 = &h->ref_list[1][0].ref_index [0][4*mb_xy];
- l1ref1 = &h->ref_list[1][0].ref_index [1][4*mb_xy];
+ l1mv0 = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
+ l1mv1 = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
+ l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
+ l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
if(!b8_stride){
if(s->mb_y&1){
l1ref0 += 2;
left_type= h->left_type[LTOP];
top_type= h->top_type;
- mb_type = s->current_picture.mb_type[mb_xy];
- qp = s->current_picture.qscale_table[mb_xy];
- qp0 = s->current_picture.qscale_table[mb_xy-1];
- qp1 = s->current_picture.qscale_table[h->top_mb_xy];
+ mb_type = s->current_picture.f.mb_type[mb_xy];
+ qp = s->current_picture.f.qscale_table[mb_xy];
+ qp0 = s->current_picture.f.qscale_table[mb_xy - 1];
+ qp1 = s->current_picture.f.qscale_table[h->top_mb_xy];
qpc = get_chroma_qp( h, 0, qp );
qpc0 = get_chroma_qp( h, 0, qp0 );
qpc1 = get_chroma_qp( h, 0, qp1 );
for(j=0; j<2; j++, mbn_xy += s->mb_stride){
DECLARE_ALIGNED(8, int16_t, bS)[4];
int qp;
- if( IS_INTRA(mb_type|s->current_picture.mb_type[mbn_xy]) ) {
+ if (IS_INTRA(mb_type | s->current_picture.f.mb_type[mbn_xy])) {
AV_WN64A(bS, 0x0003000300030003ULL);
} else {
- if(!CABAC && IS_8x8DCT(s->current_picture.mb_type[mbn_xy])){
+ if (!CABAC && IS_8x8DCT(s->current_picture.f.mb_type[mbn_xy])) {
bS[0]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+0]);
bS[1]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+1]);
bS[2]= 1+((h->cbp_table[mbn_xy] & 0x8000)||h->non_zero_count_cache[scan8[0]+2]);
}
// Do not use s->qscale as the luma quantizer because it does not have
// the same value in IPCM macroblocks.
- qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1;
+ qp = (s->current_picture.f.qscale_table[mb_xy] + s->current_picture.f.qscale_table[mbn_xy] + 1) >> 1;
tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);
{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, h );
- chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1;
- chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1;
+ chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mbn_xy]) + 1) >> 1;
+ chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mbn_xy]) + 1) >> 1;
if (chroma) {
if (chroma444) {
filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], h);
// Do not use s->qscale as the luma quantizer because it does not have
// the same value in IPCM macroblocks.
if(bS[0]+bS[1]+bS[2]+bS[3]){
- qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbm_xy] + 1 ) >> 1;
+ qp = (s->current_picture.f.qscale_table[mb_xy] + s->current_picture.f.qscale_table[mbm_xy] + 1) >> 1;
//tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]);
tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
//{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
- chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1;
- chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1;
+ chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mbm_xy]) + 1) >> 1;
+ chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mbm_xy]) + 1) >> 1;
if( dir == 0 ) {
filter_mb_edgev( &img_y[0], linesize, bS, qp, h );
if (chroma) {
/* Filter edge */
// Do not use s->qscale as the luma quantizer because it does not have
// the same value in IPCM macroblocks.
- qp = s->current_picture.qscale_table[mb_xy];
+ qp = s->current_picture.f.qscale_table[mb_xy];
//tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]);
tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
//{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
MpegEncContext * const s = &h->s;
const int mb_xy= mb_x + mb_y*s->mb_stride;
- const int mb_type = s->current_picture.mb_type[mb_xy];
+ const int mb_type = s->current_picture.f.mb_type[mb_xy];
const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
int first_vertical_edge_done = 0;
av_unused int dir;
}
}
- mb_qp = s->current_picture.qscale_table[mb_xy];
- mbn0_qp = s->current_picture.qscale_table[h->left_mb_xy[0]];
- mbn1_qp = s->current_picture.qscale_table[h->left_mb_xy[1]];
+ mb_qp = s->current_picture.f.qscale_table[mb_xy];
+ mbn0_qp = s->current_picture.f.qscale_table[h->left_mb_xy[0]];
+ mbn1_qp = s->current_picture.f.qscale_table[h->left_mb_xy[1]];
qp[0] = ( mb_qp + mbn0_qp + 1 ) >> 1;
bqp[0] = ( get_chroma_qp( h, 0, mb_qp ) +
get_chroma_qp( h, 0, mbn0_qp ) + 1 ) >> 1;
const int mb_type = mb_types[xy+(y4>>2)*s->mb_stride];\
if(!USES_LIST(mb_type,list))\
return LIST_NOT_USED;\
- mv = s->current_picture_ptr->motion_val[list][h->mb2b_xy[xy]+3 + y4*h->b_stride];\
+ mv = s->current_picture_ptr->f.motion_val[list][h->mb2b_xy[xy] + 3 + y4*h->b_stride];\
h->mv_cache[list][scan8[0]-2][0] = mv[0];\
h->mv_cache[list][scan8[0]-2][1] = mv[1] MV_OP;\
- return s->current_picture_ptr->ref_index[list][4*xy+1 + (y4&~1)] REF_OP;
+ return s->current_picture_ptr->f.ref_index[list][4*xy + 1 + (y4 & ~1)] REF_OP;
if(topright_ref == PART_NOT_AVAILABLE
&& i >= scan8[0]+8 && (i&7)==4
&& h->ref_cache[list][scan8[0]-1] != PART_NOT_AVAILABLE){
- const uint32_t *mb_types = s->current_picture_ptr->mb_type;
+ const uint32_t *mb_types = s->current_picture_ptr->f.mb_type;
const int16_t *mv;
AV_ZERO32(h->mv_cache[list][scan8[0]-2]);
*C = h->mv_cache[list][scan8[0]-2];
DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = {0};
DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
MpegEncContext * const s = &h->s;
- int8_t *ref = s->current_picture.ref_index[0];
- int16_t (*mv)[2] = s->current_picture.motion_val[0];
+ int8_t *ref = s->current_picture.f.ref_index[0];
+ int16_t (*mv)[2] = s->current_picture.f.motion_val[0];
int top_ref, left_ref, diagonal_ref, match_count, mx, my;
const int16_t *A, *B, *C;
int b_stride = h->b_stride;
int i;
for (i = 0; i < 4; ++i) {
if (parity == PICT_BOTTOM_FIELD)
- pic->data[i] += pic->linesize[i];
- pic->reference = parity;
- pic->linesize[i] *= 2;
+ pic->f.data[i] += pic->f.linesize[i];
+ pic->f.reference = parity;
+ pic->f.linesize[i] *= 2;
}
pic->poc= pic->field_poc[parity == PICT_BOTTOM_FIELD];
}
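/* Illustrative only: treating one parity of a frame as a standalone
 * field just re-points the plane pointers and doubles the strides; the
 * bottom field starts one line down, and both fields then step two
 * source lines per field line. */
#include <stdint.h>

static void view_as_field(uint8_t *data[4], int linesize[4], int bottom)
{
    int i;
    for (i = 0; i < 4; i++) {
        if (bottom)
            data[i] += linesize[i];   /* skip the top field's line */
        linesize[i] *= 2;             /* stride over the other field */
    }
}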
static int split_field_copy(Picture *dest, Picture *src,
int parity, int id_add){
- int match = !!(src->reference & parity);
+ int match = !!(src->f.reference & parity);
if (match) {
*dest = *src;
int index=0;
while(i[0]<len || i[1]<len){
- while(i[0]<len && !(in[ i[0] ] && (in[ i[0] ]->reference & sel)))
+ while (i[0] < len && !(in[ i[0] ] && (in[ i[0] ]->f.reference & sel)))
i[0]++;
- while(i[1]<len && !(in[ i[1] ] && (in[ i[1] ]->reference & (sel^3))))
+ while (i[1] < len && !(in[ i[1] ] && (in[ i[1] ]->f.reference & (sel^3))))
i[1]++;
if(i[0] < len){
in[ i[0] ]->pic_id= is_long ? i[0] : in[ i[0] ]->frame_num;
}
if(lens[0] == lens[1] && lens[1] > 1){
- for(i=0; h->default_ref_list[0][i].data[0] == h->default_ref_list[1][i].data[0] && i<lens[0]; i++);
+ for (i = 0; h->default_ref_list[0][i].f.data[0] == h->default_ref_list[1][i].f.data[0] && i < lens[0]; i++);
if(i == lens[0])
FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]);
}
for(i= h->short_ref_count-1; i>=0; i--){
ref = h->short_ref[i];
- assert(ref->reference);
+ assert(ref->f.reference);
assert(!ref->long_ref);
if(
ref->frame_num == frame_num &&
- (ref->reference & pic_structure)
+ (ref->f.reference & pic_structure)
)
break;
}
return -1;
}
ref = h->long_ref[long_idx];
- assert(!(ref && !ref->reference));
- if(ref && (ref->reference & pic_structure)){
+ assert(!(ref && !ref->f.reference));
+ if (ref && (ref->f.reference & pic_structure)) {
ref->pic_id= pic_id;
assert(ref->long_ref);
i=0;
}
for(list=0; list<h->list_count; list++){
for(index= 0; index < h->ref_count[list]; index++){
- if(!h->ref_list[list][index].data[0]){
+ if (!h->ref_list[list][index].f.data[0]) {
av_log(h->s.avctx, AV_LOG_ERROR, "Missing reference picture\n");
- if(h->default_ref_list[list][0].data[0])
+ if (h->default_ref_list[list][0].f.data[0])
h->ref_list[list][index]= h->default_ref_list[list][0];
else
return -1;
Picture *field = &h->ref_list[list][16+2*i];
field[0] = *frame;
for(j=0; j<3; j++)
- field[0].linesize[j] <<= 1;
- field[0].reference = PICT_TOP_FIELD;
+ field[0].f.linesize[j] <<= 1;
+ field[0].f.reference = PICT_TOP_FIELD;
field[0].poc= field[0].field_poc[0];
field[1] = field[0];
for(j=0; j<3; j++)
- field[1].data[j] += frame->linesize[j];
- field[1].reference = PICT_BOTTOM_FIELD;
+ field[1].f.data[j] += frame->f.linesize[j];
+ field[1].f.reference = PICT_BOTTOM_FIELD;
field[1].poc= field[1].field_poc[1];
h->luma_weight[16+2*i][list][0] = h->luma_weight[16+2*i+1][list][0] = h->luma_weight[i][list][0];
*/
static inline int unreference_pic(H264Context *h, Picture *pic, int refmask){
int i;
- if (pic->reference &= refmask) {
+ if (pic->f.reference &= refmask) {
return 0;
} else {
for(i = 0; h->delayed_pic[i]; i++)
if(pic == h->delayed_pic[i]){
- pic->reference=DELAYED_PIC_REF;
+ pic->f.reference = DELAYED_PIC_REF;
break;
}
return 1;
av_log(h->s.avctx, AV_LOG_DEBUG, "short term list:\n");
for(i=0; i<h->short_ref_count; i++){
Picture *pic= h->short_ref[i];
- av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]);
+ av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n",
+ i, pic->frame_num, pic->poc, pic->f.data[0]);
}
}
}
for(i = 0; i < 16; i++){
Picture *pic= h->long_ref[i];
if (pic) {
- av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]);
+ av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n",
+ i, pic->frame_num, pic->poc, pic->f.data[0]);
}
}
}
h->mmco_index= 0;
if(h->short_ref_count && h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count &&
- !(FIELD_PICTURE && !s->first_field && s->current_picture_ptr->reference)) {
+ !(FIELD_PICTURE && !s->first_field && s->current_picture_ptr->f.reference)) {
h->mmco[0].opcode= MMCO_SHORT2UNUSED;
h->mmco[0].short_pic_num= h->short_ref[ h->short_ref_count - 1 ]->frame_num;
h->mmco_index= 1;
h->long_ref_count++;
}
- s->current_picture_ptr->reference |= s->picture_structure;
+ s->current_picture_ptr->f.reference |= s->picture_structure;
current_ref_assigned=1;
break;
case MMCO_SET_MAX_LONG:
*/
if (h->short_ref_count && h->short_ref[0] == s->current_picture_ptr) {
/* Just mark the second field valid */
- s->current_picture_ptr->reference = PICT_FRAME;
+ s->current_picture_ptr->f.reference = PICT_FRAME;
} else if (s->current_picture_ptr->long_ref) {
av_log(h->s.avctx, AV_LOG_ERROR, "illegal short term reference "
"assignment for second field "
h->short_ref[0]= s->current_picture_ptr;
h->short_ref_count++;
- s->current_picture_ptr->reference |= s->picture_structure;
+ s->current_picture_ptr->f.reference |= s->picture_structure;
}
}
int quant;
s->dsp.x8_setup_spatial_compensation(s->dest[chroma], s->edge_emu_buffer,
- s->current_picture.linesize[chroma>0],
+ s->current_picture.f.linesize[chroma>0],
&range, &sum, w->edges);
if(chroma){
w->orient=w->chroma_orient;
dc_level+= (w->predicted_dc*divide_quant + (1<<12) )>>13;
dsp_x8_put_solidcolor( av_clip_uint8((dc_level*dc_quant+4)>>3),
- s->dest[chroma], s->current_picture.linesize[!!chroma]);
+ s->dest[chroma], s->current_picture.f.linesize[!!chroma]);
goto block_placed;
}
}
if(w->flat_dc){
- dsp_x8_put_solidcolor(w->predicted_dc, s->dest[chroma], s->current_picture.linesize[!!chroma]);
+ dsp_x8_put_solidcolor(w->predicted_dc, s->dest[chroma], s->current_picture.f.linesize[!!chroma]);
}else{
s->dsp.x8_spatial_compensation[w->orient]( s->edge_emu_buffer,
s->dest[chroma],
- s->current_picture.linesize[!!chroma] );
+ s->current_picture.f.linesize[!!chroma] );
}
if(!zeros_only)
s->dsp.idct_add ( s->dest[chroma],
- s->current_picture.linesize[!!chroma],
+ s->current_picture.f.linesize[!!chroma],
s->block[0] );
block_placed:
if(s->loop_filter){
uint8_t* ptr = s->dest[chroma];
- int linesize = s->current_picture.linesize[!!chroma];
+ int linesize = s->current_picture.f.linesize[!!chroma];
if(!( (w->edges&2) || ( zeros_only && (w->orient|4)==4 ) )){
s->dsp.x8_h_loop_filter(ptr, linesize, w->quant);
static void x8_init_block_index(MpegEncContext *s){ //FIXME maybe merge with ff_*
//not s->linesize as this would be wrong for field pics
//not that IntraX8 has interlacing support ;)
- const int linesize = s->current_picture.linesize[0];
- const int uvlinesize= s->current_picture.linesize[1];
+ const int linesize = s->current_picture.f.linesize[0];
+ const int uvlinesize = s->current_picture.f.linesize[1];
- s->dest[0] = s->current_picture.data[0];
- s->dest[1] = s->current_picture.data[1];
- s->dest[2] = s->current_picture.data[2];
+ s->dest[0] = s->current_picture.f.data[0];
+ s->dest[1] = s->current_picture.f.data[1];
+ s->dest[2] = s->current_picture.f.data[2];
s->dest[0] += s->mb_y * linesize << 3;
s->dest[1] += ( s->mb_y&(~1) ) * uvlinesize << 2;//chroma planes advance only on even mb rows
/*emulate MB info in the relevant tables*/
s->mbskip_table [mb_xy]=0;
s->mbintra_table[mb_xy]=1;
- s->current_picture.qscale_table[mb_xy]=w->quant;
+ s->current_picture.f.qscale_table[mb_xy] = w->quant;
mb_xy++;
}
s->dest[0]+= 8;
do{
if (get_bits1(&s->gb)) {
/* skip mb */
- mot_val = s->current_picture.motion_val[0][ s->block_index[0] ];
+ mot_val = s->current_picture.f.motion_val[0][s->block_index[0]];
mot_val[0 ]= mot_val[2 ]=
mot_val[0+stride]= mot_val[2+stride]= 0;
mot_val[1 ]= mot_val[3 ]=
mot_val[1+stride]= mot_val[3+stride]= 0;
- s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
goto end;
}
cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
}while(cbpc == 20);
if(cbpc & 4){
- s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
}else{
get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
if (cbpc & 8) {
}
if ((cbpc & 16) == 0) {
- s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
mot_val= h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
if (s->umvplus)
mot_val[1 ]= mot_val[3 ]=
mot_val[1+stride]= mot_val[3+stride]= my;
} else {
- s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
for(i=0;i<4;i++) {
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
if (s->umvplus)
s->block_last_index[i] = -1;
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
- s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
s->mb_skipped = !(s->obmc | s->loop_filter);
s->mv_dir = MV_DIR_FORWARD;
if ((cbpc & 16) == 0) {
- s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
s->mv_type = MV_TYPE_16X16;
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
} else {
- s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
s->mv_type = MV_TYPE_8X8;
for(i=0;i<4;i++) {
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
} else if(s->pict_type==AV_PICTURE_TYPE_B) {
int mb_type;
const int stride= s->b8_stride;
- int16_t *mot_val0 = s->current_picture.motion_val[0][ 2*(s->mb_x + s->mb_y*stride) ];
- int16_t *mot_val1 = s->current_picture.motion_val[1][ 2*(s->mb_x + s->mb_y*stride) ];
+ int16_t *mot_val0 = s->current_picture.f.motion_val[0][2 * (s->mb_x + s->mb_y * stride)];
+ int16_t *mot_val1 = s->current_picture.f.motion_val[1][2 * (s->mb_x + s->mb_y * stride)];
// const int mv_xy= s->mb_x + 1 + s->mb_y * s->mb_stride;
//FIXME ugly
}
}
- s->current_picture.mb_type[xy]= mb_type;
+ s->current_picture.f.mb_type[xy] = mb_type;
} else { /* I-Frame */
do{
cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
dquant = cbpc & 4;
s->mb_intra = 1;
intra:
- s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
if (s->h263_aic) {
s->ac_pred = get_bits1(&s->gb);
if(s->ac_pred){
- s->current_picture.mb_type[xy]= MB_TYPE_INTRA | MB_TYPE_ACPRED;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
s->h263_aic_dir = get_bits1(&s->gb);
}
i = get_bits(&s->gb, 8); /* picture timestamp */
if( (s->picture_number&~0xFF)+i < s->picture_number)
i+= 256;
- s->current_picture_ptr->pts=
+ s->current_picture_ptr->f.pts =
s->picture_number= (s->picture_number&~0xFF) + i;
/* PTYPE starts here */
*/
void ff_clean_h263_qscales(MpegEncContext *s){
int i;
- int8_t * const qscale_table= s->current_picture.qscale_table;
+ int8_t * const qscale_table = s->current_picture.f.qscale_table;
ff_init_qscale_tab(s);
/* motion vectors: 8x8 mode*/
h263_pred_motion(s, i, 0, &pred_x, &pred_y);
- motion_x= s->current_picture.motion_val[0][ s->block_index[i] ][0];
- motion_y= s->current_picture.motion_val[0][ s->block_index[i] ][1];
+ motion_x = s->current_picture.f.motion_val[0][s->block_index[i]][0];
+ motion_y = s->current_picture.f.motion_val[0][s->block_index[i]][1];
if (!s->umvplus) {
ff_h263_encode_motion_vector(s, motion_x - pred_x,
motion_y - pred_y, 1);
if(mv4){
int mot_xy= s->block_index[0];
- s->current_picture.motion_val[0][mot_xy ][0]= mx;
- s->current_picture.motion_val[0][mot_xy ][1]= my;
- s->current_picture.motion_val[0][mot_xy+1][0]= mx;
- s->current_picture.motion_val[0][mot_xy+1][1]= my;
+ s->current_picture.f.motion_val[0][mot_xy ][0] = mx;
+ s->current_picture.f.motion_val[0][mot_xy ][1] = my;
+ s->current_picture.f.motion_val[0][mot_xy + 1][0] = mx;
+ s->current_picture.f.motion_val[0][mot_xy + 1][1] = my;
mot_xy += s->b8_stride;
- s->current_picture.motion_val[0][mot_xy ][0]= mx;
- s->current_picture.motion_val[0][mot_xy ][1]= my;
- s->current_picture.motion_val[0][mot_xy+1][0]= mx;
- s->current_picture.motion_val[0][mot_xy+1][1]= my;
+ s->current_picture.f.motion_val[0][mot_xy ][0] = mx;
+ s->current_picture.f.motion_val[0][mot_xy ][1] = my;
+ s->current_picture.f.motion_val[0][mot_xy + 1][0] = mx;
+ s->current_picture.f.motion_val[0][mot_xy + 1][1] = my;
}
}
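/* Illustrative only: motion_val[] is laid out on an 8x8-block grid with
 * row stride b8_stride, so a 16x16 macroblock owns a 2x2 square of
 * entries; broadcasting one vector into all four, as above, lets later
 * per-8x8 readers (4MV prediction, OBMC) ignore how the MB was coded. */
#include <stdint.h>

static void broadcast_mb_mv(int16_t (*motion_val)[2], int mot_xy,
                            int b8_stride, int mx, int my)
{
    int i, j;
    for (j = 0; j < 2; j++)
        for (i = 0; i < 2; i++) {
            motion_val[mot_xy + i + j * b8_stride][0] = mx;
            motion_val[mot_xy + i + j * b8_stride][1] = my;
        }
}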
const int mot_stride = s->b8_stride;
const int mot_xy = s->block_index[block];
- P_LEFT[0] = s->current_picture.motion_val[0][mot_xy - 1][0];
- P_LEFT[1] = s->current_picture.motion_val[0][mot_xy - 1][1];
+ P_LEFT[0] = s->current_picture.f.motion_val[0][mot_xy - 1][0];
+ P_LEFT[1] = s->current_picture.f.motion_val[0][mot_xy - 1][1];
if(P_LEFT[0] > (c->xmax<<shift)) P_LEFT[0] = (c->xmax<<shift);
c->pred_x= pred_x4= P_LEFT[0];
c->pred_y= pred_y4= P_LEFT[1];
} else {
- P_TOP[0] = s->current_picture.motion_val[0][mot_xy - mot_stride ][0];
- P_TOP[1] = s->current_picture.motion_val[0][mot_xy - mot_stride ][1];
- P_TOPRIGHT[0] = s->current_picture.motion_val[0][mot_xy - mot_stride + off[block]][0];
- P_TOPRIGHT[1] = s->current_picture.motion_val[0][mot_xy - mot_stride + off[block]][1];
+ P_TOP[0] = s->current_picture.f.motion_val[0][mot_xy - mot_stride ][0];
+ P_TOP[1] = s->current_picture.f.motion_val[0][mot_xy - mot_stride ][1];
+ P_TOPRIGHT[0] = s->current_picture.f.motion_val[0][mot_xy - mot_stride + off[block]][0];
+ P_TOPRIGHT[1] = s->current_picture.f.motion_val[0][mot_xy - mot_stride + off[block]][1];
if(P_TOP[1] > (c->ymax<<shift)) P_TOP[1] = (c->ymax<<shift);
if(P_TOPRIGHT[0] < (c->xmin<<shift)) P_TOPRIGHT[0]= (c->xmin<<shift);
if(P_TOPRIGHT[0] > (c->xmax<<shift)) P_TOPRIGHT[0]= (c->xmax<<shift);
my4_sum+= my4;
}
- s->current_picture.motion_val[0][ s->block_index[block] ][0]= mx4;
- s->current_picture.motion_val[0][ s->block_index[block] ][1]= my4;
+ s->current_picture.f.motion_val[0][s->block_index[block]][0] = mx4;
+ s->current_picture.f.motion_val[0][s->block_index[block]][1] = my4;
if(mx4 != mx || my4 != my) same=0;
}
return INT_MAX;
if(s->dsp.me_sub_cmp[0] != s->dsp.mb_cmp[0]){
- dmin_sum += s->dsp.mb_cmp[0](s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*16*stride, c->scratchpad, stride, 16);
+ dmin_sum += s->dsp.mb_cmp[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*16*stride, c->scratchpad, stride, 16);
}
if(c->avctx->mb_cmp&FF_CMP_CHROMA){
offset= (s->mb_x*8 + (mx>>1)) + (s->mb_y*8 + (my>>1))*s->uvlinesize;
if(s->no_rounding){
- s->dsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad , s->last_picture.data[1] + offset, s->uvlinesize, 8);
- s->dsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad+8 , s->last_picture.data[2] + offset, s->uvlinesize, 8);
+ s->dsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad , s->last_picture.f.data[1] + offset, s->uvlinesize, 8);
+ s->dsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad + 8, s->last_picture.f.data[2] + offset, s->uvlinesize, 8);
}else{
- s->dsp.put_pixels_tab [1][dxy](c->scratchpad , s->last_picture.data[1] + offset, s->uvlinesize, 8);
- s->dsp.put_pixels_tab [1][dxy](c->scratchpad+8 , s->last_picture.data[2] + offset, s->uvlinesize, 8);
+ s->dsp.put_pixels_tab [1][dxy](c->scratchpad , s->last_picture.f.data[1] + offset, s->uvlinesize, 8);
+ s->dsp.put_pixels_tab [1][dxy](c->scratchpad + 8, s->last_picture.f.data[2] + offset, s->uvlinesize, 8);
}
- dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad , s->uvlinesize, 8);
- dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad+8, s->uvlinesize, 8);
+ dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad , s->uvlinesize, 8);
+ dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad+8, s->uvlinesize, 8);
}
c->pred_x= mx;
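/* Illustrative only: the chroma offset above resolves a half-pel chroma
 * vector against the 8x8 chroma block of macroblock (mb_x, mb_y);
 * mx >> 1 and my >> 1 give the integer sample offset, while the low
 * bits select the half-pel interpolation routine through dxy. */
static inline int chroma_offset(int mb_x, int mb_y, int mx, int my,
                                int uvlinesize)
{
    return (mb_x * 8 + (mx >> 1)) + (mb_y * 8 + (my >> 1)) * uvlinesize;
}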
Picture *p= s->current_picture_ptr;
int mb_xy= mb_x + mb_y*s->mb_stride;
int xy= 2*mb_x + 2*mb_y*s->b8_stride;
- int mb_type= s->current_picture.mb_type[mb_xy];
+ int mb_type= s->current_picture.f.mb_type[mb_xy];
int flags= c->flags;
int shift= (flags&FLAG_QPEL) + 1;
int mask= (1<<shift)-1;
for(i=0; i<4; i++){
int xy= s->block_index[i];
- clip_input_mv(s, p->motion_val[0][xy], !!IS_INTERLACED(mb_type));
- clip_input_mv(s, p->motion_val[1][xy], !!IS_INTERLACED(mb_type));
+ clip_input_mv(s, p->f.motion_val[0][xy], !!IS_INTERLACED(mb_type));
+ clip_input_mv(s, p->f.motion_val[1][xy], !!IS_INTERLACED(mb_type));
}
if(IS_INTERLACED(mb_type)){
}
if(USES_LIST(mb_type, 0)){
- int field_select0= p->ref_index[0][4*mb_xy ];
- int field_select1= p->ref_index[0][4*mb_xy+2];
+ int field_select0= p->f.ref_index[0][4*mb_xy ];
+ int field_select1= p->f.ref_index[0][4*mb_xy+2];
assert(field_select0==0 ||field_select0==1);
assert(field_select1==0 ||field_select1==1);
init_interlaced_ref(s, 0);
if(p_type){
s->p_field_select_table[0][mb_xy]= field_select0;
s->p_field_select_table[1][mb_xy]= field_select1;
- *(uint32_t*)s->p_field_mv_table[0][field_select0][mb_xy]= *(uint32_t*)p->motion_val[0][xy ];
- *(uint32_t*)s->p_field_mv_table[1][field_select1][mb_xy]= *(uint32_t*)p->motion_val[0][xy2];
+ *(uint32_t*)s->p_field_mv_table[0][field_select0][mb_xy] = *(uint32_t*)p->f.motion_val[0][xy ];
+ *(uint32_t*)s->p_field_mv_table[1][field_select1][mb_xy] = *(uint32_t*)p->f.motion_val[0][xy2];
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTER_I;
}else{
s->b_field_select_table[0][0][mb_xy]= field_select0;
s->b_field_select_table[0][1][mb_xy]= field_select1;
- *(uint32_t*)s->b_field_mv_table[0][0][field_select0][mb_xy]= *(uint32_t*)p->motion_val[0][xy ];
- *(uint32_t*)s->b_field_mv_table[0][1][field_select1][mb_xy]= *(uint32_t*)p->motion_val[0][xy2];
+ *(uint32_t*)s->b_field_mv_table[0][0][field_select0][mb_xy] = *(uint32_t*)p->f.motion_val[0][xy ];
+ *(uint32_t*)s->b_field_mv_table[0][1][field_select1][mb_xy] = *(uint32_t*)p->f.motion_val[0][xy2];
s->mb_type[mb_xy]= CANDIDATE_MB_TYPE_FORWARD_I;
}
- x= p->motion_val[0][xy ][0];
- y= p->motion_val[0][xy ][1];
+ x = p->f.motion_val[0][xy ][0];
+ y = p->f.motion_val[0][xy ][1];
d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select0, 0, cmpf, chroma_cmpf, flags);
- x= p->motion_val[0][xy2][0];
- y= p->motion_val[0][xy2][1];
+ x = p->f.motion_val[0][xy2][0];
+ y = p->f.motion_val[0][xy2][1];
d+= cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select1, 1, cmpf, chroma_cmpf, flags);
}
if(USES_LIST(mb_type, 1)){
- int field_select0= p->ref_index[1][4*mb_xy ];
- int field_select1= p->ref_index[1][4*mb_xy+2];
+ int field_select0 = p->f.ref_index[1][4 * mb_xy ];
+ int field_select1 = p->f.ref_index[1][4 * mb_xy + 2];
assert(field_select0==0 ||field_select0==1);
assert(field_select1==0 ||field_select1==1);
init_interlaced_ref(s, 2);
s->b_field_select_table[1][0][mb_xy]= field_select0;
s->b_field_select_table[1][1][mb_xy]= field_select1;
- *(uint32_t*)s->b_field_mv_table[1][0][field_select0][mb_xy]= *(uint32_t*)p->motion_val[1][xy ];
- *(uint32_t*)s->b_field_mv_table[1][1][field_select1][mb_xy]= *(uint32_t*)p->motion_val[1][xy2];
+ *(uint32_t*)s->b_field_mv_table[1][0][field_select0][mb_xy] = *(uint32_t*)p->f.motion_val[1][xy ];
+ *(uint32_t*)s->b_field_mv_table[1][1][field_select1][mb_xy] = *(uint32_t*)p->f.motion_val[1][xy2];
if(USES_LIST(mb_type, 0)){
s->mb_type[mb_xy]= CANDIDATE_MB_TYPE_BIDIR_I;
}else{
s->mb_type[mb_xy]= CANDIDATE_MB_TYPE_BACKWARD_I;
}
- x= p->motion_val[1][xy ][0];
- y= p->motion_val[1][xy ][1];
+ x = p->f.motion_val[1][xy ][0];
+ y = p->f.motion_val[1][xy ][1];
d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select0+2, 0, cmpf, chroma_cmpf, flags);
- x= p->motion_val[1][xy2][0];
- y= p->motion_val[1][xy2][1];
+ x = p->f.motion_val[1][xy2][0];
+ y = p->f.motion_val[1][xy2][1];
d+= cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select1+2, 1, cmpf, chroma_cmpf, flags);
//FIXME bidir scores
}
init_mv4_ref(c);
for(i=0; i<4; i++){
xy= s->block_index[i];
- x= p->motion_val[0][xy][0];
- y= p->motion_val[0][xy][1];
+ x= p->f.motion_val[0][xy][0];
+ y= p->f.motion_val[0][xy][1];
d+= cmp(s, x>>shift, y>>shift, x&mask, y&mask, 1, 8, i, i, cmpf, chroma_cmpf, flags);
}
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTER4V;
}else{
if(USES_LIST(mb_type, 0)){
if(p_type){
- *(uint32_t*)s->p_mv_table[mb_xy]= *(uint32_t*)p->motion_val[0][xy];
+ *(uint32_t*)s->p_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[0][xy];
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTER;
}else if(USES_LIST(mb_type, 1)){
- *(uint32_t*)s->b_bidir_forw_mv_table[mb_xy]= *(uint32_t*)p->motion_val[0][xy];
- *(uint32_t*)s->b_bidir_back_mv_table[mb_xy]= *(uint32_t*)p->motion_val[1][xy];
+ *(uint32_t*)s->b_bidir_forw_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[0][xy];
+ *(uint32_t*)s->b_bidir_back_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[1][xy];
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_BIDIR;
}else{
- *(uint32_t*)s->b_forw_mv_table[mb_xy]= *(uint32_t*)p->motion_val[0][xy];
+ *(uint32_t*)s->b_forw_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[0][xy];
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_FORWARD;
}
- x= p->motion_val[0][xy][0];
- y= p->motion_val[0][xy][1];
+ x = p->f.motion_val[0][xy][0];
+ y = p->f.motion_val[0][xy][1];
d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 16, 0, 0, cmpf, chroma_cmpf, flags);
}else if(USES_LIST(mb_type, 1)){
- *(uint32_t*)s->b_back_mv_table[mb_xy]= *(uint32_t*)p->motion_val[1][xy];
+ *(uint32_t*)s->b_back_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[1][xy];
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_BACKWARD;
- x= p->motion_val[1][xy][0];
- y= p->motion_val[1][xy][1];
+ x = p->f.motion_val[1][xy][0];
+ y = p->f.motion_val[1][xy][1];
d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 16, 2, 0, cmpf, chroma_cmpf, flags);
}else
s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTRA;
int mb_type=0;
Picture * const pic= &s->current_picture;
- init_ref(c, s->new_picture.data, s->last_picture.data, NULL, 16*mb_x, 16*mb_y, 0);
+ init_ref(c, s->new_picture.f.data, s->last_picture.f.data, NULL, 16*mb_x, 16*mb_y, 0);
assert(s->quarter_sample==0 || s->quarter_sample==1);
assert(s->linesize == c->stride);
const int mot_stride = s->b8_stride;
const int mot_xy = s->block_index[0];
- P_LEFT[0] = s->current_picture.motion_val[0][mot_xy - 1][0];
- P_LEFT[1] = s->current_picture.motion_val[0][mot_xy - 1][1];
+ P_LEFT[0] = s->current_picture.f.motion_val[0][mot_xy - 1][0];
+ P_LEFT[1] = s->current_picture.f.motion_val[0][mot_xy - 1][1];
if(P_LEFT[0] > (c->xmax<<shift)) P_LEFT[0] = (c->xmax<<shift);
if(!s->first_slice_line) {
- P_TOP[0] = s->current_picture.motion_val[0][mot_xy - mot_stride ][0];
- P_TOP[1] = s->current_picture.motion_val[0][mot_xy - mot_stride ][1];
- P_TOPRIGHT[0] = s->current_picture.motion_val[0][mot_xy - mot_stride + 2][0];
- P_TOPRIGHT[1] = s->current_picture.motion_val[0][mot_xy - mot_stride + 2][1];
+ P_TOP[0] = s->current_picture.f.motion_val[0][mot_xy - mot_stride ][0];
+ P_TOP[1] = s->current_picture.f.motion_val[0][mot_xy - mot_stride ][1];
+ P_TOPRIGHT[0] = s->current_picture.f.motion_val[0][mot_xy - mot_stride + 2][0];
+ P_TOPRIGHT[1] = s->current_picture.f.motion_val[0][mot_xy - mot_stride + 2][1];
if(P_TOP[1] > (c->ymax<<shift)) P_TOP[1] = (c->ymax<<shift);
if(P_TOPRIGHT[0] < (c->xmin<<shift)) P_TOPRIGHT[0]= (c->xmin<<shift);
if(P_TOPRIGHT[1] > (c->ymax<<shift)) P_TOPRIGHT[1]= (c->ymax<<shift);
}else{
mean= (s->last_dc[i] + 4)>>3;
}
- dest_c = s->new_picture.data[i] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
+ dest_c = s->new_picture.f.data[i] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
mean*= 0x01010101;
for(i=0; i<8; i++){
if(intra_score < dmin){
mb_type= CANDIDATE_MB_TYPE_INTRA;
- s->current_picture.mb_type[mb_y*s->mb_stride + mb_x]= CANDIDATE_MB_TYPE_INTRA; //FIXME cleanup
+ s->current_picture.f.mb_type[mb_y*s->mb_stride + mb_x] = CANDIDATE_MB_TYPE_INTRA; //FIXME cleanup
}else
- s->current_picture.mb_type[mb_y*s->mb_stride + mb_x]= 0;
+ s->current_picture.f.mb_type[mb_y*s->mb_stride + mb_x] = 0;
{
int p_score= FFMIN(vard, varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*100);
int P[10][2];
const int shift= 1+s->quarter_sample;
const int xy= mb_x + mb_y*s->mb_stride;
- init_ref(c, s->new_picture.data, s->last_picture.data, NULL, 16*mb_x, 16*mb_y, 0);
+ init_ref(c, s->new_picture.f.data, s->last_picture.f.data, NULL, 16*mb_x, 16*mb_y, 0);
assert(s->quarter_sample==0 || s->quarter_sample==1);
ymin= xmin=(-32)>>shift;
ymax= xmax= 31>>shift;
- if(IS_8X8(s->next_picture.mb_type[mot_xy])){
+ if (IS_8X8(s->next_picture.f.mb_type[mot_xy])) {
s->mv_type= MV_TYPE_8X8;
}else{
s->mv_type= MV_TYPE_16X16;
int index= s->block_index[i];
int min, max;
- c->co_located_mv[i][0]= s->next_picture.motion_val[0][index][0];
- c->co_located_mv[i][1]= s->next_picture.motion_val[0][index][1];
+ c->co_located_mv[i][0] = s->next_picture.f.motion_val[0][index][0];
+ c->co_located_mv[i][1] = s->next_picture.f.motion_val[0][index][1];
c->direct_basis_mv[i][0]= c->co_located_mv[i][0]*time_pb/time_pp + ((i& 1)<<(shift+3));
c->direct_basis_mv[i][1]= c->co_located_mv[i][1]*time_pb/time_pp + ((i>>1)<<(shift+3));
// c->direct_basis_mv[1][i][0]= c->co_located_mv[i][0]*(time_pb - time_pp)/time_pp + ((i &1)<<(shift+3);
int fmin, bmin, dmin, fbmin, bimin, fimin;
int type=0;
const int xy = mb_y*s->mb_stride + mb_x;
- init_ref(c, s->new_picture.data, s->last_picture.data, s->next_picture.data, 16*mb_x, 16*mb_y, 2);
+ init_ref(c, s->new_picture.f.data, s->last_picture.f.data,
+ s->next_picture.f.data, 16 * mb_x, 16 * mb_y, 2);
get_limits(s, 16*mb_x, 16*mb_y);
c->skip=0;
- if(s->codec_id == CODEC_ID_MPEG4 && s->next_picture.mbskip_table[xy]){
+ if (s->codec_id == CODEC_ID_MPEG4 && s->next_picture.f.mbskip_table[xy]) {
int score= direct_search(s, mb_x, mb_y); //FIXME just check 0,0
score= ((unsigned)(score*score + 128*256))>>16;
int block;
for(block=0; block<4; block++){
int off= (block& 1) + (block>>1)*wrap;
- int mx= s->current_picture.motion_val[0][ xy + off ][0];
- int my= s->current_picture.motion_val[0][ xy + off ][1];
+ int mx = s->current_picture.f.motion_val[0][ xy + off ][0];
+ int my = s->current_picture.f.motion_val[0][ xy + off ][1];
if( mx >=range || mx <-range
|| my >=range || my <-range){
s->mb_type[i] &= ~CANDIDATE_MB_TYPE_INTER4V;
s->mb_type[i] |= CANDIDATE_MB_TYPE_INTRA;
- s->current_picture.mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
+ s->current_picture.f.mb_type[i] = CANDIDATE_MB_TYPE_INTRA;
}
}
}
if (s->mb_skip_run-- != 0) {
if (s->pict_type == AV_PICTURE_TYPE_P) {
s->mb_skipped = 1;
- s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
+ s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
} else {
int mb_type;
if(s->mb_x)
- mb_type= s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1];
+ mb_type = s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
else
- mb_type= s->current_picture.mb_type[ s->mb_width + (s->mb_y-1)*s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
+ mb_type = s->current_picture.f.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
if(IS_INTRA(mb_type))
return -1;
- s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]=
+ s->current_picture.f.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
mb_type | MB_TYPE_SKIP;
-// assert(s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));
+// assert(s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));
if((s->mv[0][0][0]|s->mv[0][0][1]|s->mv[1][0][0]|s->mv[1][0][1])==0)
s->mb_skipped = 1;
}
}
- s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= mb_type;
+ s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
return 0;
}
s->mpeg_f_code[1][0] = f_code;
s->mpeg_f_code[1][1] = f_code;
}
- s->current_picture.pict_type= s->pict_type;
- s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
+ s->current_picture.f.pict_type = s->pict_type;
+ s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
if(avctx->debug & FF_DEBUG_PICT_INFO)
av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
s->pict_type= AV_PICTURE_TYPE_P;
}else
s->pict_type= AV_PICTURE_TYPE_B;
- s->current_picture.pict_type= s->pict_type;
- s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
+ s->current_picture.f.pict_type = s->pict_type;
+ s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
}
s->intra_dc_precision = get_bits(&s->gb, 2);
s->picture_structure = get_bits(&s->gb, 2);
ff_er_frame_start(s);
/* first check if we must repeat the frame */
- s->current_picture_ptr->repeat_pict = 0;
+ s->current_picture_ptr->f.repeat_pict = 0;
if (s->repeat_first_field) {
if (s->progressive_sequence) {
if (s->top_field_first)
- s->current_picture_ptr->repeat_pict = 4;
+ s->current_picture_ptr->f.repeat_pict = 4;
else
- s->current_picture_ptr->repeat_pict = 2;
+ s->current_picture_ptr->f.repeat_pict = 2;
} else if (s->progressive_frame) {
- s->current_picture_ptr->repeat_pict = 1;
+ s->current_picture_ptr->f.repeat_pict = 1;
}
}
- *s->current_picture_ptr->pan_scan= s1->pan_scan;
+ *s->current_picture_ptr->f.pan_scan = s1->pan_scan;
if (HAVE_PTHREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
ff_thread_finish_setup(avctx);
}
for(i=0; i<4; i++){
- s->current_picture.data[i] = s->current_picture_ptr->data[i];
+ s->current_picture.f.data[i] = s->current_picture_ptr->f.data[i];
if(s->picture_structure == PICT_BOTTOM_FIELD){
- s->current_picture.data[i] += s->current_picture_ptr->linesize[i];
+ s->current_picture.f.data[i] += s->current_picture_ptr->f.linesize[i];
}
}
}
if(mpeg_decode_mb(s, s->block) < 0)
return -1;
- if(s->current_picture.motion_val[0] && !s->encoding){ //note motion_val is normally NULL unless we want to extract the MVs
+ if (s->current_picture.f.motion_val[0] && !s->encoding) { //note motion_val is normally NULL unless we want to extract the MVs
const int wrap = s->b8_stride;
int xy = s->mb_x*2 + s->mb_y*2*wrap;
int b8_xy= 4*(s->mb_x + s->mb_y*s->mb_stride);
motion_y = s->mv[dir][i][1];
}
- s->current_picture.motion_val[dir][xy ][0] = motion_x;
- s->current_picture.motion_val[dir][xy ][1] = motion_y;
- s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
- s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
- s->current_picture.ref_index [dir][b8_xy ]=
- s->current_picture.ref_index [dir][b8_xy + 1]= s->field_select[dir][i];
+ s->current_picture.f.motion_val[dir][xy ][0] = motion_x;
+ s->current_picture.f.motion_val[dir][xy ][1] = motion_y;
+ s->current_picture.f.motion_val[dir][xy + 1][0] = motion_x;
+ s->current_picture.f.motion_val[dir][xy + 1][1] = motion_y;
+ s->current_picture.f.ref_index [dir][b8_xy ] =
+ s->current_picture.f.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
assert(s->field_select[dir][i]==0 || s->field_select[dir][i]==1);
}
xy += wrap;
if (/*s->mb_y<<field_pic == s->mb_height &&*/ !s->first_field) {
/* end of image */
- s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_MPEG2;
+ s->current_picture_ptr->f.qscale_type = FF_QSCALE_TYPE_MPEG2;
ff_er_frame_end(s);
if(aspect_ratio==0.0) aspect_ratio= 1.0; //pixel aspect 1:1 (VGA)
- if (s->current_picture.key_frame) {
+ if (s->current_picture.f.key_frame) {
AVRational framerate= ff_frame_rate_tab[s->frame_rate_index];
/* mpeg1 header repeated every gop */
/* time code : we must convert from the real frame rate to a
fake mpeg frame rate in case of low frame rate */
fps = (framerate.num + framerate.den/2)/ framerate.den;
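/* A worked example of the rounding above (values assumed, not taken from
 * a stream): with an NTSC frame rate of 30000/1001 this evaluates to
 * (30000 + 500) / 1001 == 30, i.e. the nominal integer rate the MPEG-1
 * GOP time code is expressed in. */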
- time_code = s->current_picture_ptr->coded_picture_number + s->avctx->timecode_frame_start;
- s->gop_picture_number = s->current_picture_ptr->coded_picture_number;
+ time_code = s->current_picture_ptr->f.coded_picture_number + s->avctx->timecode_frame_start;
+ s->gop_picture_number = s->current_picture_ptr->f.coded_picture_number;
if (s->avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE) {
/* only works for NTSC 29.97 */
int d = time_code / 17982;
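/* 17982 is the frame count of one 10-minute block of 29.97 Hz drop-frame
 * time code: nine minutes of 1798 numbered frames (two frame numbers
 * dropped each) plus one full minute of 1800, so d counts the complete
 * 10-minute blocks in the running frame total. */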
if (s->progressive_sequence) {
put_bits(&s->pb, 1, 0); /* no repeat */
} else {
- put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first);
+ put_bits(&s->pb, 1, s->current_picture_ptr->f.top_field_first);
}
/* XXX: optimize the generation of this flag with entropy
measures */
uint16_t time_pb= s->pb_time;
int p_mx, p_my;
- p_mx= s->next_picture.motion_val[0][xy][0];
+ p_mx = s->next_picture.f.motion_val[0][xy][0];
if((unsigned)(p_mx + tab_bias) < tab_size){
s->mv[0][i][0] = s->direct_scale_mv[0][p_mx + tab_bias] + mx;
s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
: p_mx*(time_pb - time_pp)/time_pp;
}
- p_my= s->next_picture.motion_val[0][xy][1];
+ p_my = s->next_picture.f.motion_val[0][xy][1];
if((unsigned)(p_my + tab_bias) < tab_size){
s->mv[0][i][1] = s->direct_scale_mv[0][p_my + tab_bias] + my;
s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
*/
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
const int mb_index= s->mb_x + s->mb_y*s->mb_stride;
- const int colocated_mb_type= s->next_picture.mb_type[mb_index];
+ const int colocated_mb_type = s->next_picture.f.mb_type[mb_index];
uint16_t time_pp;
uint16_t time_pb;
int i;
} else if(IS_INTERLACED(colocated_mb_type)){
s->mv_type = MV_TYPE_FIELD;
for(i=0; i<2; i++){
- int field_select= s->next_picture.ref_index[0][4*mb_index + 2*i];
+ int field_select = s->next_picture.f.ref_index[0][4 * mb_index + 2 * i];
s->field_select[0][i]= field_select;
s->field_select[1][i]= i;
if(s->top_field_first){
{
int i;
int16_t *ac_val, *ac_val1;
- int8_t * const qscale_table= s->current_picture.qscale_table;
+ int8_t * const qscale_table = s->current_picture.f.qscale_table;
/* find prediction */
ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
if(s->pict_type == AV_PICTURE_TYPE_B){
int mb_x = 0, mb_y = 0;
- while(s->next_picture.mbskip_table[ s->mb_index2xy[ mb_num ] ]) {
+ while (s->next_picture.f.mbskip_table[s->mb_index2xy[mb_num]]) {
if (!mb_x) ff_thread_await_progress((AVFrame*)s->next_picture_ptr, mb_y++, 0);
mb_num++;
if (++mb_x == s->mb_width) mb_x = 0;
}while(cbpc == 8);
s->cbp_table[xy]= cbpc & 3;
- s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
s->mb_intra = 1;
if(cbpc & 4) {
ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
}
- s->current_picture.qscale_table[xy]= s->qscale;
+ s->current_picture.f.qscale_table[xy] = s->qscale;
s->mbintra_table[xy]= 1;
for(i=0; i<6; i++){
s->pred_dir_table[xy]= dir;
}else{ /* P/S_TYPE */
int mx, my, pred_x, pred_y, bits;
- int16_t * const mot_val= s->current_picture.motion_val[0][s->block_index[0]];
+ int16_t * const mot_val = s->current_picture.f.motion_val[0][s->block_index[0]];
const int stride= s->b8_stride*2;
try_again:
if(bits&0x10000){
/* skip mb */
if(s->pict_type==AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE){
- s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
mx= get_amv(s, 0);
my= get_amv(s, 1);
}else{
- s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
mx=my=0;
}
mot_val[0 ]= mot_val[2 ]=
s->mb_intra = ((cbpc & 4) != 0);
if(s->mb_intra){
- s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
s->mbintra_table[xy]= 1;
mot_val[0 ]= mot_val[2 ]=
mot_val[0+stride]= mot_val[2+stride]= 0;
my = h263_decode_motion(s, pred_y, s->f_code);
if (my >= 0xffff)
return -1;
- s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
} else {
mx = get_amv(s, 0);
my = get_amv(s, 1);
- s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
}
mot_val[0 ]= mot_val[2 ] =
mot_val[1+stride]= mot_val[3+stride]= my;
} else {
int i;
- s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
for(i=0;i<4;i++) {
int16_t *mot_val= h263_pred_motion(s, i, 0, &pred_x, &pred_y);
mx = h263_decode_motion(s, pred_x, s->f_code);
}
s->cbp_table[xy]|= cbpy<<2;
- s->current_picture.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED;
+ s->current_picture.f.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED;
}else{ /* P || S_TYPE */
- if(IS_INTRA(s->current_picture.mb_type[xy])){
+ if (IS_INTRA(s->current_picture.f.mb_type[xy])) {
int dir=0,i;
int ac_pred = get_bits1(&s->gb);
int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
if(s->cbp_table[xy] & 8) {
ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
}
- s->current_picture.qscale_table[xy]= s->qscale;
+ s->current_picture.f.qscale_table[xy] = s->qscale;
for(i=0; i<6; i++){
int dc_pred_dir;
}
s->cbp_table[xy]&= 3; //remove dquant
s->cbp_table[xy]|= cbpy<<2;
- s->current_picture.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED;
+ s->current_picture.f.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED;
s->pred_dir_table[xy]= dir;
- }else if(IS_SKIP(s->current_picture.mb_type[xy])){
- s->current_picture.qscale_table[xy]= s->qscale;
+ } else if (IS_SKIP(s->current_picture.f.mb_type[xy])) {
+ s->current_picture.f.qscale_table[xy] = s->qscale;
s->cbp_table[xy]= 0;
}else{
int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
if(s->cbp_table[xy] & 8) {
ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
}
- s->current_picture.qscale_table[xy]= s->qscale;
+ s->current_picture.f.qscale_table[xy] = s->qscale;
s->cbp_table[xy]&= 3; //remove dquant
s->cbp_table[xy]|= (cbpy^0xf)<<2;
int cbp, mb_type;
const int xy= s->mb_x + s->mb_y*s->mb_stride;
- mb_type= s->current_picture.mb_type[xy];
+ mb_type = s->current_picture.f.mb_type[xy];
cbp = s->cbp_table[xy];
s->use_intra_dc_vlc= s->qscale < s->intra_dc_threshold;
- if(s->current_picture.qscale_table[xy] != s->qscale){
- ff_set_qscale(s, s->current_picture.qscale_table[xy] );
+ if (s->current_picture.f.qscale_table[xy] != s->qscale) {
+ ff_set_qscale(s, s->current_picture.f.qscale_table[xy]);
}
if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
int i;
for(i=0; i<4; i++){
- s->mv[0][i][0] = s->current_picture.motion_val[0][ s->block_index[i] ][0];
- s->mv[0][i][1] = s->current_picture.motion_val[0][ s->block_index[i] ][1];
+ s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
+ s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
}
s->mb_intra = IS_INTRA(mb_type);
s->mb_skipped = 1;
}
}else if(s->mb_intra){
- s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
+ s->ac_pred = IS_ACPRED(s->current_picture.f.mb_type[xy]);
}else if(!s->mb_intra){
// s->mcsel= 0; //FIXME do we need to init that
}
} else { /* I-Frame */
s->mb_intra = 1;
- s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
+ s->ac_pred = IS_ACPRED(s->current_picture.f.mb_type[xy]);
}
if (!IS_SKIP(mb_type)) {
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
if(s->pict_type==AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE){
- s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
s->mcsel=1;
s->mv[0][0][0]= get_amv(s, 0);
s->mv[0][0][1]= get_amv(s, 1);
s->mb_skipped = 0;
}else{
- s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->mcsel=0;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
s->mv_dir = MV_DIR_FORWARD;
if ((cbpc & 16) == 0) {
if(s->mcsel){
- s->current_picture.mb_type[xy]= MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 global motion prediction */
s->mv_type = MV_TYPE_16X16;
mx= get_amv(s, 0);
s->mv[0][0][0] = mx;
s->mv[0][0][1] = my;
}else if((!s->progressive_sequence) && get_bits1(&s->gb)){
- s->current_picture.mb_type[xy]= MB_TYPE_16x8 | MB_TYPE_L0 | MB_TYPE_INTERLACED;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_16x8 | MB_TYPE_L0 | MB_TYPE_INTERLACED;
/* 16x8 field motion prediction */
s->mv_type= MV_TYPE_FIELD;
s->mv[0][i][1] = my;
}
}else{
- s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
s->mv_type = MV_TYPE_16X16;
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
s->mv[0][0][1] = my;
}
} else {
- s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
s->mv_type = MV_TYPE_8X8;
for(i=0;i<4;i++) {
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
}
/* if we skipped it in the future P frame then skip it now too */
- s->mb_skipped= s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC
+ s->mb_skipped = s->next_picture.f.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC
if(s->mb_skipped){
/* skip mb */
s->mv[0][0][1] = 0;
s->mv[1][0][0] = 0;
s->mv[1][0][1] = 0;
- s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
goto end;
}
s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
mb_type |= ff_mpeg4_set_direct_mv(s, mx, my);
}
- s->current_picture.mb_type[xy]= mb_type;
+ s->current_picture.f.mb_type[xy] = mb_type;
} else { /* I-Frame */
do{
cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
intra:
s->ac_pred = get_bits1(&s->gb);
if(s->ac_pred)
- s->current_picture.mb_type[xy]= MB_TYPE_INTRA | MB_TYPE_ACPRED;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
else
- s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
+ s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
if(cbpy<0){
if(mpeg4_is_resync(s)){
const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1;
- if(s->pict_type==AV_PICTURE_TYPE_B && s->next_picture.mbskip_table[xy + delta]){
+ if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture.f.mbskip_table[xy + delta]) {
ff_thread_await_progress((AVFrame*)s->next_picture_ptr,
(s->mb_x + delta >= s->mb_width) ? FFMIN(s->mb_y+1, s->mb_height-1) : s->mb_y, 0);
}
- if(s->pict_type==AV_PICTURE_TYPE_B && s->next_picture.mbskip_table[xy + delta])
+ if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture.f.mbskip_table[xy + delta])
return SLICE_OK;
return SLICE_END;
}
}
if(s->avctx->time_base.num)
- s->current_picture_ptr->pts= (s->time + s->avctx->time_base.num/2) / s->avctx->time_base.num;
+ s->current_picture_ptr->f.pts = (s->time + s->avctx->time_base.num / 2) / s->avctx->time_base.num;
else
- s->current_picture_ptr->pts= AV_NOPTS_VALUE;
+ s->current_picture_ptr->f.pts = AV_NOPTS_VALUE;
if(s->avctx->debug&FF_DEBUG_PTS)
- av_log(s->avctx, AV_LOG_DEBUG, "MPEG4 PTS: %"PRId64"\n", s->current_picture_ptr->pts);
+ av_log(s->avctx, AV_LOG_DEBUG, "MPEG4 PTS: %"PRId64"\n",
+ s->current_picture_ptr->f.pts);
check_marker(gb, "before vop_coded");
{
int score= 0;
int i, n;
- int8_t * const qscale_table= s->current_picture.qscale_table;
+ int8_t * const qscale_table = s->current_picture.f.qscale_table;
memcpy(zigzag_last_index, s->block_last_index, sizeof(int)*6);
*/
void ff_clean_mpeg4_qscales(MpegEncContext *s){
int i;
- int8_t * const qscale_table= s->current_picture.qscale_table;
+ int8_t * const qscale_table = s->current_picture.f.qscale_table;
ff_clean_h263_qscales(s);
assert(mb_type>=0);
/* nothing to do if this MB was skipped in the next P Frame */
- if(s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]){ //FIXME avoid DCT & ...
+ if (s->next_picture.f.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) { //FIXME avoid DCT & ...
s->skip_count++;
s->mv[0][0][0]=
s->mv[0][0][1]=
if(y+16 > s->height) y= s->height-16;
offset= x + y*s->linesize;
- p_pic= s->new_picture.data[0] + offset;
+ p_pic = s->new_picture.f.data[0] + offset;
s->mb_skipped=1;
for(i=0; i<s->max_b_frames; i++){
int diff;
Picture *pic= s->reordered_input_picture[i+1];
- if(pic==NULL || pic->pict_type!=AV_PICTURE_TYPE_B) break;
+ if (pic == NULL || pic->f.pict_type != AV_PICTURE_TYPE_B)
+ break;
- b_pic= pic->data[0] + offset;
- if(pic->type != FF_BUFFER_TYPE_SHARED)
+ b_pic = pic->f.data[0] + offset;
+ if (pic->f.type != FF_BUFFER_TYPE_SHARED)
b_pic+= INPLACE_OFFSET;
diff= s->dsp.sad[0](NULL, p_pic, b_pic, s->linesize, 16);
if(diff>s->qscale*70){ //FIXME check that 70 is optimal
/* motion vectors: 8x8 mode*/
h263_pred_motion(s, i, 0, &pred_x, &pred_y);
- ff_h263_encode_motion_vector(s, s->current_picture.motion_val[0][ s->block_index[i] ][0] - pred_x,
- s->current_picture.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);
+ ff_h263_encode_motion_vector(s, s->current_picture.f.motion_val[0][ s->block_index[i] ][0] - pred_x,
+ s->current_picture.f.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);
}
}
put_bits(&s->pb, 16, 0);
put_bits(&s->pb, 16, GOP_STARTCODE);
- time= s->current_picture_ptr->pts;
+ time = s->current_picture_ptr->f.pts;
if(s->reordered_input_picture[1])
- time= FFMIN(time, s->reordered_input_picture[1]->pts);
+ time = FFMIN(time, s->reordered_input_picture[1]->f.pts);
time= time*s->avctx->time_base.num;
seconds= time/s->avctx->time_base.den;
}
put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */
if(!s->progressive_sequence){
- put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first);
+ put_bits(&s->pb, 1, s->current_picture_ptr->f.top_field_first);
put_bits(&s->pb, 1, s->alternate_scan);
}
//FIXME sprite stuff
void ff_copy_picture(Picture *dst, Picture *src){
*dst = *src;
- dst->type= FF_BUFFER_TYPE_COPY;
+ dst->f.type = FF_BUFFER_TYPE_COPY;
}
/**
static void free_frame_buffer(MpegEncContext *s, Picture *pic)
{
ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
- av_freep(&pic->hwaccel_picture_private);
+ av_freep(&pic->f.hwaccel_picture_private);
}
/**
if (s->avctx->hwaccel) {
- assert(!pic->hwaccel_picture_private);
+ assert(!pic->f.hwaccel_picture_private);
if (s->avctx->hwaccel->priv_data_size) {
- pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
- if (!pic->hwaccel_picture_private) {
+ pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
+ if (!pic->f.hwaccel_picture_private) {
av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
return -1;
}
r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
- if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
- av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
- av_freep(&pic->hwaccel_picture_private);
+ if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
+ av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
+ r, pic->f.age, pic->f.type, pic->f.data[0]);
+ av_freep(&pic->f.hwaccel_picture_private);
return -1;
}
- if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
+ if (s->linesize && (s->linesize != pic->f.linesize[0] || s->uvlinesize != pic->f.linesize[1])) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
free_frame_buffer(s, pic);
return -1;
}
- if (pic->linesize[1] != pic->linesize[2]) {
+ if (pic->f.linesize[1] != pic->f.linesize[2]) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
free_frame_buffer(s, pic);
return -1;
int r= -1;
if(shared){
- assert(pic->data[0]);
+ assert(pic->f.data[0]);
- assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
+ assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
- pic->type= FF_BUFFER_TYPE_SHARED;
+ pic->f.type = FF_BUFFER_TYPE_SHARED;
}else{
- assert(!pic->data[0]);
+ assert(!pic->f.data[0]);
if (alloc_frame_buffer(s, pic) < 0)
return -1;
- s->linesize = pic->linesize[0];
- s->uvlinesize= pic->linesize[1];
+ s->linesize = pic->f.linesize[0];
+ s->uvlinesize = pic->f.linesize[1];
}
- if(pic->qscale_table==NULL){
+ if (pic->f.qscale_table == NULL) {
if (s->encoding) {
FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
}
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table, mb_array_size * sizeof(uint8_t) + 2, fail) //the +2 is for the slice end check
FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t) , fail)
FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
- pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
- pic->qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
+ pic->f.mb_type = pic->mb_type_base + 2*s->mb_stride + 1;
+ pic->f.qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
if(s->out_format == FMT_H264){
for(i=0; i<2; i++){
FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
- pic->motion_val[i]= pic->motion_val_base[i]+4;
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
+ pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
}
- pic->motion_subsample_log2= 2;
+ pic->f.motion_subsample_log2 = 2;
}else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
for(i=0; i<2; i++){
FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
- pic->motion_val[i]= pic->motion_val_base[i]+4;
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
+ pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
}
- pic->motion_subsample_log2= 3;
+ pic->f.motion_subsample_log2 = 3;
}
if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff, 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
}
- pic->qstride= s->mb_stride;
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
+ pic->f.qstride = s->mb_stride;
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan, 1 * sizeof(AVPanScan), fail)
}
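/* Sketch of why the bases above are offset by 2*s->mb_stride + 1
 * (reasoning inferred from the lookups elsewhere in this patch):
 * neighbour accesses such as mb_type[xy - s->mb_stride] or
 * qscale_table[xy - 1] on the first macroblock row/column then still
 * land inside the allocation instead of underflowing the buffer. */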
/* It might be nicer if the application would keep track of these
* but it would require an API change. */
memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
- if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == AV_PICTURE_TYPE_B)
- pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
+ if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
+ pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
pic->owner2 = NULL;
return 0;
static void free_picture(MpegEncContext *s, Picture *pic){
int i;
- if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
+ if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
free_frame_buffer(s, pic);
}
av_freep(&pic->mb_var);
av_freep(&pic->mc_mb_var);
av_freep(&pic->mb_mean);
- av_freep(&pic->mbskip_table);
+ av_freep(&pic->f.mbskip_table);
av_freep(&pic->qscale_table_base);
av_freep(&pic->mb_type_base);
- av_freep(&pic->dct_coeff);
- av_freep(&pic->pan_scan);
- pic->mb_type= NULL;
+ av_freep(&pic->f.dct_coeff);
+ av_freep(&pic->f.pan_scan);
+ pic->f.mb_type = NULL;
for(i=0; i<2; i++){
av_freep(&pic->motion_val_base[i]);
- av_freep(&pic->ref_index[i]);
+ av_freep(&pic->f.ref_index[i]);
}
- if(pic->type == FF_BUFFER_TYPE_SHARED){
+ if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
for(i=0; i<4; i++){
- pic->base[i]=
- pic->data[i]= NULL;
+ pic->f.base[i] =
+ pic->f.data[i] = NULL;
}
- pic->type= 0;
+ pic->f.type = 0;
}
}
if(!s1->first_field){
s->last_pict_type= s1->pict_type;
- if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->quality;
+ if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
if(s1->pict_type != AV_PICTURE_TYPE_B){
s->last_non_b_pict_type= s1->pict_type;
/* release non reference frames */
for(i=0; i<s->picture_count; i++){
- if(s->picture[i].data[0] && !s->picture[i].reference
+ if (s->picture[i].f.data[0] && !s->picture[i].f.reference
&& (!s->picture[i].owner2 || s->picture[i].owner2 == s)
&& (remove_current || &s->picture[i] != s->current_picture_ptr)
/*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
if(shared){
for(i=s->picture_range_start; i<s->picture_range_end; i++){
- if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
+ if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
+ return i;
}
}else{
for(i=s->picture_range_start; i<s->picture_range_end; i++){
- if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
+ if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
+ return i; //FIXME
}
for(i=s->picture_range_start; i<s->picture_range_end; i++){
- if(s->picture[i].data[0]==NULL) return i;
+ if (s->picture[i].f.data[0] == NULL)
+ return i;
}
}
assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
/* mark&release old frames */
- if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
+ if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) {
if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
free_frame_buffer(s, s->last_picture_ptr);
/* if(mpeg124/h263) */
if(!s->encoding){
for(i=0; i<s->picture_count; i++){
- if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
+ if (s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) {
av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
free_frame_buffer(s, &s->picture[i]);
}
if(!s->encoding){
ff_release_unused_pictures(s, 1);
- if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
+ if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL)
pic= s->current_picture_ptr; //we already have an unused image (maybe it was set before reading the header)
else{
i= ff_find_unused_picture(s, 0);
pic= &s->picture[i];
}
- pic->reference= 0;
+ pic->f.reference = 0;
if (!s->dropable){
if (s->codec_id == CODEC_ID_H264)
- pic->reference = s->picture_structure;
+ pic->f.reference = s->picture_structure;
else if (s->pict_type != AV_PICTURE_TYPE_B)
- pic->reference = 3;
+ pic->f.reference = 3;
}
- pic->coded_picture_number= s->coded_picture_number++;
+ pic->f.coded_picture_number = s->coded_picture_number++;
if(ff_alloc_picture(s, pic, 0) < 0)
return -1;
s->current_picture_ptr= pic;
//FIXME use only the vars from current_pic
- s->current_picture_ptr->top_field_first= s->top_field_first;
+ s->current_picture_ptr->f.top_field_first = s->top_field_first;
if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
if(s->picture_structure != PICT_FRAME)
- s->current_picture_ptr->top_field_first= (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
+ s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
}
- s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
- s->current_picture_ptr->field_picture= s->picture_structure != PICT_FRAME;
+ s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence;
+ s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
}
- s->current_picture_ptr->pict_type= s->pict_type;
+ s->current_picture_ptr->f.pict_type = s->pict_type;
// if(s->flags && CODEC_FLAG_QSCALE)
// s->current_picture_ptr->quality= s->new_picture_ptr->quality;
- s->current_picture_ptr->key_frame= s->pict_type == AV_PICTURE_TYPE_I;
+ s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
ff_copy_picture(&s->current_picture, s->current_picture_ptr);
s->next_picture_ptr= s->current_picture_ptr;
}
/* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
- s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL,
- s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL,
- s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
+ s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
+ s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
+ s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
s->pict_type, s->dropable);*/
if(s->codec_id != CODEC_ID_H264){
- if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) &&
+ if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) &&
(s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
if (s->pict_type != AV_PICTURE_TYPE_I)
av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
}
- if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==AV_PICTURE_TYPE_B){
+ if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) {
/* Allocate a dummy frame */
i= ff_find_unused_picture(s, 0);
s->next_picture_ptr= &s->picture[i];
if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
- assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
+ assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0]));
if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
int i;
for(i=0; i<4; i++){
if(s->picture_structure == PICT_BOTTOM_FIELD){
- s->current_picture.data[i] += s->current_picture.linesize[i];
+ s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
}
- s->current_picture.linesize[i] *= 2;
- s->last_picture.linesize[i] *=2;
- s->next_picture.linesize[i] *=2;
+ s->current_picture.f.linesize[i] *= 2;
+ s->last_picture.f.linesize[i] *= 2;
+ s->next_picture.f.linesize[i] *= 2;
}
}
&& !s->avctx->hwaccel
&& !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
&& s->unrestricted_mv
- && s->current_picture.reference
+ && s->current_picture.f.reference
&& !s->intra_only
&& !(s->flags&CODEC_FLAG_EMU_EDGE)) {
int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
- s->dsp.draw_edges(s->current_picture.data[0], s->linesize ,
+ s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
s->h_edge_pos , s->v_edge_pos,
EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
- s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize,
+ s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
- s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize,
+ s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
}
emms_c();
s->last_pict_type = s->pict_type;
- s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
+ s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
if(s->pict_type!=AV_PICTURE_TYPE_B){
s->last_non_b_pict_type= s->pict_type;
}
#if 0
/* copy back current_picture variables */
for(i=0; i<MAX_PICTURE_COUNT; i++){
- if(s->picture[i].data[0] == s->current_picture.data[0]){
+ if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){
s->picture[i]= s->current_picture;
break;
}
if(s->encoding){
/* release non-reference frames */
for(i=0; i<s->picture_count; i++){
- if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
+ if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) {
free_frame_buffer(s, &s->picture[i]);
}
}
#endif
s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
- if (s->codec_id != CODEC_ID_H264 && s->current_picture.reference) {
+ if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
}
}
const int s_mask= (2<<lowres)-1;
const int h_edge_pos = s->h_edge_pos >> lowres;
const int v_edge_pos = s->v_edge_pos >> lowres;
- linesize = s->current_picture.linesize[0] << field_based;
- uvlinesize = s->current_picture.linesize[1] << field_based;
+ linesize = s->current_picture.f.linesize[0] << field_based;
+ uvlinesize = s->current_picture.f.linesize[1] << field_based;
if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
motion_x/=2;
}
}
- if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
+ if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
dest_y += s->linesize;
dest_cb+= s->uvlinesize;
dest_cr+= s->uvlinesize;
s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
} else {
if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
- ref_picture= s->current_picture_ptr->data;
+ ref_picture = s->current_picture_ptr->f.data;
}
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
ref2picture= ref_picture;
}else{
- ref2picture= s->current_picture_ptr->data;
+ ref2picture = s->current_picture_ptr->f.data;
}
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
//opposite parity is always in the same frame if this is second field
if(!s->first_field){
- ref_picture = s->current_picture_ptr->data;
+ ref_picture = s->current_picture_ptr->f.data;
}
}
}
if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
/* save DCT coefficients */
int i,j;
- DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
+ DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
for(i=0; i<6; i++){
for(j=0; j<64; j++){
}
}
- s->current_picture.qscale_table[mb_xy]= s->qscale;
+ s->current_picture.f.qscale_table[mb_xy] = s->qscale;
/* update DC predictors for P macroblocks */
if (!s->mb_intra) {
int dct_linesize, dct_offset;
op_pixels_func (*op_pix)[4];
qpel_mc_func (*op_qpix)[16];
- const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
- const int uvlinesize= s->current_picture.linesize[1];
+ const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
+ const int uvlinesize = s->current_picture.f.linesize[1];
const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
/* skip only during decoding as we might trash the buffers during encoding a bit */
if(!s->encoding){
uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
- const int age= s->current_picture.age;
+ const int age = s->current_picture.f.age;
assert(age);
if(*mbskip_ptr >99) *mbskip_ptr= 99;
/* if previous was skipped too, then nothing to do ! */
- if (*mbskip_ptr >= age && s->current_picture.reference){
+ if (*mbskip_ptr >= age && s->current_picture.f.reference){
return;
}
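/* The age test above: mbskip_table[mb_xy] counts how many consecutive
 * pictures this MB has been skipped in, while age is the number of
 * frames since the reference buffer was last written. Once the skip
 * run covers the buffer's age, the pixels already in place are current
 * and the copy can be elided. */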
- } else if(!s->current_picture.reference){
+ } else if(!s->current_picture.f.reference) {
(*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
if(*mbskip_ptr >99) *mbskip_ptr= 99;
} else{
h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
if (s->mv_dir & MV_DIR_FORWARD) {
- MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
+ MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
op_pix = s->dsp.avg_h264_chroma_pixels_tab;
}
if (s->mv_dir & MV_DIR_BACKWARD) {
- MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
+ MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
}
}else{
op_qpix= s->me.qpel_put;
op_pix = s->dsp.put_no_rnd_pixels_tab;
}
if (s->mv_dir & MV_DIR_FORWARD) {
- MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
+ MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
op_pix = s->dsp.avg_pixels_tab;
op_qpix= s->me.qpel_avg;
}
if (s->mv_dir & MV_DIR_BACKWARD) {
- MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
+ MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
}
}
}
if (!s->avctx->hwaccel
&& !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
&& s->unrestricted_mv
- && s->current_picture.reference
+ && s->current_picture.f.reference
&& !s->intra_only
&& !(s->flags&CODEC_FLAG_EMU_EDGE)) {
int sides = 0, edge_h;
edge_h= FFMIN(h, s->v_edge_pos - y);
- s->dsp.draw_edges(s->current_picture_ptr->data[0] + y *s->linesize , s->linesize,
+ s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize , s->linesize,
s->h_edge_pos , edge_h , EDGE_WIDTH , EDGE_WIDTH , sides);
- s->dsp.draw_edges(s->current_picture_ptr->data[1] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
+ s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
- s->dsp.draw_edges(s->current_picture_ptr->data[2] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
+ s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
}
}
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
- const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
- const int uvlinesize= s->current_picture.linesize[1];
+ const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
+ const int uvlinesize = s->current_picture.f.linesize[1];
const int mb_size= 4 - s->avctx->lowres;
s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
//block_index is not used by mpeg2, so it is not affected by chroma_format
- s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
- s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
- s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
+ s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
+ s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
+ s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
{
return;
for(i=0; i<s->picture_count; i++){
- if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
- || s->picture[i].type == FF_BUFFER_TYPE_USER))
+ if (s->picture[i].f.data[0] &&
+ (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
+ s->picture[i].f.type == FF_BUFFER_TYPE_USER))
free_frame_buffer(s, &s->picture[i]);
}
s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
#ifndef AVCODEC_MPEGVIDEO_H
#define AVCODEC_MPEGVIDEO_H
+#include "avcodec.h"
#include "dsputil.h"
#include "get_bits.h"
#include "put_bits.h"
* Picture.
*/
typedef struct Picture{
- FF_COMMON_FRAME
+ struct AVFrame f;
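/* Since f is the first member, a Picture* may still be reinterpreted as
 * an AVFrame* -- which is exactly what the (AVFrame*)pic casts kept by
 * this patch rely on. A minimal sketch:
 * @code
 * Picture pic;
 * AVFrame *frame = (AVFrame*)&pic; // points at pic.f per C layout rules
 * @endcode
 * Codec-private per-picture state (mb_var, motion_val_base, owner2, ...)
 * remains directly in Picture; the new avcodec.h include above pulls in
 * the AVFrame definition this embedding needs. */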
/**
* halfpel luma planes.
#endif
v_edge_pos = s->v_edge_pos >> field_based;
- linesize = s->current_picture.linesize[0] << field_based;
- uvlinesize = s->current_picture.linesize[1] << field_based;
+ linesize = s->current_picture.f.linesize[0] << field_based;
+ uvlinesize = s->current_picture.f.linesize[1] << field_based;
dxy = ((motion_y & 1) << 1) | (motion_x & 1);
src_x = s->mb_x* 16 + (motion_x >> 1);
assert(!s->mb_skipped);
- memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy ], sizeof(int16_t)*4);
- memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
- memcpy(mv_cache[3][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
+ memcpy(mv_cache[1][1], s->current_picture.f.motion_val[0][mot_xy ], sizeof(int16_t) * 4);
+ memcpy(mv_cache[2][1], s->current_picture.f.motion_val[0][mot_xy + mot_stride], sizeof(int16_t) * 4);
+ memcpy(mv_cache[3][1], s->current_picture.f.motion_val[0][mot_xy + mot_stride], sizeof(int16_t) * 4);
- if(mb_y==0 || IS_INTRA(s->current_picture.mb_type[xy-s->mb_stride])){
+ if (mb_y == 0 || IS_INTRA(s->current_picture.f.mb_type[xy - s->mb_stride])) {
memcpy(mv_cache[0][1], mv_cache[1][1], sizeof(int16_t)*4);
}else{
- memcpy(mv_cache[0][1], s->current_picture.motion_val[0][mot_xy-mot_stride], sizeof(int16_t)*4);
+ memcpy(mv_cache[0][1], s->current_picture.f.motion_val[0][mot_xy - mot_stride], sizeof(int16_t) * 4);
}
- if(mb_x==0 || IS_INTRA(s->current_picture.mb_type[xy-1])){
+ if (mb_x == 0 || IS_INTRA(s->current_picture.f.mb_type[xy - 1])) {
AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
}else{
- AV_COPY32(mv_cache[1][0], s->current_picture.motion_val[0][mot_xy-1]);
- AV_COPY32(mv_cache[2][0], s->current_picture.motion_val[0][mot_xy-1+mot_stride]);
+ AV_COPY32(mv_cache[1][0], s->current_picture.f.motion_val[0][mot_xy - 1]);
+ AV_COPY32(mv_cache[2][0], s->current_picture.f.motion_val[0][mot_xy - 1 + mot_stride]);
}
- if(mb_x+1>=s->mb_width || IS_INTRA(s->current_picture.mb_type[xy+1])){
+ if (mb_x + 1 >= s->mb_width || IS_INTRA(s->current_picture.f.mb_type[xy + 1])) {
AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
}else{
- AV_COPY32(mv_cache[1][3], s->current_picture.motion_val[0][mot_xy+2]);
- AV_COPY32(mv_cache[2][3], s->current_picture.motion_val[0][mot_xy+2+mot_stride]);
+ AV_COPY32(mv_cache[1][3], s->current_picture.f.motion_val[0][mot_xy + 2]);
+ AV_COPY32(mv_cache[2][3], s->current_picture.f.motion_val[0][mot_xy + 2 + mot_stride]);
}
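/* The mv_cache built above is a small neighbourhood of 8x8-block MVs:
 * rows 1-2 are the current MB's two block rows, row 0 the MB above, and
 * row 3 repeats row 2 because the MB below is not decoded yet; intra or
 * out-of-picture neighbours are filled from the nearest valid entry via
 * the AV_COPY32/memcpy fallbacks. */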
mx = 0;
}
} else {
if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
- ref_picture= s->current_picture_ptr->data;
+ ref_picture = s->current_picture_ptr->f.data;
}
mpeg_motion(s, dest_y, dest_cb, dest_cr,
|| s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
ref2picture= ref_picture;
}else{
- ref2picture= s->current_picture_ptr->data;
+ ref2picture = s->current_picture_ptr->f.data;
}
mpeg_motion(s, dest_y, dest_cb, dest_cr,
//opposite parity is always in the same frame if this is second field
if(!s->first_field){
- ref_picture = s->current_picture_ptr->data;
+ ref_picture = s->current_picture_ptr->f.data;
}
}
}
* init s->current_picture.qscale_table from s->lambda_table
*/
void ff_init_qscale_tab(MpegEncContext *s){
- int8_t * const qscale_table= s->current_picture.qscale_table;
+ int8_t * const qscale_table = s->current_picture.f.qscale_table;
int i;
for(i=0; i<s->mb_num; i++){
int64_t score64=0;
for(plane=0; plane<3; plane++){
- const int stride= p->linesize[plane];
+ const int stride = p->f.linesize[plane];
const int bw= plane ? 1 : 2;
for(y=0; y<s->mb_height*bw; y++){
for(x=0; x<s->mb_width*bw; x++){
- int off= p->type == FF_BUFFER_TYPE_SHARED ? 0: 16;
- int v= s->dsp.frame_skip_cmp[1](s, p->data[plane] + 8*(x + y*stride)+off, ref->data[plane] + 8*(x + y*stride), stride, 8);
+ int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
+ int v = s->dsp.frame_skip_cmp[1](s, p->f.data[plane] + 8*(x + y*stride)+off, ref->f.data[plane] + 8*(x + y*stride), stride, 8);
switch(s->avctx->frame_skip_exp){
case 0: score= FFMAX(score, v); break;
if(pre_input_ptr && (!i || s->input_picture[i-1])) {
pre_input= *pre_input_ptr;
- if(pre_input.type != FF_BUFFER_TYPE_SHARED && i) {
- pre_input.data[0]+=INPLACE_OFFSET;
- pre_input.data[1]+=INPLACE_OFFSET;
- pre_input.data[2]+=INPLACE_OFFSET;
+ if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
+ pre_input.f.data[0] += INPLACE_OFFSET;
+ pre_input.f.data[1] += INPLACE_OFFSET;
+ pre_input.f.data[2] += INPLACE_OFFSET;
}
- s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0], pre_input.data[0], pre_input.linesize[0], c->width, c->height);
- s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1], pre_input.data[1], pre_input.linesize[1], c->width>>1, c->height>>1);
- s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2], pre_input.data[2], pre_input.linesize[2], c->width>>1, c->height>>1);
+ s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0], pre_input.f.data[0], pre_input.f.linesize[0], c->width, c->height);
+ s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1], pre_input.f.data[1], pre_input.f.linesize[1], c->width >> 1, c->height >> 1);
+ s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2], pre_input.f.data[2], pre_input.f.linesize[2], c->width >> 1, c->height >> 1);
}
}
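/* Each dsp.shrink[scale] pass halves both dimensions, so input[i] holds
 * a 1/2^scale-sized copy of the source picture (chroma planes already at
 * half width/height). These downscaled pictures feed the cheap trial
 * used to pick the B-frame count; the exact caller is assumed here
 * rather than shown in this hunk. */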
if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
s->reordered_input_picture[0]= s->input_picture[0];
- s->reordered_input_picture[0]->pict_type= AV_PICTURE_TYPE_I;
- s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
+ s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
+ s->reordered_input_picture[0]->f.coded_picture_number = s->coded_picture_number++;
}else{
int b_frames;
if(s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor){
if(s->picture_in_gop_number < s->gop_size && skip_check(s, s->input_picture[0], s->next_picture_ptr)){
//FIXME check that the gop check above is +-1 correct
-//av_log(NULL, AV_LOG_DEBUG, "skip %p %"PRId64"\n", s->input_picture[0]->data[0], s->input_picture[0]->pts);
+//av_log(NULL, AV_LOG_DEBUG, "skip %p %"PRId64"\n", s->input_picture[0]->f.data[0], s->input_picture[0]->f.pts);
- if(s->input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
+ if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
for(i=0; i<4; i++)
- s->input_picture[0]->data[i]= NULL;
- s->input_picture[0]->type= 0;
+ s->input_picture[0]->f.data[i] = NULL;
+ s->input_picture[0]->f.type = 0;
}else{
- assert( s->input_picture[0]->type==FF_BUFFER_TYPE_USER
- || s->input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
+ assert( s->input_picture[0]->f.type==FF_BUFFER_TYPE_USER
+ || s->input_picture[0]->f.type==FF_BUFFER_TYPE_INTERNAL);
if(s->flags&CODEC_FLAG_PASS2){
for(i=0; i<s->max_b_frames+1; i++){
- int pict_num= s->input_picture[0]->display_picture_number + i;
+ int pict_num = s->input_picture[0]->f.display_picture_number + i;
if(pict_num >= s->rc_context.num_entries)
break;
break;
}
- s->input_picture[i]->pict_type=
+ s->input_picture[i]->f.pict_type =
s->rc_context.entry[pict_num].new_pict_type;
}
}
for(i=1; i<s->max_b_frames+1; i++){
if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
s->input_picture[i]->b_frame_score=
- get_intra_count(s, s->input_picture[i ]->data[0],
- s->input_picture[i-1]->data[0], s->linesize) + 1;
+ get_intra_count(s, s->input_picture[i ]->f.data[0],
+ s->input_picture[i-1]->f.data[0], s->linesize) + 1;
}
}
for(i=0; i<s->max_b_frames+1; i++){
//av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
for(i= b_frames - 1; i>=0; i--){
- int type= s->input_picture[i]->pict_type;
+ int type = s->input_picture[i]->f.pict_type;
if(type && type != AV_PICTURE_TYPE_B)
b_frames= i;
}
- if(s->input_picture[b_frames]->pict_type == AV_PICTURE_TYPE_B && b_frames == s->max_b_frames){
+ if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B && b_frames == s->max_b_frames){
av_log(s->avctx, AV_LOG_ERROR, "warning, too many b frames in a row\n");
}
}else{
if(s->flags & CODEC_FLAG_CLOSED_GOP)
b_frames=0;
- s->input_picture[b_frames]->pict_type= AV_PICTURE_TYPE_I;
+ s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
}
}
if( (s->flags & CODEC_FLAG_CLOSED_GOP)
&& b_frames
- && s->input_picture[b_frames]->pict_type== AV_PICTURE_TYPE_I)
+ && s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
b_frames--;
s->reordered_input_picture[0]= s->input_picture[b_frames];
- if(s->reordered_input_picture[0]->pict_type != AV_PICTURE_TYPE_I)
- s->reordered_input_picture[0]->pict_type= AV_PICTURE_TYPE_P;
- s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
+ if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
+ s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
+ s->reordered_input_picture[0]->f.coded_picture_number = s->coded_picture_number++;
for(i=0; i<b_frames; i++){
- s->reordered_input_picture[i+1]= s->input_picture[i];
- s->reordered_input_picture[i+1]->pict_type= AV_PICTURE_TYPE_B;
- s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++;
+ s->reordered_input_picture[i + 1] = s->input_picture[i];
+ s->reordered_input_picture[i + 1]->f.pict_type = AV_PICTURE_TYPE_B;
+ s->reordered_input_picture[i + 1]->f.coded_picture_number = s->coded_picture_number++;
}
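/* Emission-order sketch: with b_frames == 2 the queued display-order
 * pictures B1 B2 P leave here as P B1 B2, and coded_picture_number is
 * handed out in that order (P = n, B1 = n + 1, B2 = n + 2). */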
}
}
no_output_pic:
if(s->reordered_input_picture[0]){
- s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=AV_PICTURE_TYPE_B ? 3 : 0;
+ s->reordered_input_picture[0]->f.reference = s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_B ? 3 : 0;
ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
- if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED || s->avctx->rc_buffer_size){
+ if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED || s->avctx->rc_buffer_size) {
// input is a shared pix, so we can't modify it -> alloc a new one & ensure that the shared one is reusable
int i= ff_find_unused_picture(s, 0);
Picture *pic= &s->picture[i];
- pic->reference = s->reordered_input_picture[0]->reference;
+ pic->f.reference = s->reordered_input_picture[0]->f.reference;
if(ff_alloc_picture(s, pic, 0) < 0){
return -1;
}
/* mark us unused / free shared pic */
- if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_INTERNAL)
+ if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
s->avctx->release_buffer(s->avctx, (AVFrame*)s->reordered_input_picture[0]);
for(i=0; i<4; i++)
- s->reordered_input_picture[0]->data[i]= NULL;
- s->reordered_input_picture[0]->type= 0;
+ s->reordered_input_picture[0]->f.data[i] = NULL;
+ s->reordered_input_picture[0]->f.type = 0;
copy_picture_attributes(s, (AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]);
s->current_picture_ptr= s->reordered_input_picture[0];
for(i=0; i<4; i++){
- s->new_picture.data[i]+= INPLACE_OFFSET;
+ s->new_picture.f.data[i] += INPLACE_OFFSET;
}
}
ff_copy_picture(&s->current_picture, s->current_picture_ptr);
- s->picture_number= s->new_picture.display_picture_number;
+ s->picture_number = s->new_picture.f.display_picture_number;
//printf("dpn:%d\n", s->picture_number);
}else{
memset(&s->new_picture, 0, sizeof(Picture));
}
/* output? */
- if(s->new_picture.data[0]){
- s->pict_type= s->new_picture.pict_type;
+ if (s->new_picture.f.data[0]) {
+ s->pict_type = s->new_picture.f.pict_type;
//emms_c();
//printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
MPV_frame_start(s, avctx);
ff_write_pass1_stats(s);
for(i=0; i<4; i++){
- s->current_picture_ptr->error[i]= s->current_picture.error[i];
- avctx->error[i] += s->current_picture_ptr->error[i];
+ s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
+ avctx->error[i] += s->current_picture_ptr->f.error[i];
}
if(s->flags&CODEC_FLAG_PASS1)
update_qscale(s);
if(!(s->flags&CODEC_FLAG_QP_RD)){
- s->qscale= s->current_picture_ptr->qscale_table[mb_xy];
+ s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
s->dquant= s->qscale - last_qp;
if(s->out_format==FMT_H263){
wrap_y = s->linesize;
wrap_c = s->uvlinesize;
- ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
- ptr_cb = s->new_picture.data[1] + (mb_y * mb_block_height * wrap_c) + mb_x * 8;
- ptr_cr = s->new_picture.data[2] + (mb_y * mb_block_height * wrap_c) + mb_x * 8;
+ ptr_y = s->new_picture.f.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
+ ptr_cb = s->new_picture.f.data[1] + (mb_y * mb_block_height * wrap_c) + mb_x * 8;
+ ptr_cr = s->new_picture.f.data[2] + (mb_y * mb_block_height * wrap_c) + mb_x * 8;
if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
uint8_t *ebuf= s->edge_emu_buffer + 32;
}
if (s->mv_dir & MV_DIR_FORWARD) {
- MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
+ MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
op_pix = s->dsp.avg_pixels_tab;
op_qpix= s->dsp.avg_qpel_pixels_tab;
}
if (s->mv_dir & MV_DIR_BACKWARD) {
- MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
+ MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
}
if(s->flags&CODEC_FLAG_INTERLACED_DCT){
if(w==16 && h==16)
if(s->avctx->mb_cmp == FF_CMP_NSSE){
- return s->dsp.nsse[0](s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
- +s->dsp.nsse[1](s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
- +s->dsp.nsse[1](s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
+ return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
+ +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
+ +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
}else{
- return s->dsp.sse[0](NULL, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
- +s->dsp.sse[1](NULL, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
- +s->dsp.sse[1](NULL, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
+ return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
+ +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
+ +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
}
else
- return sse(s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
- +sse(s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
- +sse(s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
+ return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
+ +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
+ +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
}
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
for(mb_x=0; mb_x < s->mb_width; mb_x++) {
int xx = mb_x * 16;
int yy = mb_y * 16;
- uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
+ uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
int varc;
int sum = s->dsp.pix_sum(pix, s->linesize);
/* note: quant matrix value (8) is implied here */
s->last_dc[i] = 128 << s->intra_dc_precision;
- s->current_picture.error[i] = 0;
+ s->current_picture.f.error[i] = 0;
}
s->mb_skip_run = 0;
memset(s->last_mv, 0, sizeof(s->last_mv));
s->mv_type = MV_TYPE_8X8;
s->mb_intra= 0;
for(i=0; i<4; i++){
- s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
- s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
+ s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
+ s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
}
encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
&dmin, &next_block, 0, 0);
}
}
- s->current_picture.qscale_table[xy]= best_s.qscale;
+ s->current_picture.f.qscale_table[xy] = best_s.qscale;
copy_context_after_encode(s, &best_s, -1);
s->mv_type = MV_TYPE_8X8;
s->mb_intra= 0;
for(i=0; i<4; i++){
- s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
- s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
+ s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
+ s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
}
break;
case CANDIDATE_MB_TYPE_DIRECT:
if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
- s->current_picture.error[0] += sse(
- s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
+ s->current_picture.f.error[0] += sse(
+ s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
s->dest[0], w, h, s->linesize);
- s->current_picture.error[1] += sse(
- s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
+ s->current_picture.f.error[1] += sse(
+ s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
- s->current_picture.error[2] += sse(
- s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
+ s->current_picture.f.error[2] += sse(
+ s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
}
if(s->loop_filter){
MERGE(misc_bits);
MERGE(error_count);
MERGE(padding_bug_score);
- MERGE(current_picture.error[0]);
- MERGE(current_picture.error[1]);
- MERGE(current_picture.error[2]);
+ MERGE(current_picture.f.error[0]);
+ MERGE(current_picture.f.error[1]);
+ MERGE(current_picture.f.error[2]);
if(dst->avctx->noise_reduction){
for(i=0; i<64; i++){
static int estimate_qp(MpegEncContext *s, int dry_run){
if (s->next_lambda){
- s->current_picture_ptr->quality=
- s->current_picture.quality = s->next_lambda;
+ s->current_picture_ptr->f.quality =
+ s->current_picture.f.quality = s->next_lambda;
if(!dry_run) s->next_lambda= 0;
} else if (!s->fixed_qscale) {
- s->current_picture_ptr->quality=
- s->current_picture.quality = ff_rate_estimate_qscale(s, dry_run);
- if (s->current_picture.quality < 0)
+ s->current_picture_ptr->f.quality =
+ s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
+ if (s->current_picture.f.quality < 0)
return -1;
}
s->lambda= s->lambda_table[0];
//FIXME broken
}else
- s->lambda= s->current_picture.quality;
+ s->lambda = s->current_picture.f.quality;
//printf("%d %d\n", s->avctx->global_quality, s->current_picture.quality);
update_qscale(s);
return 0;
/* must be called before writing the header */
static void set_frame_distances(MpegEncContext * s){
- assert(s->current_picture_ptr->pts != AV_NOPTS_VALUE);
+ assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
- s->time= s->current_picture_ptr->pts*s->avctx->time_base.num;
+ s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
if(s->pict_type==AV_PICTURE_TYPE_B){
s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
}
//FIXME var duplication
- s->current_picture_ptr->key_frame=
- s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
- s->current_picture_ptr->pict_type=
- s->current_picture.pict_type= s->pict_type;
+ s->current_picture_ptr->f.key_frame =
+ s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
+ s->current_picture_ptr->f.pict_type =
+ s->current_picture.f.pict_type = s->pict_type;
- if(s->current_picture.key_frame)
+ if (s->current_picture.f.key_frame)
s->picture_in_gop_number=0;
s->last_bits= put_bits_count(&s->pb);
}else{
if(n<4){
wrap= s->linesize;
- dest= s->current_picture.data[0] + (((n>>1) + 2*s->mb_y) * 8* wrap ) + ((n&1) + 2*s->mb_x) * 8;
+ dest= s->current_picture.f.data[0] + (((n >> 1) + 2*s->mb_y) * 8* wrap ) + ((n & 1) + 2*s->mb_x) * 8;
}else{
wrap= s->uvlinesize;
- dest= s->current_picture.data[n-3] + (s->mb_y * 8 * wrap) + s->mb_x * 8;
+ dest= s->current_picture.f.data[n - 3] + (s->mb_y * 8 * wrap) + s->mb_x * 8;
}
if(s->mb_x==0) a= (1024 + (scale>>1))/scale;
else a= get_dc(dest-8, wrap, scale*8);
{
int cbp, code, i;
uint8_t *coded_val;
- uint32_t * const mb_type_ptr= &s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ];
+ uint32_t * const mb_type_ptr = &s->current_picture.f.mb_type[s->mb_x + s->mb_y*s->mb_stride];
if (s->pict_type == AV_PICTURE_TYPE_P) {
if (s->use_skip_mb_code) {
void ff_write_pass1_stats(MpegEncContext *s){
snprintf(s->avctx->stats_out, 256, "in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%d var:%d icount:%d skipcount:%d hbits:%d;\n",
- s->current_picture_ptr->display_picture_number, s->current_picture_ptr->coded_picture_number, s->pict_type,
- s->current_picture.quality, s->i_tex_bits, s->p_tex_bits, s->mv_bits, s->misc_bits,
- s->f_code, s->b_code, s->current_picture.mc_mb_var_sum, s->current_picture.mb_var_sum, s->i_count, s->skip_count, s->header_bits);
+ s->current_picture_ptr->f.display_picture_number, s->current_picture_ptr->f.coded_picture_number, s->pict_type,
+ s->current_picture.f.quality, s->i_tex_bits, s->p_tex_bits, s->mv_bits, s->misc_bits,
+ s->f_code, s->b_code, s->current_picture.mc_mb_var_sum, s->current_picture.mb_var_sum, s->i_count, s->skip_count, s->header_bits);
}
static inline double qp2bits(RateControlEntry *rce, double qp){
//if(dts_pic)
// av_log(NULL, AV_LOG_ERROR, "%Ld %Ld %Ld %d\n", s->current_picture_ptr->pts, s->user_specified_pts, dts_pic->pts, picture_number);
- if(!dts_pic || dts_pic->pts == AV_NOPTS_VALUE)
+ if (!dts_pic || dts_pic->f.pts == AV_NOPTS_VALUE)
wanted_bits= (uint64_t)(s->bit_rate*(double)picture_number/fps);
else
- wanted_bits= (uint64_t)(s->bit_rate*(double)dts_pic->pts/fps);
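+ /* desired total size so far: bit_rate times the elapsed time implied by the DTS picture's pts */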
+ wanted_bits = (uint64_t)(s->bit_rate*(double)dts_pic->f.pts / fps);
}
diff= s->total_bits - wanted_bits;
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
- int mbtype = s->current_picture_ptr->mb_type[mb_pos];
+ int mbtype = s->current_picture_ptr->f.mb_type[mb_pos];
if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
r->deblock_coefs[mb_pos] = 0xFFFF;
if(IS_INTRA(mbtype))
*/
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
- cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos]];
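+ /* loop-filter strength limit is looked up from the macroblock's stored quantizer */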
+ cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos]];
if(mb_x)
- left_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - 1]];
+ left_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos - 1]];
for(j = 0; j < 16; j += 4){
- Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x;
+ Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x;
for(i = !mb_x; i < 4; i++, Y += 4){
int ij = i + j;
loc_lim = 0;
if(mb_x)
left_cbp = (r->cbp_chroma[mb_pos - 1] >> (k*4)) & 0xF;
for(j = 0; j < 8; j += 4){
- C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize + 4 * !mb_x;
+ C = s->current_picture_ptr->f.data[k + 1] + mb_x*8 + (row*8 + j) * s->uvlinesize + 4 * !mb_x;
for(i = !mb_x; i < 2; i++, C += 4){
int ij = i + (j >> 1);
loc_lim = 0;
}
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
- cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos]];
+ cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos]];
if(row)
- top_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - s->mb_stride]];
+ top_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos - s->mb_stride]];
for(j = 4*!row; j < 16; j += 4){
- Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
+ Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize;
for(i = 0; i < 4; i++, Y += 4){
int ij = i + j;
loc_lim = 0;
if(row)
top_cbp = (r->cbp_chroma[mb_pos - s->mb_stride] >> (k*4)) & 0xF;
for(j = 4*!row; j < 8; j += 4){
- C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize;
+ C = s->current_picture_ptr->f.data[k + 1] + mb_x*8 + (row*8 + j) * s->uvlinesize;
for(i = 0; i < 2; i++, C += 4){
int ij = i + (j >> 1);
loc_lim = 0;
if(!get_bits1(gb))
av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
}
- s->current_picture_ptr->mb_type[mb_pos] = r->is16 ? MB_TYPE_INTRA16x16 : MB_TYPE_INTRA;
+ s->current_picture_ptr->f.mb_type[mb_pos] = r->is16 ? MB_TYPE_INTRA16x16 : MB_TYPE_INTRA;
r->block_type = r->is16 ? RV34_MB_TYPE_INTRA16x16 : RV34_MB_TYPE_INTRA;
}else{
r->block_type = r->decode_mb_info(r);
if(r->block_type == -1)
return -1;
- s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
+ s->current_picture_ptr->f.mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
r->mb_type[mb_pos] = r->block_type;
if(r->block_type == RV34_MB_SKIP){
if(s->pict_type == AV_PICTURE_TYPE_P)
if(s->pict_type == AV_PICTURE_TYPE_B)
r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
}
- r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
+ r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->f.mb_type[mb_pos]);
rv34_decode_mv(r, r->block_type);
if(r->block_type == RV34_MB_SKIP){
fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0]));
r->chroma_vlc = 1;
r->luma_vlc = 0;
}
- if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
+ if(IS_INTRA(s->current_picture_ptr->f.mb_type[mb_pos])){
if(r->is16){
t = get_bits(gb, 2);
fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
c_off = -1;
if(r->avail_cache[avail_index - 1]){
- A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];
- A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];
+ A[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-1][0];
+ A[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-1][1];
}
if(r->avail_cache[avail_index - 4]){
- B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];
- B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];
+ B[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride][0];
+ B[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride][1];
}else{
B[0] = A[0];
B[1] = A[1];
}
if(!r->avail_cache[avail_index - 4 + c_off]){
if(r->avail_cache[avail_index - 4] && (r->avail_cache[avail_index - 1] || r->rv30)){
- C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
- C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
+ C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride-1][0];
+ C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride-1][1];
}else{
C[0] = A[0];
C[1] = A[1];
}
}else{
- C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0];
- C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1];
+ C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride+c_off][0];
+ C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride+c_off][1];
}
mx = mid_pred(A[0], B[0], C[0]);
my = mid_pred(A[1], B[1], C[1]);
my += r->dmv[dmv_no][1];
for(j = 0; j < part_sizes_h[block_type]; j++){
for(i = 0; i < part_sizes_w[block_type]; i++){
- s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
- s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
+ s->current_picture_ptr->f.motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
+ s->current_picture_ptr->f.motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
}
}
}
int i, j;
Picture *cur_pic = s->current_picture_ptr;
const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0;
- int type = cur_pic->mb_type[mb_pos];
+ int type = cur_pic->f.mb_type[mb_pos];
memset(A, 0, sizeof(A));
memset(B, 0, sizeof(B));
memset(C, 0, sizeof(C));
if((r->avail_cache[6-1] & type) & mask){
- A[0] = cur_pic->motion_val[dir][mv_pos - 1][0];
- A[1] = cur_pic->motion_val[dir][mv_pos - 1][1];
+ A[0] = cur_pic->f.motion_val[dir][mv_pos - 1][0];
+ A[1] = cur_pic->f.motion_val[dir][mv_pos - 1][1];
has_A = 1;
}
if((r->avail_cache[6-4] & type) & mask){
- B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0];
- B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1];
+ B[0] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride][0];
+ B[1] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride][1];
has_B = 1;
}
if(r->avail_cache[6-4] && (r->avail_cache[6-2] & type) & mask){
- C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0];
- C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1];
+ C[0] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride + 2][0];
+ C[1] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride + 2][1];
has_C = 1;
}else if((s->mb_x+1) == s->mb_width && (r->avail_cache[6-5] & type) & mask){
- C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0];
- C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1];
+ C[0] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride - 1][0];
+ C[1] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride - 1][1];
has_C = 1;
}
for(j = 0; j < 2; j++){
for(i = 0; i < 2; i++){
- cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
- cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
+ cur_pic->f.motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
+ cur_pic->f.motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
}
}
if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD){
- ZERO8x2(cur_pic->motion_val[!dir][mv_pos], s->b8_stride);
+ ZERO8x2(cur_pic->f.motion_val[!dir][mv_pos], s->b8_stride);
}
}
int avail_index = avail_indexes[0];
if(r->avail_cache[avail_index - 1]){
- A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];
- A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];
+ A[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - 1][0];
+ A[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - 1][1];
}
if(r->avail_cache[avail_index - 4]){
- B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];
- B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];
+ B[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride][0];
+ B[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride][1];
}else{
B[0] = A[0];
B[1] = A[1];
}
if(!r->avail_cache[avail_index - 4 + 2]){
if(r->avail_cache[avail_index - 4] && (r->avail_cache[avail_index - 1])){
- C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
- C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
+ C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride - 1][0];
+ C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride - 1][1];
}else{
C[0] = A[0];
C[1] = A[1];
}
}else{
- C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+2][0];
- C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+2][1];
+ C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride + 2][0];
+ C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride + 2][1];
}
mx = mid_pred(A[0], B[0], C[0]);
my = mid_pred(A[1], B[1], C[1]);
for(j = 0; j < 2; j++){
for(i = 0; i < 2; i++){
for(k = 0; k < 2; k++){
- s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
- s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
+ s->current_picture_ptr->f.motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
+ s->current_picture_ptr->f.motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
}
}
}
if(thirdpel){
int chroma_mx, chroma_my;
- mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
- my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
- lx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
- ly = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
- chroma_mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + 1) >> 1;
- chroma_my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + 1) >> 1;
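+ /* the 3 << 24 bias makes C's truncating / and % act as floor division
+    and non-negative modulus for negative third-pel motion vectors */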
+ mx = (s->current_picture_ptr->f.motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
+ my = (s->current_picture_ptr->f.motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
+ lx = (s->current_picture_ptr->f.motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
+ ly = (s->current_picture_ptr->f.motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
+ chroma_mx = (s->current_picture_ptr->f.motion_val[dir][mv_pos][0] + 1) >> 1;
+ chroma_my = (s->current_picture_ptr->f.motion_val[dir][mv_pos][1] + 1) >> 1;
umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3];
uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3];
}else{
int cx, cy;
- mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
- my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
- lx = s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3;
- ly = s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3;
- cx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
- cy = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
+ mx = s->current_picture_ptr->f.motion_val[dir][mv_pos][0] >> 2;
+ my = s->current_picture_ptr->f.motion_val[dir][mv_pos][1] >> 2;
+ lx = s->current_picture_ptr->f.motion_val[dir][mv_pos][0] & 3;
+ ly = s->current_picture_ptr->f.motion_val[dir][mv_pos][1] & 3;
+ cx = s->current_picture_ptr->f.motion_val[dir][mv_pos][0] / 2;
+ cy = s->current_picture_ptr->f.motion_val[dir][mv_pos][1] / 2;
umx = cx >> 2;
umy = cy >> 2;
uvmx = (cx & 3) << 1;
uvmx = uvmy = 4;
}
dxy = ly*4 + lx;
- srcY = dir ? s->next_picture_ptr->data[0] : s->last_picture_ptr->data[0];
- srcU = dir ? s->next_picture_ptr->data[1] : s->last_picture_ptr->data[1];
- srcV = dir ? s->next_picture_ptr->data[2] : s->last_picture_ptr->data[2];
+ srcY = dir ? s->next_picture_ptr->f.data[0] : s->last_picture_ptr->f.data[0];
+ srcU = dir ? s->next_picture_ptr->f.data[1] : s->last_picture_ptr->f.data[1];
+ srcV = dir ? s->next_picture_ptr->f.data[2] : s->last_picture_ptr->f.data[2];
src_x = s->mb_x * 16 + xoff + mx;
src_y = s->mb_y * 16 + yoff + my;
uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx;
switch(block_type){
case RV34_MB_TYPE_INTRA:
case RV34_MB_TYPE_INTRA16x16:
- ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
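+ /* intra macroblocks carry no motion: clear this MB's 2x2 block of forward MVs */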
+ ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
return 0;
case RV34_MB_SKIP:
if(s->pict_type == AV_PICTURE_TYPE_P){
- ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
break;
}
case RV34_MB_B_DIRECT:
//surprisingly, it uses motion scheme from next reference frame
- next_bt = s->next_picture_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride];
+ next_bt = s->next_picture_ptr->f.mb_type[s->mb_x + s->mb_y * s->mb_stride];
if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
- ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
- ZERO8x2(s->current_picture_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ ZERO8x2(s->current_picture_ptr->f.motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
}else
for(j = 0; j < 2; j++)
for(i = 0; i < 2; i++)
for(k = 0; k < 2; k++)
for(l = 0; l < 2; l++)
- s->current_picture_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]);
+ s->current_picture_ptr->f.motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->f.motion_val[0][mv_pos + i + j*s->b8_stride][k]);
if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC
rv34_mc_2mv(r, block_type);
else
rv34_mc_2mv_skip(r);
- ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
break;
case RV34_MB_P_16x16:
case RV34_MB_P_MIX16x16:
MpegEncContext *s = &r->s;
int hmvmask = 0, vmvmask = 0, i, j;
int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
- int16_t (*motion_val)[2] = &s->current_picture_ptr->motion_val[0][midx];
+ int16_t (*motion_val)[2] = &s->current_picture_ptr->f.motion_val[0][midx];
for(j = 0; j < 16; j += 8){
for(i = 0; i < 2; i++){
if(is_mv_diff_gt_3(motion_val + i, 1))
dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
if(s->mb_x && dist)
r->avail_cache[5] =
- r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
+ r->avail_cache[9] = s->current_picture_ptr->f.mb_type[mb_pos - 1];
if(dist >= s->mb_width)
r->avail_cache[2] =
- r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
+ r->avail_cache[3] = s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride];
if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
- r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
+ r->avail_cache[4] = s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride + 1];
if(s->mb_x && dist > s->mb_width)
- r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
+ r->avail_cache[1] = s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride - 1];
s->qscale = r->si.quant;
cbp = cbp2 = rv34_decode_mb_header(r, intra_types);
r->deblock_coefs[mb_pos] = 0xFFFF;
else
r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
- s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
+ s->current_picture_ptr->f.qscale_table[mb_pos] = s->qscale;
if(cbp == -1)
return -1;
rv34_dequant4x4(s->block[blknum] + blkoff, rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]],rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]]);
rv34_inv_transform(s->block[blknum] + blkoff);
}
- if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos]))
+ if (IS_INTRA(s->current_picture_ptr->f.mb_type[mb_pos]))
rv34_output_macroblock(r, intra_types, cbp2, r->is16);
else
rv34_apply_differences(r, cbp2);
av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
return -1;
}
- if((!s->last_picture_ptr || !s->last_picture_ptr->data[0]) && si.type == AV_PICTURE_TYPE_B)
+ if ((!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) && si.type == AV_PICTURE_TYPE_B)
return -1;
if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
|| (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
- int mbtype = s->current_picture_ptr->mb_type[mb_pos];
+ int mbtype = s->current_picture_ptr->f.mb_type[mb_pos];
if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
r->cbp_luma [mb_pos] = r->deblock_coefs[mb_pos] = 0xFFFF;
if(IS_INTRA(mbtype))
int avail[4];
int y_to_deblock, c_to_deblock[2];
- q = s->current_picture_ptr->qscale_table[mb_pos];
+ q = s->current_picture_ptr->f.qscale_table[mb_pos];
alpha = rv40_alpha_tab[q];
beta = rv40_beta_tab [q];
betaY = betaC = beta * 3;
if(avail[i]){
int pos = mb_pos + neighbour_offs_x[i] + neighbour_offs_y[i]*s->mb_stride;
mvmasks[i] = r->deblock_coefs[pos];
- mbtype [i] = s->current_picture_ptr->mb_type[pos];
+ mbtype [i] = s->current_picture_ptr->f.mb_type[pos];
cbp [i] = r->cbp_luma[pos];
uvcbp[i][0] = r->cbp_chroma[pos] & 0xF;
uvcbp[i][1] = r->cbp_chroma[pos] >> 4;
}
for(j = 0; j < 16; j += 4){
- Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
+ Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize;
for(i = 0; i < 4; i++, Y += 4){
int ij = i + j;
int clip_cur = y_to_deblock & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
}
for(k = 0; k < 2; k++){
for(j = 0; j < 2; j++){
- C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize;
+ C = s->current_picture_ptr->f.data[k + 1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize;
for(i = 0; i < 2; i++, C += 4){
int ij = i + j*2;
int clip_cur = c_to_deblock[k] & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
frame_start(s);
s->m.current_picture_ptr= &s->m.current_picture;
- s->m.last_picture.pts= s->m.current_picture.pts;
- s->m.current_picture.pts= pict->pts;
+ s->m.last_picture.f.pts = s->m.current_picture.f.pts;
+ s->m.current_picture.f.pts = pict->pts;
if(pict->pict_type == AV_PICTURE_TYPE_P){
int block_width = (width +15)>>4;
int block_height= (height+15)>>4;
assert(s->last_picture[0].data[0]);
s->m.avctx= s->avctx;
- s->m.current_picture.data[0]= s->current_picture.data[0];
- s->m. last_picture.data[0]= s->last_picture[0].data[0];
- s->m. new_picture.data[0]= s-> input_picture.data[0];
+ s->m.current_picture.f.data[0] = s->current_picture.data[0];
+ s->m. last_picture.f.data[0] = s->last_picture[0].data[0];
+ s->m. new_picture.f.data[0] = s-> input_picture.data[0];
s->m. last_picture_ptr= &s->m. last_picture;
s->m.linesize=
- s->m. last_picture.linesize[0]=
- s->m. new_picture.linesize[0]=
- s->m.current_picture.linesize[0]= stride;
+ s->m. last_picture.f.linesize[0] =
+ s->m. new_picture.f.linesize[0] =
+ s->m.current_picture.f.linesize[0] = stride;
s->m.uvlinesize= s->current_picture.linesize[1];
s->m.width = width;
s->m.height= height;
s->current_picture.quality = pict->quality;
s->m.frame_bits = 8*(s->c.bytestream - s->c.bytestream_start);
s->m.p_tex_bits = s->m.frame_bits - s->m.misc_bits - s->m.mv_bits;
- s->m.current_picture.display_picture_number =
- s->m.current_picture.coded_picture_number = avctx->frame_number;
- s->m.current_picture.quality = pict->quality;
+ s->m.current_picture.f.display_picture_number =
+ s->m.current_picture.f.coded_picture_number = avctx->frame_number;
+ s->m.current_picture.f.quality = pict->quality;
s->m.total_bits += 8*(s->c.bytestream - s->c.bytestream_start);
if(s->pass1_rc)
if (ff_rate_estimate_qscale(&s->m, 0) < 0)
linesize= s->uvlinesize;
}
- current = s->current_picture.data[i];
+ current = s->current_picture.f.data[i];
if(s->pict_type==AV_PICTURE_TYPE_B){
- previous = s->next_picture.data[i];
+ previous = s->next_picture.f.data[i];
}else{
- previous = s->last_picture.data[i];
+ previous = s->last_picture.f.data[i];
}
if (s->pict_type == AV_PICTURE_TYPE_I) {
s->m.avctx= s->avctx;
s->m.current_picture_ptr= &s->m.current_picture;
s->m.last_picture_ptr = &s->m.last_picture;
- s->m.last_picture.data[0]= ref_plane;
+ s->m.last_picture.f.data[0] = ref_plane;
s->m.linesize=
- s->m.last_picture.linesize[0]=
- s->m.new_picture.linesize[0]=
- s->m.current_picture.linesize[0]= stride;
+ s->m.last_picture.f.linesize[0] =
+ s->m.new_picture.f.linesize[0] =
+ s->m.current_picture.f.linesize[0] = stride;
s->m.width= width;
s->m.height= height;
s->m.mb_width= block_width;
s->m.current_picture.mb_mean= (uint8_t *)s->dummy;
s->m.current_picture.mb_var= (uint16_t*)s->dummy;
s->m.current_picture.mc_mb_var= (uint16_t*)s->dummy;
- s->m.current_picture.mb_type= s->dummy;
+ s->m.current_picture.f.mb_type = s->dummy;
- s->m.current_picture.motion_val[0]= s->motion_val8[plane] + 2;
+ s->m.current_picture.f.motion_val[0] = s->motion_val8[plane] + 2;
s->m.p_mv_table= s->motion_val16[plane] + s->m.mb_stride + 1;
s->m.dsp= s->dsp; //move
ff_init_me(&s->m);
s->m.me.dia_size= s->avctx->dia_size;
s->m.first_slice_line=1;
for (y = 0; y < block_height; y++) {
- s->m.new_picture.data[0]= src - y*16*stride; //ugly
+ s->m.new_picture.f.data[0] = src - y*16*stride; //ugly
s->m.mb_y= y;
for(i=0; i<16 && i + 16*y<height; i++){
}
/* form component predictions */
- dest = s->current_picture.data[0] + x + y*s->linesize;
- src = pic->data[0] + mx + my*s->linesize;
+ dest = s->current_picture.f.data[0] + x + y*s->linesize;
+ src = pic->f.data[0] + mx + my*s->linesize;
if (emu) {
s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, (width + 1), (height + 1),
blocksize++;
for (i = 1; i < 3; i++) {
- dest = s->current_picture.data[i] + (x >> 1) + (y >> 1)*s->uvlinesize;
- src = pic->data[i] + mx + my*s->uvlinesize;
+ dest = s->current_picture.f.data[i] + (x >> 1) + (y >> 1) * s->uvlinesize;
+ src = pic->f.data[i] + mx + my * s->uvlinesize;
if (emu) {
s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->uvlinesize, (width + 1), (height + 1),
if (mode != PREDICT_MODE) {
pred_motion(h, k, (part_width >> 2), dir, 1, &mx, &my);
} else {
- mx = s->next_picture.motion_val[0][b_xy][0]<<1;
- my = s->next_picture.motion_val[0][b_xy][1]<<1;
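+ /* PREDICT_MODE: start from the co-located MV of the next picture, scaled by the frame-distance ratio below */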
+ mx = s->next_picture.f.motion_val[0][b_xy][0] << 1;
+ my = s->next_picture.f.motion_val[0][b_xy][1] << 1;
if (dir == 0) {
mx = ((mx * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
}
/* write back motion vectors */
- fill_rectangle(s->current_picture.motion_val[dir][b_xy], part_width>>2, part_height>>2, h->b_stride, pack16to32(mx,my), 4);
+ fill_rectangle(s->current_picture.f.motion_val[dir][b_xy],
+ part_width >> 2, part_height >> 2, h->b_stride,
+ pack16to32(mx, my), 4);
}
}
h->topright_samples_available = 0xFFFF;
if (mb_type == 0) { /* SKIP */
- if (s->pict_type == AV_PICTURE_TYPE_P || s->next_picture.mb_type[mb_xy] == -1) {
+ if (s->pict_type == AV_PICTURE_TYPE_P || s->next_picture.f.mb_type[mb_xy] == -1) {
svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0);
if (s->pict_type == AV_PICTURE_TYPE_B) {
mb_type = MB_TYPE_SKIP;
} else {
- mb_type = FFMIN(s->next_picture.mb_type[mb_xy], 6);
+ mb_type = FFMIN(s->next_picture.f.mb_type[mb_xy], 6);
if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0)
return -1;
if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0)
for (m = 0; m < 2; m++) {
if (s->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1]+6] != -1) {
for (i = 0; i < 4; i++) {
- *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - 1 + i*h->b_stride];
+ *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.f.motion_val[m][b_xy - 1 + i*h->b_stride];
}
} else {
for (i = 0; i < 4; i++) {
}
}
if (s->mb_y > 0) {
- memcpy(h->mv_cache[m][scan8[0] - 1*8], s->current_picture.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t));
+ memcpy(h->mv_cache[m][scan8[0] - 1*8], s->current_picture.f.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t));
memset(&h->ref_cache[m][scan8[0] - 1*8], (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
if (s->mb_x < (s->mb_width - 1)) {
- *(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride + 4];
+ *(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.f.motion_val[m][b_xy - h->b_stride + 4];
h->ref_cache[m][scan8[0] + 4 - 1*8] =
(h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride + 1]+6] == -1 ||
h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride ] ] == -1) ? PART_NOT_AVAILABLE : 1;
}else
h->ref_cache[m][scan8[0] + 4 - 1*8] = PART_NOT_AVAILABLE;
if (s->mb_x > 0) {
- *(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride - 1];
+ *(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.f.motion_val[m][b_xy - h->b_stride - 1];
h->ref_cache[m][scan8[0] - 1 - 1*8] = (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1]+3] == -1) ? PART_NOT_AVAILABLE : 1;
}else
h->ref_cache[m][scan8[0] - 1 - 1*8] = PART_NOT_AVAILABLE;
return -1;
} else {
for (i = 0; i < 4; i++) {
- memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
+ memset(s->current_picture.f.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
}
}
if (mb_type != 1) {
return -1;
} else {
for (i = 0; i < 4; i++) {
- memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
+ memset(s->current_picture.f.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
}
}
}
if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
for (i = 0; i < 4; i++) {
- memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
+ memset(s->current_picture.f.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
}
if (s->pict_type == AV_PICTURE_TYPE_B) {
for (i = 0; i < 4; i++) {
- memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
+ memset(s->current_picture.f.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
}
}
}
}
h->cbp= cbp;
- s->current_picture.mb_type[mb_xy] = mb_type;
+ s->current_picture.f.mb_type[mb_xy] = mb_type;
if (IS_INTRA(mb_type)) {
h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8);
}
/* for skipping the frame */
- s->current_picture.pict_type = s->pict_type;
- s->current_picture.key_frame = (s->pict_type == AV_PICTURE_TYPE_I);
+ s->current_picture.f.pict_type = s->pict_type;
+ s->current_picture.f.key_frame = (s->pict_type == AV_PICTURE_TYPE_I);
/* Skip B-frames if we do not have reference frames. */
if (s->last_picture_ptr == NULL && s->pict_type == AV_PICTURE_TYPE_B)
}
if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay) {
- s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
+ s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
(s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
}
}
uint8_t *srcY, *srcU, *srcV;
int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
- if(!v->s.last_picture.data[0])return;
+ if(!v->s.last_picture.f.data[0])return;
mx = s->mv[dir][0][0];
my = s->mv[dir][0][1];
// store motion vectors for further use in B frames
if(s->pict_type == AV_PICTURE_TYPE_P) {
- s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
- s->current_picture.motion_val[1][s->block_index[0]][1] = my;
+ s->current_picture.f.motion_val[1][s->block_index[0]][0] = mx;
+ s->current_picture.f.motion_val[1][s->block_index[0]][1] = my;
}
uvmx = (mx + ((mx & 3) == 3)) >> 1;
uvmy = (my + ((my & 3) == 3)) >> 1;
uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
}
if(!dir) {
- srcY = s->last_picture.data[0];
- srcU = s->last_picture.data[1];
- srcV = s->last_picture.data[2];
+ srcY = s->last_picture.f.data[0];
+ srcU = s->last_picture.f.data[1];
+ srcV = s->last_picture.f.data[2];
} else {
- srcY = s->next_picture.data[0];
- srcU = s->next_picture.data[1];
- srcV = s->next_picture.data[2];
+ srcY = s->next_picture.f.data[0];
+ srcU = s->next_picture.f.data[1];
+ srcV = s->next_picture.f.data[2];
}
src_x = s->mb_x * 16 + (mx >> 2);
int dxy, mx, my, src_x, src_y;
int off;
- if(!v->s.last_picture.data[0])return;
+ if(!v->s.last_picture.f.data[0])return;
mx = s->mv[0][n][0];
my = s->mv[0][n][1];
- srcY = s->last_picture.data[0];
+ srcY = s->last_picture.f.data[0];
off = s->linesize * 4 * (n&2) + (n&1) * 8;
int mvx[4], mvy[4], intra[4];
static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
- if(!v->s.last_picture.data[0])return;
+ if(!v->s.last_picture.f.data[0])return;
if(s->flags & CODEC_FLAG_GRAY) return;
for(i = 0; i < 4; i++) {
tx = (mvx[t1] + mvx[t2]) / 2;
ty = (mvy[t1] + mvy[t2]) / 2;
} else {
- s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
- s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
+ s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
+ s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
return; //no need to do MC for inter blocks
}
- s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
- s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
+ s->current_picture.f.motion_val[1][s->block_index[0]][0] = tx;
+ s->current_picture.f.motion_val[1][s->block_index[0]][1] = ty;
uvmx = (tx + ((tx&3) == 3)) >> 1;
uvmy = (ty + ((ty&3) == 3)) >> 1;
v->luma_mv[s->mb_x][0] = uvmx;
uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
}
- srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
- srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
+ srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
+ srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
|| (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
|| (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
xy = s->block_index[n];
if(s->mb_intra){
- s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
- s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
- s->current_picture.motion_val[1][xy][0] = 0;
- s->current_picture.motion_val[1][xy][1] = 0;
+ s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
+ s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
+ s->current_picture.f.motion_val[1][xy][0] = 0;
+ s->current_picture.f.motion_val[1][xy][1] = 0;
if(mv1) { /* duplicate motion data for 1-MV block */
- s->current_picture.motion_val[0][xy + 1][0] = 0;
- s->current_picture.motion_val[0][xy + 1][1] = 0;
- s->current_picture.motion_val[0][xy + wrap][0] = 0;
- s->current_picture.motion_val[0][xy + wrap][1] = 0;
- s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
- s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
+ s->current_picture.f.motion_val[0][xy + 1][0] = 0;
+ s->current_picture.f.motion_val[0][xy + 1][1] = 0;
+ s->current_picture.f.motion_val[0][xy + wrap][0] = 0;
+ s->current_picture.f.motion_val[0][xy + wrap][1] = 0;
+ s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
+ s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
- s->current_picture.motion_val[1][xy + 1][0] = 0;
- s->current_picture.motion_val[1][xy + 1][1] = 0;
- s->current_picture.motion_val[1][xy + wrap][0] = 0;
- s->current_picture.motion_val[1][xy + wrap][1] = 0;
- s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
- s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
+ s->current_picture.f.motion_val[1][xy + 1][0] = 0;
+ s->current_picture.f.motion_val[1][xy + 1][1] = 0;
+ s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
+ s->current_picture.f.motion_val[1][xy + wrap][1] = 0;
+ s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
+ s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
}
return;
}
- C = s->current_picture.motion_val[0][xy - 1];
- A = s->current_picture.motion_val[0][xy - wrap];
+ C = s->current_picture.f.motion_val[0][xy - 1];
+ A = s->current_picture.f.motion_val[0][xy - wrap];
if(mv1)
off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
else {
off = -1;
}
}
- B = s->current_picture.motion_val[0][xy - wrap + off];
+ B = s->current_picture.f.motion_val[0][xy - wrap + off];
if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
if(s->mb_width == 1) {
}
}
/* store MV using signed modulus of MV range defined in 4.11 */
- s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
- s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
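+ /* ((v + r) & (2*r - 1)) - r folds the vector into [-r, r-1]; the range r is a power of two */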
+ s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
+ s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
if(mv1) { /* duplicate motion data for 1-MV block */
- s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
- s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
- s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0];
- s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1];
- s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
- s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
+ s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
+ s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
+ s->current_picture.f.motion_val[0][xy + wrap][0] = s->current_picture.f.motion_val[0][xy][0];
+ s->current_picture.f.motion_val[0][xy + wrap][1] = s->current_picture.f.motion_val[0][xy][1];
+ s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
+ s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
}
}
uint8_t *srcY, *srcU, *srcV;
int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
- if(!v->s.next_picture.data[0])return;
+ if(!v->s.next_picture.f.data[0])return;
mx = s->mv[1][0][0];
my = s->mv[1][0][1];
uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1));
uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1));
}
- srcY = s->next_picture.data[0];
- srcU = s->next_picture.data[1];
- srcV = s->next_picture.data[2];
+ srcY = s->next_picture.f.data[0];
+ srcU = s->next_picture.f.data[1];
+ srcV = s->next_picture.f.data[2];
src_x = s->mb_x * 16 + (mx >> 2);
src_y = s->mb_y * 16 + (my >> 2);
xy = s->block_index[0];
if(s->mb_intra) {
- s->current_picture.motion_val[0][xy][0] =
- s->current_picture.motion_val[0][xy][1] =
- s->current_picture.motion_val[1][xy][0] =
- s->current_picture.motion_val[1][xy][1] = 0;
+ s->current_picture.f.motion_val[0][xy][0] =
+ s->current_picture.f.motion_val[0][xy][1] =
+ s->current_picture.f.motion_val[1][xy][0] =
+ s->current_picture.f.motion_val[1][xy][1] = 0;
return;
}
- s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
- s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
- s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
- s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
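+ /* direct mode: both B-frame MVs are derived by scaling the co-located MV of the next picture by bfraction */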
+ s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
+ s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
+ s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
+ s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
/* Pullback predicted motion vectors as specified in 8.4.5.4 */
s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
if(direct) {
- s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
- s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
- s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
- s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
+ s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
+ s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
+ s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
+ s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
return;
}
if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
- C = s->current_picture.motion_val[0][xy - 2];
- A = s->current_picture.motion_val[0][xy - wrap*2];
+ C = s->current_picture.f.motion_val[0][xy - 2];
+ A = s->current_picture.f.motion_val[0][xy - wrap*2];
off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
- B = s->current_picture.motion_val[0][xy - wrap*2 + off];
+ B = s->current_picture.f.motion_val[0][xy - wrap*2 + off];