c->sample_fmt = AV_SAMPLE_FMT_S16;
/* open it */
- if (avcodec_open(c, codec) < 0) {
+ if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
/* open it */
- if (avcodec_open(c, codec) < 0) {
+ if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
av_opt_set(c->priv_data, "preset", "slow", 0);
/* open it */
- if (avcodec_open(c, codec) < 0) {
+ if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
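The extra NULL argument to avcodec_open2() is an AVDictionary of codec
options; the av_opt_set() call on priv_data above can equivalently be
routed through it. A minimal sketch (same x264-specific "preset" option
as in the hunk above):

    AVDictionary *opts = NULL;
    av_dict_set(&opts, "preset", "slow", 0);
    if (avcodec_open2(c, codec, &opts) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }
    av_dict_free(&opts);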
available in the bitstream. */
/* open it */
- if (avcodec_open(c, codec) < 0) {
+ if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
}
/* open it */
- if (avcodec_open(c, codec) < 0) {
+ if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
get_audio_frame(samples, audio_input_frame_size, c->channels);
- pkt.size = avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
+ /* avcodec_encode_audio2() fills the packet itself; 'frame' wraps the
+  * 'samples' buffer and 'got_packet' is an int, see the sketch below */
+ avcodec_encode_audio2(c, &pkt, frame, &got_packet);
if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
}
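With avcodec_encode_audio2(), the raw 'samples' buffer must be wrapped in
an AVFrame before the call in the hunk above. A minimal sketch under that
assumption ('frame' and 'got_packet' are additions not shown in the hunk):

    AVFrame *frame = avcodec_alloc_frame();
    int got_packet;

    frame->nb_samples = audio_input_frame_size;
    avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                             (uint8_t *)samples,
                             audio_input_frame_size * c->channels *
                             av_get_bytes_per_sample(c->sample_fmt), 1);
    avcodec_encode_audio2(c, &pkt, frame, &got_packet);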
/* open the codec */
- if (avcodec_open(c, codec) < 0) {
+ if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
.id = CODEC_ID_FLV1,
.priv_data_size = sizeof(MpegEncContext),
.init = ff_MPV_encode_init,
- .encode = ff_MPV_encode_picture,
+ .encode2 = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"),
.id = CODEC_ID_H261,
.priv_data_size = sizeof(H261Context),
.init = ff_MPV_encode_init,
- .encode = ff_MPV_encode_picture,
+ .encode2 = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("H.261"),
// maximum over s->mjpeg_vsample[i]
#define V_MAX 2
-static int amv_encode_picture(AVCodecContext *avctx,
-                              unsigned char *buf, int buf_size, void *data)
+static int amv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
+                              AVFrame *pic, int *got_packet)
{
-    AVFrame* pic=data;
MpegEncContext *s = avctx->priv_data;
int i;
pic->data[i] += (pic->linesize[i] * (s->mjpeg_vsample[i] * (8 * s->mb_height -((s->height/V_MAX)&7)) - 1 ));
pic->linesize[i] *= -1;
}
- return ff_MPV_encode_picture(avctx,buf, buf_size, pic);
+ return ff_MPV_encode_picture(avctx, pkt, pic, got_packet);
}
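amv_encode_picture() shows the shape every encode2() callback takes: the
encoder writes into an AVPacket and reports output via *got_packet instead
of returning a byte count. A hedged sketch of the contract for a
hypothetical codec 'foo' (the size bound is illustrative only):

    static int foo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                const AVFrame *frame, int *got_packet)
    {
        int ret;

        /* reserve a worst-case sized buffer unless the caller provided one */
        if ((ret = ff_alloc_packet(pkt, avctx->width * avctx->height * 4)) < 0)
            return ret;
        /* ... compress 'frame' into pkt->data and set pkt->size ... */
        pkt->pts = frame->pts;
        if (frame->pict_type == AV_PICTURE_TYPE_I)
            pkt->flags |= AV_PKT_FLAG_KEY;
        *got_packet = 1;
        return 0;
    }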
AVCodec ff_mjpeg_encoder = {
.id = CODEC_ID_MJPEG,
.priv_data_size = sizeof(MpegEncContext),
.init = ff_MPV_encode_init,
- .encode = ff_MPV_encode_picture,
+ .encode2 = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
.id = CODEC_ID_AMV,
.priv_data_size = sizeof(MpegEncContext),
.init = ff_MPV_encode_init,
- .encode = amv_encode_picture,
+ .encode2 = amv_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("AMV Video"),
.id = CODEC_ID_MPEG1VIDEO,
.priv_data_size = sizeof(MpegEncContext),
.init = encode_init,
- .encode = ff_MPV_encode_picture,
+ .encode2 = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.supported_framerates= avpriv_frame_rate_tab+1,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.id = CODEC_ID_MPEG2VIDEO,
.priv_data_size = sizeof(MpegEncContext),
.init = encode_init,
- .encode = ff_MPV_encode_picture,
+ .encode2 = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.supported_framerates= avpriv_frame_rate_tab+1,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE},
.id = CODEC_ID_MPEG4,
.priv_data_size = sizeof(MpegEncContext),
.init = encode_init,
- .encode = ff_MPV_encode_picture,
+ .encode2 = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
* offsets used in asm. */
int64_t user_specified_pts;///< last non-zero pts from an AVFrame passed into avcodec_encode_video()
+ /**
+ * pts difference between the first and second input frame, used for
+ * calculating dts of the first frame when there's a delay */
+ int64_t dts_delta;
+ /**
+ * reordered pts to be used as dts for the next output frame when there's
+ * a delay */
+ int64_t reordered_pts;
/** bit output */
PutBitContext pb;
void ff_MPV_frame_end(MpegEncContext *s);
int ff_MPV_encode_init(AVCodecContext *avctx);
int ff_MPV_encode_end(AVCodecContext *avctx);
-int ff_MPV_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data);
+int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
+ const AVFrame *frame, int *got_packet);
void ff_MPV_common_init_mmx(MpegEncContext *s);
void ff_MPV_common_init_axp(MpegEncContext *s);
void ff_MPV_common_init_mmi(MpegEncContext *s);
AVFrame *pic = NULL;
int64_t pts;
int i;
- const int encoding_delay = s->max_b_frames;
+ const int encoding_delay = s->max_b_frames ? s->max_b_frames :
+ (s->low_delay ? 0 : 1);
int direct = 1;
if (pic_arg) {
"last=%"PRId64"\n", pts, s->user_specified_pts);
return -1;
}
+
+ if (!s->low_delay && pic_arg->display_picture_number == 1)
+ s->dts_delta = time - last;
}
s->user_specified_pts = pts;
} else {
return 0;
}
-int ff_MPV_encode_picture(AVCodecContext *avctx,
- unsigned char *buf, int buf_size, void *data)
+int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
+ const AVFrame *pic_arg, int *got_packet)
{
MpegEncContext *s = avctx->priv_data;
- AVFrame *pic_arg = data;
- int i, stuffing_count;
+ int i, stuffing_count, ret;
int context_count = s->slice_context_count;
+ if (!pkt->data &&
+ (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000)) < 0)
+ return ret;
+
for (i = 0; i < context_count; i++) {
int start_y = s->thread_context[i]->start_mb_y;
int end_y = s->thread_context[i]-> end_mb_y;
int h = s->mb_height;
- uint8_t *start = buf + (size_t)(((int64_t) buf_size) * start_y / h);
- uint8_t *end = buf + (size_t)(((int64_t) buf_size) * end_y / h);
+ uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
+ uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
init_put_bits(&s->thread_context[i]->pb, start, end - start);
}
}
s->total_bits += s->frame_bits;
avctx->frame_bits = s->frame_bits;
+
+ pkt->pts = s->current_picture.f.pts;
+ if (!s->low_delay) {
+ if (!s->current_picture.f.coded_picture_number)
+ pkt->dts = pkt->pts - s->dts_delta;
+ else
+ pkt->dts = s->reordered_pts;
+ s->reordered_pts = s->input_picture[0]->f.pts;
+ } else
+ pkt->dts = pkt->pts;
+ if (s->current_picture.f.key_frame)
+ pkt->flags |= AV_PKT_FLAG_KEY;
} else {
assert((put_bits_ptr(&s->pb) == s->pb.buf));
s->frame_bits = 0;
}
assert((s->frame_bits & 7) == 0);
- return s->frame_bits / 8;
+ pkt->size = s->frame_bits / 8;
+ *got_packet = !!pkt->size;
+ return 0;
}
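On the caller side this means the result is read from the packet rather
than from a return value; a minimal usage sketch with the public
avcodec_encode_video2() wrapper ('outfile' is hypothetical):

    AVPacket pkt = { 0 };
    int got_packet;

    av_init_packet(&pkt);
    if (avcodec_encode_video2(c, &pkt, frame, &got_packet) < 0)
        exit(1);
    if (got_packet) {
        /* pkt.pts and pkt.dts are now filled in by the encoder,
         * including the delayed-dts logic added above */
        fwrite(pkt.data, 1, pkt.size, outfile);
        av_free_packet(&pkt);
    }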
static inline void dct_single_coeff_elimination(MpegEncContext *s,
if(s->data_partitioning){
if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
|| s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
- av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+ av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
return -1;
}
}
.id = CODEC_ID_H263,
.priv_data_size = sizeof(MpegEncContext),
.init = ff_MPV_encode_init,
- .encode = ff_MPV_encode_picture,
+ .encode2 = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
.id = CODEC_ID_H263P,
.priv_data_size = sizeof(MpegEncContext),
.init = ff_MPV_encode_init,
- .encode = ff_MPV_encode_picture,
+ .encode2 = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.capabilities = CODEC_CAP_SLICE_THREADS,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.id = CODEC_ID_MSMPEG4V2,
.priv_data_size = sizeof(MpegEncContext),
.init = ff_MPV_encode_init,
- .encode = ff_MPV_encode_picture,
+ .encode2 = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
.id = CODEC_ID_MSMPEG4V3,
.priv_data_size = sizeof(MpegEncContext),
.init = ff_MPV_encode_init,
- .encode = ff_MPV_encode_picture,
+ .encode2 = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
.id = CODEC_ID_WMV1,
.priv_data_size = sizeof(MpegEncContext),
.init = ff_MPV_encode_init,
- .encode = ff_MPV_encode_picture,
+ .encode2 = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
.id = CODEC_ID_RV10,
.priv_data_size = sizeof(MpegEncContext),
.init = ff_MPV_encode_init,
- .encode = ff_MPV_encode_picture,
+ .encode2 = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("RealVideo 1.0"),
.id = CODEC_ID_RV20,
.priv_data_size = sizeof(MpegEncContext),
.init = ff_MPV_encode_init,
- .encode = ff_MPV_encode_picture,
+ .encode2 = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("RealVideo 2.0"),
if (buf_end - buf < 1)
return AVERROR_INVALIDDATA;
- if ((value = *buf++) == 0x80) {
+ if ((value = *buf++) == RLE_TRIGGER) {
run = *buf++ + 1;
if (run != 1)
value = *buf++;
AV_WB32(&avpkt->data[16], s->length);
*got_packet_ptr = 1;
+ avpkt->flags |= AV_PKT_FLAG_KEY;
avpkt->size = bytestream2_tell_p(&s->p);
return 0;
}
// fixed order prediction
#define PRED(x, k) (int32_t)((((uint64_t)x << k) - x) >> k)
switch (s->bps) {
- case 1: *p += PRED(*predictor, 4); break;
- case 2:
- case 3: *p += PRED(*predictor, 5); break;
- case 4: *p += *predictor; break;
+ case 1: *p += PRED(*predictor, 4); break;
+ case 2:
+ case 3: *p += PRED(*predictor, 5); break;
+ case 4: *p += *predictor; break;
}
*predictor = *p;
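PRED(x, k) expands to ((x << k) - x) >> k, i.e. roughly x * (2^k - 1) / 2^k:
for k = 4 the prediction is 15/16 of the previous sample (x = 160 gives 150),
and for k = 5 it is 31/32.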
.id = CODEC_ID_WMV2,
.priv_data_size = sizeof(Wmv2Context),
.init = wmv2_encode_init,
- .encode = ff_MPV_encode_picture,
+ .encode2 = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 8"),
/**
* Read the format header and initialize the AVFormatContext
- * structure. Return 0 if OK. 'ap' if non-NULL contains
- * additional parameters. Only used in raw format right
- * now. 'av_new_stream' should be called to create new streams.
+ * structure. Return 0 if OK. 'avformat_new_stream' should be
+ * called to create new streams.
*/
int (*read_header)(struct AVFormatContext *);
/**
* Read one packet and put it in 'pkt'. pts and flags are also
- * set. 'av_new_stream' can be called only if the flag
+ * set. 'avformat_new_stream' can be called only if the flag
* AVFMTCTX_NOHEADER is used and only in the calling thread (not in a
* background thread).
* @return 0 on success, < 0 on error.
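Put together, the updated doxygen suggests a demuxer skeleton along these
lines (a hedged sketch; the 'foo' names and FOO_FRAME_SIZE are
hypothetical):

    static int foo_read_header(AVFormatContext *s)
    {
        AVStream *st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = CODEC_ID_RAWVIDEO;
        return 0;
    }

    static int foo_read_packet(AVFormatContext *s, AVPacket *pkt)
    {
        /* av_get_packet() reads size bytes and also sets pkt->pos */
        return av_get_packet(s->pb, pkt, FOO_FRAME_SIZE);
    }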