Merge commit 'a7fcd4122b19b0f934020f4e261d0c44c4c32e11'
author Michael Niedermayer <michaelni@gmx.at>
Thu, 26 Jun 2014 22:16:34 +0000 (00:16 +0200)
committer Michael Niedermayer <michaelni@gmx.at>
Thu, 26 Jun 2014 22:16:34 +0000 (00:16 +0200)
* commit 'a7fcd4122b19b0f934020f4e261d0c44c4c32e11':
  output example: store the scaling context in the stream context

Conflicts:
doc/examples/muxing.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
1  2 
doc/examples/muxing.c

@@@ -59,42 -55,26 +59,44 @@@ typedef struct OutputStream 
      AVFrame *tmp_frame;
  
      float t, tincr, tincr2;
+     struct SwsContext *sws_ctx;
  } OutputStream;
  
 -/**************************************************************/
 -/* audio output */
 +static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
 +{
 +    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
  
 -/*
 - * add an audio output stream
 - */
 -static void add_audio_stream(OutputStream *ost, AVFormatContext *oc,
 -                             enum AVCodecID codec_id)
 +    printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
 +           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
 +           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
 +           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
 +           pkt->stream_index);
 +}
 +
 +static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
 +{
 +    /* rescale output packet timestamp values from codec to stream timebase */
 +    av_packet_rescale_ts(pkt, *time_base, st->time_base);
 +    pkt->stream_index = st->index;
 +
 +    /* Write the compressed frame to the media file. */
 +    log_packet(fmt_ctx, pkt);
 +    return av_interleaved_write_frame(fmt_ctx, pkt);
 +}
 +
 +/* Add an output stream. */
 +static void add_stream(OutputStream *ost, AVFormatContext *oc,
 +                       AVCodec **codec,
 +                       enum AVCodecID codec_id)
  {
      AVCodecContext *c;
 -    AVCodec *codec;
  
 -    /* find the audio encoder */
 -    codec = avcodec_find_encoder(codec_id);
 -    if (!codec) {
 -        fprintf(stderr, "codec not found\n");
 +    /* find the encoder */
 +    *codec = avcodec_find_encoder(codec_id);
 +    if (!(*codec)) {
 +        fprintf(stderr, "Could not find encoder for '%s'\n",
 +                avcodec_get_name(codec_id));
          exit(1);
      }
  
@@@ -410,29 -325,35 +412,29 @@@ static void fill_yuv_image(AVFrame *pic
      }
  }
  
 -static void write_video_frame(AVFormatContext *oc, OutputStream *ost)
 +static void write_video_frame(AVFormatContext *oc, OutputStream *ost, int flush)
  {
      int ret;
-     static struct SwsContext *sws_ctx;
 -    AVCodecContext *c;
 +    AVCodecContext *c = ost->st->codec;
  
 -    c = ost->st->codec;
 -
 -    if (frame_count >= STREAM_NB_FRAMES) {
 -        /* No more frames to compress. The codec has a latency of a few
 -         * frames if using B-frames, so we get the last frames by
 -         * passing the same picture again. */
 -    } else {
 +    if (!flush) {
          if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
              /* as we only generate a YUV420P picture, we must convert it
               * to the codec pixel format if needed */
-             if (!sws_ctx) {
-                 sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
-                                          c->width, c->height, c->pix_fmt,
-                                          SCALE_FLAGS, NULL, NULL, NULL);
-                 if (!sws_ctx) {
+             if (!ost->sws_ctx) {
 -                ost->sws_ctx = sws_getContext(c->width, c->height,
 -                                              AV_PIX_FMT_YUV420P,
++                ost->sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
+                                               c->width, c->height,
+                                               c->pix_fmt,
+                                               SCALE_FLAGS, NULL, NULL, NULL);
+                 if (!ost->sws_ctx) {
                      fprintf(stderr,
 -                            "Cannot initialize the conversion context\n");
 +                            "Could not initialize the conversion context\n");
                      exit(1);
                  }
              }
              fill_yuv_image(ost->tmp_frame, frame_count, c->width, c->height);
-             sws_scale(sws_ctx,
 -            sws_scale(ost->sws_ctx, ost->tmp_frame->data, ost->tmp_frame->linesize,
++            sws_scale(ost->sws_ctx,
 +                      (const uint8_t * const *)ost->tmp_frame->data, ost->tmp_frame->linesize,
                        0, c->height, ost->frame->data, ost->frame->linesize);
          } else {
              fill_yuv_image(ost->frame, frame_count, c->width, c->height);