/* This file is an image processing operation for GEGL
 *
 * GEGL is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 3 of the License, or (at your option) any later version.
 *
 * GEGL is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GEGL; if not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright 2003,2004,2007, 2015 Øyvind Kolås <pippin@gimp.org>
 */

#include "config.h"

#include <stdlib.h>

#include <glib/gi18n-lib.h>

#ifdef GEGL_PROPERTIES

property_string (path, _("File"), "/tmp/fnord.ogv")
    description (_("Target path and filename, use '-' for stdout."))

property_audio_fragment (audio, _("audio"), 0)
property_string (audio_codec, _("Audio codec"), "auto")
property_int (audio_bit_rate, _("audio bitrate in kb/s"), 64)
    description (_("Target encoded audio bitrate in kb/s"))

property_double (frame_rate, _("Frames/second"), 25.0)
    value_range (0.0, 100.0)

property_string (video_codec, _("Video codec"), "auto")
property_int (video_bit_rate, _("video bitrate in kb/s"), 128)
    description (_("Target encoded video bitrate in kb/s"))

property_int (global_quality, _("global quality"), 0)
property_int (noise_reduction, _("noise reduction"), 0)
property_int (scenechange_threshold, _("scenechange threshold"), 0)
property_int (video_bit_rate_min, _("video bitrate min"), 0)
property_int (video_bit_rate_max, _("video bitrate max"), 0)
property_int (video_bit_rate_tolerance, _("video bitrate tolerance"), -1)

property_int (keyint_min, _("keyint-min"), 0)
property_int (trellis, _("trellis"), 0)
property_int (bufsize, _("bufsize"), 0)
property_int (qmin, _("qmin"), 0)
property_int (qmax, _("qmax"), 0)
property_int (max_qdiff, _("max_qdiff"), 0)
property_int (me_range, _("me_range"), 0)
property_int (max_b_frames, _("max_b_frames"), 0)
property_int (gop_size, _("gop-size"), 0)
property_double (qcompress, _("qcompress"), 0.0)
property_double (qblur, _("qblur"), 0.0)
property_double (i_quant_factor, _("i-quant-factor"), 0.0)
property_double (i_quant_offset, _("i-quant-offset"), 0.0)
property_int (me_subpel_quality, _("me-subpel-quality"), 0)



#else

#define GEGL_OP_SINK
#define GEGL_OP_C_SOURCE ff-save.c

#include "gegl-op.h"

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>

typedef struct
{
  gdouble    frame;
  gdouble    frames;
  gdouble    width;
  gdouble    height;
  GeglBuffer *input;

  AVOutputFormat *fmt;
  AVFormatContext *oc;
  AVStream *video_st;

  AVFrame  *picture, *tmp_picture;
  uint8_t  *video_outbuf;
  int       frame_count, video_outbuf_size;
  int       audio_sample_rate;

    /** the rest is for audio handling within oxide, note that the interface
     * used passes all used functions in the oxide api through the reg_sym api
     * of gggl, this means that the ops should be usable by other applications
     * using gggl directly, without needing to link with the oxide library
     */
  AVStream *audio_st;

  uint32_t  sample_rate;
  uint32_t  bits;
  uint32_t  channels;
  uint32_t  fragment_samples;
  uint32_t  fragment_size;

  int       bufsize;
  int       buffer_read_pos;
  int       buffer_write_pos;
  uint8_t  *buffer;
  int       audio_outbuf_size;
  int16_t  *samples;

  GList    *audio_track;
  long      audio_pos;
  long      audio_read_pos;
  
  int       next_apts;
} Priv;

static void
clear_audio_track (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;
  while (p->audio_track)
    {
      g_object_unref (p->audio_track->data);
      p->audio_track = g_list_remove (p->audio_track, p->audio_track->data);
    }
}

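/* look up the left/right sample values for absolute sample position
 * sample_no in the queued audio fragments; fragments lying entirely
 * before the requested position are dropped as the track is consumed */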
static void get_sample_data (Priv *p, long sample_no, float *left, float *right)
{
  int to_remove = 0;
  GList *l;
  if (sample_no < 0)
    return;
  for (l = p->audio_track; l; l = l->next)
  {
    GeglAudioFragment *af = l->data;
    int channels = gegl_audio_fragment_get_channels (af);
    int pos = gegl_audio_fragment_get_pos (af);
    int sample_count = gegl_audio_fragment_get_sample_count (af);
    if (sample_no > pos + sample_count)
    {
      to_remove ++;
    }

    if (pos <= sample_no &&
        sample_no < pos + sample_count)
      {
        int i = sample_no - pos;
        *left  = af->data[0][i];
        if (channels == 1)
          *right = af->data[0][i];
        else
          *right = af->data[1][i];

        if (to_remove)  /* consuming audio track */
        {
          again:
          for (l = p->audio_track; l; l = l->next)
          {
            GeglAudioFragment *af = l->data;
            int pos = gegl_audio_fragment_get_pos (af);
            int sample_count = gegl_audio_fragment_get_sample_count (af);
            if (sample_no > pos + sample_count)
            {
              p->audio_track = g_list_remove (p->audio_track, af);
              g_object_unref (af);
              goto again;
            }
          }
        }
        return;
      }
  }
  *left  = 0;
  *right = 0;
}

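/* allocate the per-instance Priv state on first use, perform the one-time
 * libavformat/libavcodec registration and reset the audio bookkeeping */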
static void
init (GeglProperties *o)
{
  static gint inited = 0; /* deliberately static: the libav registration below must only run once */
  Priv       *p = (Priv*)o->user_data;

  if (p == NULL)
    {
      p = g_new0 (Priv, 1);
      o->user_data = (void*) p;
    }

  if (!inited)
    {
      av_register_all ();
      avcodec_register_all ();
      inited = 1;
    }

  clear_audio_track (o);
  p->audio_pos = 0;
  p->audio_read_pos = 0;

  p->audio_sample_rate = -1; /* only do this if it hasn't been manually set? */
}

static void close_video       (Priv            *p,
                               AVFormatContext *oc,
                               AVStream        *st);
void        close_audio       (Priv            *p,
                               AVFormatContext *oc,
                               AVStream        *st);
static int  tfile             (GeglProperties  *o);
static void write_video_frame (GeglProperties  *o,
                               AVFormatContext *oc,
                               AVStream        *st);
static void write_audio_frame (GeglProperties  *o,
                               AVFormatContext *oc,
                               AVStream        *st);

#define STREAM_FRAME_RATE 25    /* 25 images/s */

#ifndef DISABLE_AUDIO
/* add an audio output stream */
static AVStream *
add_audio_stream (GeglProperties *o, AVFormatContext * oc, int codec_id)
{
  AVCodecContext *c;
  AVStream *st;

  st = avformat_new_stream (oc, NULL);
  if (!st)
    {
      fprintf (stderr, "Could not alloc stream\n");
      exit (1);
    }

  c = st->codec;
  c->codec_id = codec_id;
  c->codec_type = AVMEDIA_TYPE_AUDIO;

  if (oc->oformat->flags & AVFMT_GLOBALHEADER)
    c->flags |= CODEC_FLAG_GLOBAL_HEADER;

  return st;
}
#endif

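/* find the audio encoder for the stream, negotiate sample format and
 * sample rate against what the codec supports, then open the codec */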
static void
open_audio (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  Priv *p = (Priv*)o->user_data;
  AVCodecContext *c;
  AVCodec  *codec;
  int i;

  c = st->codec;

  /* find the audio encoder */
  codec = avcodec_find_encoder (c->codec_id);
  if (!codec)
    {
      fprintf (stderr, "codec not found\n");
      exit (1);
    }
  c->bit_rate = o->audio_bit_rate * 1000;
  c->sample_fmt = codec->sample_fmts ? codec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;

  if (p->audio_sample_rate == -1)
  {
    if (o->audio)
    {
      if (gegl_audio_fragment_get_sample_rate (o->audio) == 0)
      {
        gegl_audio_fragment_set_sample_rate (o->audio, 48000); // XXX: should skip adding audiostream instead
      }
      p->audio_sample_rate = gegl_audio_fragment_get_sample_rate (o->audio);
    }
  }
  c->sample_rate = p->audio_sample_rate;
  c->channel_layout = AV_CH_LAYOUT_STEREO;
  c->channels = 2;


  if (codec->supported_samplerates)
  {
    c->sample_rate = codec->supported_samplerates[0];
    for (i = 0; codec->supported_samplerates[i]; i++)
    {
      if (codec->supported_samplerates[i] == p->audio_sample_rate)
         c->sample_rate = p->audio_sample_rate;
    }
  }
  //st->time_base = (AVRational){1, c->sample_rate};
  st->time_base = (AVRational){1, p->audio_sample_rate};

  c->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL; // ffmpeg AAC is not quite stable yet

  /* open it */
  if (avcodec_open2 (c, codec, NULL) < 0)
    {
      fprintf (stderr, "could not open codec\n");
      exit (1);
    }
}

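/* allocate an AVFrame (and its data buffers when nb_samples > 0) for the
 * given sample format, channel layout and sample rate */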
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  uint64_t channel_layout,
                                  int sample_rate, int nb_samples)
{
  AVFrame *frame = av_frame_alloc();
  int ret;

  if (!frame) {
      fprintf(stderr, "Error allocating an audio frame\n");
      exit(1);
  }

  frame->format         = sample_fmt;
  frame->channel_layout = channel_layout;
  frame->sample_rate    = sample_rate;
  frame->nb_samples     = nb_samples;

  if (nb_samples) {
      ret = av_frame_get_buffer(frame, 0);
      if (ret < 0) {
          fprintf(stderr, "Error allocating an audio buffer\n");
          exit(1);
      }
  }
  return frame;
}

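/* queue a copy of the incoming audio fragment, then encode and mux
 * codec-frame-sized chunks for as long as enough samples are buffered */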
void
write_audio_frame (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  Priv *p = (Priv*)o->user_data;
  AVCodecContext *c = st->codec;
  int sample_count = 100000;
  static AVPacket  pkt = { 0 };

  if (pkt.size == 0)
  {
    av_init_packet (&pkt);
  }

  /* first we add incoming frames audio samples */
  {
    int i;
    int sample_count = gegl_audio_fragment_get_sample_count (o->audio);
    GeglAudioFragment *af = gegl_audio_fragment_new (gegl_audio_fragment_get_sample_rate (o->audio),
                                                     gegl_audio_fragment_get_channels (o->audio),
                                                     gegl_audio_fragment_get_channel_layout (o->audio),
                                                     sample_count);
    gegl_audio_fragment_set_sample_count (af, sample_count);
    for (i = 0; i < sample_count; i++)
      {
        af->data[0][i] = o->audio->data[0][i];
        af->data[1][i] = o->audio->data[1][i];
      }
    gegl_audio_fragment_set_pos (af, p->audio_pos);
    p->audio_pos += sample_count;
    p->audio_track = g_list_append (p->audio_track, af);
  }

  if (!(c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
    sample_count = c->frame_size;

  /* then we encode as much as we can in a loop using the codec frame size */

  
  while (p->audio_pos - p->audio_read_pos > sample_count)
  {
    long i;
    int ret;
    int got_packet = 0;
    AVFrame *frame = alloc_audio_frame (c->sample_fmt, c->channel_layout,
                                        c->sample_rate, sample_count);

    switch (c->sample_fmt) {
      case AV_SAMPLE_FMT_FLT:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((float*)frame->data[0])[c->channels*i+0] = left;
          ((float*)frame->data[0])[c->channels*i+1] = right;
        }
        break;
      case AV_SAMPLE_FMT_FLTP:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((float*)frame->data[0])[i] = left;
          ((float*)frame->data[1])[i] = right;
        }
        break;
      case AV_SAMPLE_FMT_S16:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int16_t*)frame->data[0])[c->channels*i+0] = left * (1<<15);
          ((int16_t*)frame->data[0])[c->channels*i+1] = right * (1<<15);
        }
        break;
      case AV_SAMPLE_FMT_S32:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int32_t*)frame->data[0])[c->channels*i+0] = left * (1<<31);
          ((int32_t*)frame->data[0])[c->channels*i+1] = right * (1<<31);
        }
        break;
      case AV_SAMPLE_FMT_S32P:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int32_t*)frame->data[0])[i] = left * (1<<31);
          ((int32_t*)frame->data[1])[i] = right * (1<<31);
        }
        break;
      case AV_SAMPLE_FMT_S16P:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int16_t*)frame->data[0])[i] = left * (1<<15);
          ((int16_t*)frame->data[1])[i] = right * (1<<15);
        }
        break;
      default:
        fprintf (stderr, "eeeek unhandled audio format\n");
        break;
    }
    frame->pts = p->next_apts;
    p->next_apts += sample_count;

    av_frame_make_writable (frame);
    ret = avcodec_encode_audio2 (c, &pkt, frame, &got_packet);

    av_packet_rescale_ts (&pkt, st->codec->time_base, st->time_base);
    if (ret < 0) {
      fprintf (stderr, "Error encoding audio frame: %s\n", av_err2str (ret));
    }

    if (got_packet)
    {
      pkt.stream_index = st->index;
      av_interleaved_write_frame (oc, &pkt);
      av_free_packet (&pkt);
    }

    av_frame_free (&frame);
    p->audio_read_pos += sample_count;
  }
}

void
close_audio (Priv * p, AVFormatContext * oc, AVStream * st)
{
  avcodec_close (st->codec);

}

/* add a video output stream */
static AVStream *
add_video_stream (GeglProperties *o, AVFormatContext * oc, int codec_id)
{
  Priv *p = (Priv*)o->user_data;

  AVCodecContext *c;
  AVStream *st;

  st = avformat_new_stream (oc, NULL);
  if (!st)
    {
      fprintf (stderr, "Could not alloc stream %p %p %i\n", o, oc, codec_id);
      exit (1);
    }

  c = st->codec;
  c->codec_id = codec_id;
  c->codec_type = AVMEDIA_TYPE_VIDEO;
  /* put sample parameters */
  c->bit_rate = o->video_bit_rate * 1000;
  c->rc_min_rate = o->video_bit_rate_min * 1000;
  c->rc_max_rate = o->video_bit_rate_max * 1000;
  if (o->video_bit_rate_tolerance >= 0)
    c->bit_rate_tolerance = o->video_bit_rate_tolerance * 1000;
  /* resolution must be a multiple of two */
  c->width = p->width;
  c->height = p->height;
  /* frames per second */
  st->time_base = (AVRational){1, o->frame_rate};
  c->time_base = st->time_base;

  c->pix_fmt = AV_PIX_FMT_YUV420P;
  

  if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO)
    {
      c->max_b_frames = 2;
    }

  if (c->codec_id == AV_CODEC_ID_H264)
   {
     c->qcompress = 0.6;  // qcomp=0.6
     c->me_range = 16;    // me_range=16
     c->gop_size = 250;   // g=250
     c->max_b_frames = 3; // bf=3
   }

  if (o->bufsize)
    c->rc_buffer_size = o->bufsize * 1000;
  if (o->global_quality)
     c->global_quality = o->global_quality;
  if (o->qcompress != 0.0)
     c->qcompress = o->qcompress;
  if (o->qblur != 0.0)
     c->qblur = o->qblur;
  if (o->max_qdiff != 0)
     c->max_qdiff = o->max_qdiff;
  if (o->me_subpel_quality != 0)
     c->me_subpel_quality = o->me_subpel_quality;
  if (o->i_quant_factor != 0.0)
     c->i_quant_factor = o->i_quant_factor;
  if (o->i_quant_offset != 0.0)
     c->i_quant_offset = o->i_quant_offset;
  if (o->max_b_frames)
    c->max_b_frames = o->max_b_frames;
  if (o->me_range)
    c->me_range = o->me_range;
  if (o->noise_reduction)
    c->noise_reduction = o->noise_reduction;
  if (o->scenechange_threshold)
    c->scenechange_threshold = o->scenechange_threshold;
  if (o->trellis)
    c->trellis = o->trellis;
  if (o->qmin)
    c->qmin = o->qmin;
  if (o->qmax)
    c->qmax = o->qmax;
  if (o->gop_size)
    c->gop_size = o->gop_size;
  if (o->keyint_min)
    c->keyint_min = o->keyint_min;

   if (oc->oformat->flags & AVFMT_GLOBALHEADER)
     c->flags |= CODEC_FLAG_GLOBAL_HEADER;

  return st;
}


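/* allocate an AVFrame together with a pixel buffer of the requested
 * format and dimensions */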
static AVFrame *
alloc_picture (int pix_fmt, int width, int height)
{
  AVFrame  *picture;
  uint8_t  *picture_buf;
  int       size;

  picture = av_frame_alloc ();
  if (!picture)
    return NULL;
  size = avpicture_get_size (pix_fmt, width + 1, height + 1);
  picture_buf = malloc (size);
  if (!picture_buf)
    {
      av_free (picture);
      return NULL;
    }
  avpicture_fill ((AVPicture *) picture, picture_buf, pix_fmt, width, height);
  return picture;
}

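/* find the video encoder, prefer RGB24 when the codec supports it, then
 * open the codec and allocate the output buffer and picture(s) */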
static void
open_video (Priv * p, AVFormatContext * oc, AVStream * st)
{
  AVCodec  *codec;
  AVCodecContext *c;

  c = st->codec;

  /* find the video encoder */
  codec = avcodec_find_encoder (c->codec_id);
  if (!codec)
    {
      fprintf (stderr, "codec not found\n");
      exit (1);
    }

  if (codec->pix_fmts){
    int i = 0;
    c->pix_fmt = codec->pix_fmts[0];
    while (codec->pix_fmts[i] !=-1)
    {
      if (codec->pix_fmts[i] ==  AV_PIX_FMT_RGB24)
         c->pix_fmt = AV_PIX_FMT_RGB24;
      i++;
    }
  }

  /* open the codec */
  if (avcodec_open2 (c, codec, NULL) < 0)
    {
      fprintf (stderr, "could not open codec\n");
      exit (1);
    }

  p->video_outbuf = NULL;
  if (!(oc->oformat->flags & AVFMT_RAWPICTURE))
    {
      /* allocate output buffer */
      /* XXX: API change will be done */
      p->video_outbuf_size = 200000;
      p->video_outbuf = malloc (p->video_outbuf_size);
    }

  /* allocate the encoded raw picture */
  p->picture = alloc_picture (c->pix_fmt, c->width, c->height);
  if (!p->picture)
    {
      fprintf (stderr, "Could not allocate picture\n");
      exit (1);
    }

  /* if the output format is not YUV420P, then a temporary YUV420P
     picture is needed too. It is then converted to the required
     output format */
  p->tmp_picture = NULL;
  if (c->pix_fmt != AV_PIX_FMT_RGB24)
    {
      p->tmp_picture = alloc_picture (AV_PIX_FMT_RGB24, c->width, c->height);
      if (!p->tmp_picture)
        {
          fprintf (stderr, "Could not allocate temporary picture\n");
          exit (1);
        }
    }
}

static void
close_video (Priv * p, AVFormatContext * oc, AVStream * st)
{
  avcodec_close (st->codec);
  av_free (p->picture->data[0]);
  av_free (p->picture);
  if (p->tmp_picture)
    {
      av_free (p->tmp_picture->data[0]);
      av_free (p->tmp_picture);
    }
  av_free (p->video_outbuf);
}

#include "string.h"

/* fill the frame's pixels from the GEGL input buffer */
static void
fill_rgb_image (GeglProperties *o,
                AVFrame *pict, int frame_index, int width, int height)
{
  Priv     *p = (Priv*)o->user_data;
  GeglRectangle rect={0,0,width,height};
  gegl_buffer_get (p->input, &rect, 1.0, babl_format ("R'G'B' u8"), pict->data[0], GEGL_AUTO_ROWSTRIDE, GEGL_ABYSS_NONE);
}

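/* fetch the current frame from the GEGL input buffer (converting with
 * swscale when the codec does not take RGB24) and encode/mux it */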
static void
write_video_frame (GeglProperties *o,
                   AVFormatContext *oc, AVStream *st)
{
  Priv           *p = (Priv*)o->user_data;
  int             out_size, ret;
  AVCodecContext *c;
  AVFrame        *picture_ptr;

  c = st->codec;

  if (c->pix_fmt != AV_PIX_FMT_RGB24)
    {
      struct SwsContext *img_convert_ctx;
      fill_rgb_image (o, p->tmp_picture, p->frame_count, c->width,
                      c->height);

      img_convert_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_RGB24,
                                       c->width, c->height, c->pix_fmt,
                                       SWS_BICUBIC, NULL, NULL, NULL);

      if (img_convert_ctx == NULL)
        {
          fprintf(stderr, "ff_save: Cannot initialize conversion context.");
        }
      else
        {
          sws_scale(img_convert_ctx,
                    (void*)p->tmp_picture->data,
                    p->tmp_picture->linesize,
                    0,
                    c->height,
                    p->picture->data,
                    p->picture->linesize);
         p->picture->format = c->pix_fmt;
         p->picture->width = c->width;
         p->picture->height = c->height;
        }
    }
  else
    {
      fill_rgb_image (o, p->picture, p->frame_count, c->width, c->height);
    }

  picture_ptr      = p->picture;
  picture_ptr->pts = p->frame_count;

  if (oc->oformat->flags & AVFMT_RAWPICTURE)
    {
      /* raw video case. The API will change slightly in the near
         future for that */
      AVPacket  pkt;
      av_init_packet (&pkt);

      pkt.flags |= AV_PKT_FLAG_KEY;
      pkt.stream_index = st->index;
      pkt.data = (uint8_t *) picture_ptr;
      pkt.size = sizeof (AVPicture);
      pkt.pts = picture_ptr->pts;
      av_packet_rescale_ts (&pkt, c->time_base, st->time_base);

      ret = av_write_frame (oc, &pkt);
    }
  else
    {
      /* encode the image */
      out_size =
        avcodec_encode_video (c,
                              p->video_outbuf,
                              p->video_outbuf_size, picture_ptr);

      /* if zero size, it means the image was buffered */
      if (out_size != 0)
        {
          AVPacket  pkt;
          av_init_packet (&pkt);
          if (c->coded_frame->key_frame)
            pkt.flags |= AV_PKT_FLAG_KEY;
          pkt.stream_index = st->index;
          pkt.data = p->video_outbuf;
          pkt.size = out_size;
          pkt.pts = picture_ptr->pts;
          av_packet_rescale_ts (&pkt, c->time_base, st->time_base);
          /* write the compressed frame in the media file */
          ret = av_write_frame (oc, &pkt);
        }
      else
        {
          ret = 0;
        }
    }
  if (ret != 0)
    {
      fprintf (stderr, "Error while writing video frame\n");
      exit (1);
    }
  p->frame_count++;
}

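/* set up the muxer: guess the container from the path, resolve any
 * user-requested codecs, add and open the streams, open the output file
 * and write the stream header */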
static int
tfile (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;

  p->fmt = av_guess_format (NULL, o->path, NULL);
  if (!p->fmt)
    {
      fprintf (stderr,
               "ff_save couldn't deduce outputformat from file extension: using MPEG.\n%s",
               "");
      p->fmt = av_guess_format ("mpeg", NULL, NULL);
    }
  p->oc = avformat_alloc_context ();
  if (!p->oc)
    {
      fprintf (stderr, "memory error\n%s", "");
      return -1;
    }

  p->oc->oformat = p->fmt;

  snprintf (p->oc->filename, sizeof (p->oc->filename), "%s", o->path);

  p->video_st = NULL;
  p->audio_st = NULL;
  
  if (strcmp (o->video_codec, "auto"))
  {
    AVCodec *codec = avcodec_find_encoder_by_name (o->video_codec);
    p->fmt->video_codec = AV_CODEC_ID_NONE;
    if (codec)
      p->fmt->video_codec = codec->id;
    else
      {
        fprintf (stderr, "didn't find video encoder \"%s\"\navailable codecs: ", o->video_codec);
        while ((codec = av_codec_next (codec)))
          if (av_codec_is_encoder (codec) &&
              avcodec_get_type (codec->id) == AVMEDIA_TYPE_VIDEO)
          fprintf (stderr, "%s ", codec->name);
        fprintf (stderr, "\n");
      }
  }
  if (strcmp (o->audio_codec, "auto"))
  {
    AVCodec *codec = avcodec_find_encoder_by_name (o->audio_codec);
    p->fmt->audio_codec = AV_CODEC_ID_NONE;
    if (codec)
      p->fmt->audio_codec = codec->id;
    else
      {
        fprintf (stderr, "didn't find audio encoder \"%s\"\navailable codecs: ", o->audio_codec);
        while ((codec = av_codec_next (codec)))
          if (av_codec_is_encoder (codec) &&
              avcodec_get_type (codec->id) == AVMEDIA_TYPE_AUDIO)
                fprintf (stderr, "%s ", codec->name);
        fprintf (stderr, "\n");
      }
  }

  if (p->fmt->video_codec != AV_CODEC_ID_NONE)
    {
      p->video_st = add_video_stream (o, p->oc, p->fmt->video_codec);
    }
  if (p->fmt->audio_codec != AV_CODEC_ID_NONE)
    {
     p->audio_st = add_audio_stream (o, p->oc, p->fmt->audio_codec);
    }


  if (p->video_st)
    open_video (p, p->oc, p->video_st);

  if (p->audio_st)
    open_audio (o, p->oc, p->audio_st);

  av_dump_format (p->oc, 0, o->path, 1);

  if (avio_open (&p->oc->pb, o->path, AVIO_FLAG_WRITE) < 0)
    {
      fprintf (stderr, "couldn't open '%s'\n", o->path);
      return -1;
    }

  avformat_write_header (p->oc, NULL);
  return 0;
}

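/* sink process callback: lazily set up the muxer on the first frame, then
 * encode one video frame (and any queued audio) per invocation */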
static gboolean
process (GeglOperation       *operation,
         GeglBuffer          *input,
         const GeglRectangle *result,
         gint                 level)
{
  GeglProperties *o = GEGL_PROPERTIES (operation);
  Priv           *p = (Priv*)o->user_data;
  static gint     inited = 0;

  g_assert (input);

  if (p == NULL)
    init (o);
  p = (Priv*)o->user_data;

  p->width = result->width;
  p->height = result->height;
  p->input = input;

  if (!inited)
    {
      tfile (o);
      inited = 1;
    }

  write_video_frame (o, p->oc, p->video_st);
  if (p->audio_st)
    write_audio_frame (o, p->oc, p->audio_st);

  return  TRUE;
}

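/* drain packets still buffered in the audio encoder by feeding it NULL
 * frames until no more output is produced */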
static void flush_audio (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;

  int got_packet;
  do
  {
    AVPacket  pkt = { 0 };
    int ret;
    got_packet = 0;
    av_init_packet (&pkt);
    ret = avcodec_encode_audio2 (p->audio_st->codec, &pkt, NULL, &got_packet);
    if (ret < 0)
      break;
    if (got_packet)
      {
        pkt.stream_index = p->audio_st->index;
        av_packet_rescale_ts (&pkt, p->audio_st->codec->time_base, p->audio_st->time_base);
        av_interleaved_write_frame (p->oc, &pkt);
        av_free_packet (&pkt);
      }
  } while (got_packet);
}

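/* same as flush_audio, but for the delayed packets of the video encoder */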
static void flush_video (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;
  int got_packet = 0;
  do {
    AVPacket  pkt = { 0 };
    int ret;
    got_packet = 0;
    av_init_packet (&pkt);
    ret = avcodec_encode_video2 (p->video_st->codec, &pkt, NULL, &got_packet);
    if (ret < 0)
      return;
      
     if (got_packet)
     {
       pkt.stream_index = p->video_st->index;
       av_packet_rescale_ts (&pkt, p->video_st->codec->time_base, p->video_st->time_base);
       av_interleaved_write_frame (p->oc, &pkt);
       av_free_packet (&pkt);
     }
  } while (got_packet);
}

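/* flush both encoders, write the container trailer, close the streams and
 * the output file, and free the private state */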
static void
finalize (GObject *object)
{
  GeglProperties *o = GEGL_PROPERTIES (object);
  if (o->user_data)
    {
      Priv *p = (Priv*)o->user_data;
      if (p->audio_st)
        flush_audio (o);
      if (p->video_st)
        flush_video (o);

      av_write_trailer (p->oc);

      if (p->video_st)
        close_video (p, p->oc, p->video_st);
      if (p->audio_st)
        close_audio (p, p->oc, p->audio_st);

      avio_closep (&p->oc->pb);
      avformat_free_context (p->oc);

      g_free (o->user_data);
      o->user_data = NULL;
    }

  G_OBJECT_CLASS (g_type_class_peek_parent (G_OBJECT_GET_CLASS (object)))->finalize (object);
}


static void
gegl_op_class_init (GeglOpClass *klass)
{
  GeglOperationClass     *operation_class;
  GeglOperationSinkClass *sink_class;

  G_OBJECT_CLASS (klass)->finalize = finalize;

  operation_class = GEGL_OPERATION_CLASS (klass);
  sink_class      = GEGL_OPERATION_SINK_CLASS (klass);

  sink_class->process = process;
  sink_class->needs_full = TRUE;

  gegl_operation_class_set_keys (operation_class,
    "name"        , "gegl:ff-save",
    "categories"  , "output:video",
    "description" , _("FFmpeg video output sink"),
    NULL);
}

#endif