ff-save.c 30.2 KB
Newer Older
1 2 3 4 5
/* This file is an image processing operation for GEGL
 *
 * GEGL is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
6
 * version 3 of the License, or (at your option) any later version.
7 8 9 10 11 12 13
 *
 * GEGL is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
14
 * License along with GEGL; if not, see <http://www.gnu.org/licenses/>.
15
 *
16
 * Copyright 2003,2004,2007, 2015 Øyvind Kolås <pippin@gimp.org>
17
 */
18 19

#include "config.h"
Martin Nordholts's avatar
Martin Nordholts committed
20 21 22

#include <stdlib.h>

23 24
#include <glib/gi18n-lib.h>

25
/* #define USE_FINE_GRAINED_FFMPEG 1 */
26

27
#ifdef GEGL_PROPERTIES
28

29
property_string (path, _("File"), "/tmp/fnord.ogv")
    description (_("Target path and filename, use '-' for stdout."))

property_audio_fragment (audio, _("audio"), 0)
property_string (audio_codec, _("Audio codec"), "auto")
   description (_("Audio codec to use, or auto to use a good default based on container format."))
property_int (audio_sample_rate, _("audio sample rate"), -1)
    description (_("-1 means autodetect on first audio fragment"))

property_int (audio_bit_rate, _("audio bitrate in kb/s"), 64)
    description (_("Target encoded audio bitrate in kb/s"))

property_double (frame_rate, _("Frames/second"), 25.0)
    value_range (0.0, 100.0)

property_string (video_codec, _("Video codec"), "auto")
   description (_("Video codec to use, or auto to use a good default based on container format."))
property_int (video_bit_rate, _("video bitrate in kb/s"), 128)
    description (_("Target encoded video bitrate in kb/s"))
property_int (video_bufsize, _("Video bufsize"), 0)

property_string (container_format, _("Container format"), "auto")
   description (_("Container format to use, or auto to autodetect based on file extension."))

#ifdef USE_FINE_GRAINED_FFMPEG
/* Optional knobs mapped straight onto AVCodecContext fields; 0 / 0.0 / -1
 * defaults mean "leave the codec's own default alone" (see add_video_stream).
 */
property_int (global_quality, _("global quality"), 0)
property_int (noise_reduction, _("noise reduction"), 0)
property_int (scenechange_threshold, _("scenechange threshold"), 0)
property_int (video_bit_rate_min, _("video bitrate min"), 0)
property_int (video_bit_rate_max, _("video bitrate max"), 0)
property_int (video_bit_rate_tolerance, _("video bitrate tolerance"), -1)

property_int (keyint_min, _("keyint-min"), 0)
property_int (trellis, _("trellis"), 0)
property_int (qmin, _("qmin"), 0)
property_int (qmax, _("qmax"), 0)
property_int (max_qdiff, _("max_qdiff"), 0)
property_int (me_range, _("me_range"), 0)
property_int (max_b_frames, _("max_b_frames"), 0)
property_int (gop_size, _("gop-size"), 0)
property_double (qcompress, _("qcompress"), 0.0)
property_double (qblur, _("qblur"), 0.0)
property_double (i_quant_factor, _("i-quant-factor"), 0.0)
property_double (i_quant_offset, _("i-quant-offset"), 0.0)
property_int (me_subpel_quality, _("me-subpel-quality"), 0)
#endif
75

76

77 78
#else

79
#define GEGL_OP_SINK
80
#define GEGL_OP_NAME ff_save
81
#define GEGL_OP_C_SOURCE ff-save.c
82

83
#include "gegl-op.h"
84

85
#include <libavformat/avformat.h>
86 87 88 89
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106

typedef struct
{
  gdouble    frame;
  gdouble    frames;
  gdouble    width;
  gdouble    height;
  GeglBuffer *input;

  AVOutputFormat *fmt;
  AVFormatContext *oc;
  AVStream *video_st;

  AVFrame  *picture, *tmp_picture;
  uint8_t  *video_outbuf;
  int       frame_count, video_outbuf_size;

107 108 109 110
    /** the rest is for audio handling within oxide, note that the interface
     * used passes all used functions in the oxide api through the reg_sym api
     * of gggl, this means that the ops should be usable by other applications
     * using gggl directly,. without needing to link with the oxide library
111 112 113 114 115 116 117 118 119
     */
  AVStream *audio_st;

  uint32_t  sample_rate;
  uint32_t  bits;
  uint32_t  channels;
  uint32_t  fragment_samples;
  uint32_t  fragment_size;

120
  int       bufsize;
121 122
  int       buffer_read_pos;
  int       buffer_write_pos;
123 124
  uint8_t  *buffer;

125 126
  int       audio_outbuf_size;
  int16_t  *samples;
127

128 129 130
  GList    *audio_track;
  long      audio_pos;
  long      audio_read_pos;
131

132
  int       next_apts;
133 134

  int       file_inited;
135 136
} Priv;

137 138 139 140 141 142
/* Empty the queued audio track, dropping the reference held on every
 * fragment.  Leaves p->audio_track == NULL. */
static void
clear_audio_track (GeglProperties *o)
{
  Priv *priv = (Priv*)o->user_data;

  while (priv->audio_track != NULL)
    {
      GeglAudioFragment *head = priv->audio_track->data;

      priv->audio_track = g_list_remove (priv->audio_track, head);
      g_object_unref (head);
    }
}

148
static int
149 150 151 152 153
samples_per_frame (int    frame,           /* frame no    */
                   double frame_rate,      /* frame rate  */
                   int    sample_rate,     /* sample rate */
                   int   *ceiled,          /* rounded up */
                   long  *start)           /* */
154 155 156
{
  double osamples;
  double samples = 0;
157
  double samples_per_frame = sample_rate / frame_rate;
158

159
  if (fabs(fmod (sample_rate, frame_rate)) < 0.0001f)
160 161
  {
    if (start)
162 163 164 165
      *start = (samples_per_frame) * frame;
    if (ceiled)
      *ceiled = samples_per_frame;
    return samples_per_frame;
166 167
  }

168 169
  samples = samples_per_frame * frame;

170
  osamples = samples;
171
  samples += samples_per_frame;
172
  if (start)
173 174 175
    (*start) = ceil(osamples);
  if (ceiled)
    *ceiled = ceil(samples_per_frame);
176 177 178
  return ceil(samples)-ceil(osamples);
}

179 180 181 182 183 184
/* Fetch the stereo sample at absolute track position `sample_no` from the
 * queued audio fragments.  Writes *left / *right; both become 0 when no
 * fragment covers the position.  Negative positions are ignored (outputs
 * left untouched). */
static void get_sample_data (Priv *p, long sample_no, float *left, float *right)
{
  int to_remove = 0;
  GList *l;
  if (sample_no < 0)
    return;
  for (l = p->audio_track; l; l = l->next)
  {
    GeglAudioFragment *af = l->data;
    int channels = gegl_audio_fragment_get_channels (af);
    int pos = gegl_audio_fragment_get_pos (af);
    int sample_count = gegl_audio_fragment_get_sample_count (af);
    /* count fragments that end before the requested position */
    if (sample_no > pos + sample_count)
    {
      to_remove ++;
    }

    if (pos <= sample_no &&
        sample_no < pos + sample_count)
      {
        int i = sample_no - pos;
        *left  = af->data[0][i];
        if (channels == 1)
          *right = af->data[0][i]; /* mono: duplicate into both channels */
        else
          *right = af->data[1][i];

        /* NOTE(review): dead code — the `0 &&` disables consumption of
         * fully-played fragments, so the list only shrinks via
         * clear_audio_track().  Kept as-is. */
        if (0 && to_remove)  /* consuming audiotrack */
        {
          again:
          for (l = p->audio_track; l; l = l->next)
          {
            GeglAudioFragment *af = l->data;
            int pos = gegl_audio_fragment_get_pos (af);
            int sample_count = gegl_audio_fragment_get_sample_count (af);
            if (sample_no > pos + sample_count)
            {
              p->audio_track = g_list_remove (p->audio_track, af);
              g_object_unref (af);
              goto again;
            }
          }
        }
        return;
      }
  }
  *left  = 0;
  *right = 0;
}
228

229
/* Lazily allocate the Priv state for this op instance, register the ffmpeg
 * muxers/codecs once per process, and reset all audio bookkeeping so a new
 * encode can start cleanly. */
static void
init (GeglProperties *o)
{
  static gint inited = 0; /*< this is actually meant to be static, only to be done once */
  Priv       *p = (Priv*)o->user_data;

  if (p == NULL)
    {
      p = g_new0 (Priv, 1);
      o->user_data = (void*) p;
    }

  /* process-wide ffmpeg registration, done exactly once */
  if (!inited)
    {
      av_register_all ();
      avcodec_register_all ();
      inited = 1;
    }

  /* drop any queued fragments and rewind the audio positions */
  clear_audio_track (o);
  p->audio_pos = 0;
  p->audio_read_pos = 0;

  o->audio_sample_rate = -1; /* only do this if it hasn't been manually set? */

  av_log_set_level (AV_LOG_WARNING);
}

257 258 259 260 261 262
static void close_video       (Priv            *p,
                               AVFormatContext *oc,
                               AVStream        *st);
void        close_audio       (Priv            *p,
                               AVFormatContext *oc,
                               AVStream        *st);
263 264
static int  tfile             (GeglProperties  *o);
static void write_video_frame (GeglProperties  *o,
265 266
                               AVFormatContext *oc,
                               AVStream        *st);
267
static void write_audio_frame (GeglProperties      *o,
268 269
                               AVFormatContext *oc,
                               AVStream        *st);
270 271 272

#define STREAM_FRAME_RATE 25    /* 25 images/s */

273
#ifndef DISABLE_AUDIO
274 275
/* add an audio output stream */
static AVStream *
276
add_audio_stream (GeglProperties *o, AVFormatContext * oc, int codec_id)
277 278 279 280
{
  AVCodecContext *c;
  AVStream *st;

281
  st = avformat_new_stream (oc, NULL);
282 283 284 285 286 287 288 289
  if (!st)
    {
      fprintf (stderr, "Could not alloc stream\n");
      exit (1);
    }

  c = st->codec;
  c->codec_id = codec_id;
290
  c->codec_type = AVMEDIA_TYPE_AUDIO;
291

292 293 294
  if (oc->oformat->flags & AVFMT_GLOBALHEADER)
    c->flags |= CODEC_FLAG_GLOBAL_HEADER;

295 296
  return st;
}
297
#endif
298 299

/* Configure and open the audio encoder for stream `st`:
 * pick the codec's preferred sample format, resolve the output sample rate
 * (autodetecting from the first audio fragment when the property is -1),
 * force stereo, and open the codec.  Aborts on any failure. */
static void
open_audio (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  AVCodecContext *c;
  AVCodec  *codec;
  int i;

  c = st->codec;

  /* find the audio encoder */
  codec = avcodec_find_encoder (c->codec_id);
  if (!codec)
    {
      fprintf (stderr, "codec not found\n");
      exit (1);
    }
  c->bit_rate = o->audio_bit_rate * 1000;
  /* take the encoder's first advertised sample format, or planar float */
  c->sample_fmt = codec->sample_fmts ? codec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;

  /* -1 means: adopt the rate of the incoming audio fragment */
  if (o->audio_sample_rate == -1)
  {
    if (o->audio)
    {
      if (gegl_audio_fragment_get_sample_rate (o->audio) == 0)
      {
        gegl_audio_fragment_set_sample_rate (o->audio, 48000); // XXX: should skip adding audiostream instead
      }
      o->audio_sample_rate = gegl_audio_fragment_get_sample_rate (o->audio);
    }
  }
  c->sample_rate = o->audio_sample_rate;
  c->channel_layout = AV_CH_LAYOUT_STEREO;
  c->channels = 2;


  /* if the encoder restricts sample rates, prefer the requested rate when
   * supported, otherwise fall back to the encoder's first choice */
  if (codec->supported_samplerates)
  {
    c->sample_rate = codec->supported_samplerates[0];
    for (i = 0; codec->supported_samplerates[i]; i++)
    {
      if (codec->supported_samplerates[i] == o->audio_sample_rate)
         c->sample_rate = o->audio_sample_rate;
    }
  }
  //st->time_base = (AVRational){1, c->sample_rate};
  st->time_base = (AVRational){1, o->audio_sample_rate};

  c->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL; // ffmpeg AAC is not quite stable yet

  /* open it */
  if (avcodec_open2 (c, codec, NULL) < 0)
    {
      fprintf (stderr, "could not open codec\n");
      exit (1);
    }
}

356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382
/* Allocate an AVFrame configured for the given sample format, channel
 * layout and rate; when nb_samples is non-zero the sample buffer is
 * allocated as well.  Aborts the process on allocation failure. */
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  uint64_t channel_layout,
                                  int sample_rate, int nb_samples)
{
  AVFrame *af = av_frame_alloc();

  if (af == NULL) {
      fprintf(stderr, "Error allocating an audio frame\n");
      exit(1);
  }

  af->format         = sample_fmt;
  af->channel_layout = channel_layout;
  af->sample_rate    = sample_rate;
  af->nb_samples     = nb_samples;

  if (nb_samples) {
      if (av_frame_get_buffer(af, 0) < 0) {
          fprintf(stderr, "Error allocating an audio buffer\n");
          exit(1);
      }
  }
  return af;
}

383
/* Encode and mux as many complete audio frames of `frame_size` samples as
 * are currently buffered between p->audio_read_pos and p->audio_pos.
 * Samples are pulled from the fragment queue via get_sample_data() and
 * packed according to the encoder's sample format.
 *
 * Fixes vs. previous revision:
 *  - the AVPacket was file-static (flagged XXX in the original); it is now
 *    a properly initialized per-iteration local, removing hidden shared
 *    state between calls/instances.
 *  - the S32/S32P scale factor was `(1<<31)`, which is signed-overflow UB
 *    (on common compilers it evaluates to INT_MIN, inverting the sample
 *    sign); a float constant for 2^31 is used instead. */
static void encode_audio_fragments (Priv *p, AVFormatContext *oc, AVStream *st, int frame_size)
{
  while (p->audio_pos - p->audio_read_pos > frame_size)
  {
    AVCodecContext *c = st->codec;
    long i;
    int ret;
    int got_packet = 0;
    AVPacket  pkt = { 0 };
    AVFrame *frame = alloc_audio_frame (c->sample_fmt, c->channel_layout,
                                        c->sample_rate, frame_size);

    av_init_packet (&pkt);
    av_frame_make_writable (frame);

    /* pack the buffered stereo samples into the encoder's native layout */
    switch (c->sample_fmt) {
      case AV_SAMPLE_FMT_FLT:
        for (i = 0; i < frame_size; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((float*)frame->data[0])[c->channels*i+0] = left;
          ((float*)frame->data[0])[c->channels*i+1] = right;
        }
        break;
      case AV_SAMPLE_FMT_FLTP:
        for (i = 0; i < frame_size; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((float*)frame->data[0])[i] = left;
          ((float*)frame->data[1])[i] = right;
        }
        break;
      case AV_SAMPLE_FMT_S16:
        for (i = 0; i < frame_size; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int16_t*)frame->data[0])[c->channels*i+0] = left * (1<<15);
          ((int16_t*)frame->data[0])[c->channels*i+1] = right * (1<<15);
        }
        break;
      case AV_SAMPLE_FMT_S32:
        for (i = 0; i < frame_size; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          /* 2147483648.0f == 2^31; `(1<<31)` would be UB */
          ((int32_t*)frame->data[0])[c->channels*i+0] = left * 2147483648.0f;
          ((int32_t*)frame->data[0])[c->channels*i+1] = right * 2147483648.0f;
        }
        break;
      case AV_SAMPLE_FMT_S32P:
        for (i = 0; i < frame_size; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int32_t*)frame->data[0])[i] = left * 2147483648.0f;
          ((int32_t*)frame->data[1])[i] = right * 2147483648.0f;
        }
        break;
      case AV_SAMPLE_FMT_S16P:
        for (i = 0; i < frame_size; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int16_t*)frame->data[0])[i] = left * (1<<15);
          ((int16_t*)frame->data[1])[i] = right * (1<<15);
        }
        break;
      default:
        fprintf (stderr, "eeeek unhandled audio format\n");
        break;
    }
    frame->pts = p->next_apts;
    p->next_apts += frame_size;

    //ret = avcodec_send_frame (c, frame);
    ret = avcodec_encode_audio2 (c, &pkt, frame, &got_packet);

    if (ret < 0) {
      fprintf (stderr, "Error encoding audio frame: %s\n", av_err2str (ret));
    }
    if (got_packet)
    {
      /* convert from codec time base to stream time base before muxing */
      av_packet_rescale_ts (&pkt, st->codec->time_base, st->time_base);
      pkt.stream_index = st->index;
      av_interleaved_write_frame (oc, &pkt);
      av_packet_unref (&pkt);
    }
    av_frame_free (&frame);
    p->audio_read_pos += frame_size;
  }
  /* NULL packet flushes the muxer's interleaving buffer */
  av_interleaved_write_frame (oc, NULL);
}

/* Queue one video frame's worth of audio onto the track and encode whatever
 * complete encoder frames are now available.  When no audio fragment is
 * supplied, silence of the correct length is queued instead so audio and
 * video stay in sync. */
void
write_audio_frame (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  Priv *p = (Priv*)o->user_data;
  AVCodecContext *c = st->codec;
  int sample_count = 100000; /* fallback cap; overwritten below */

  if (o->audio)
  {
    int i;
    int real_sample_count;
    GeglAudioFragment *af;
    /* exact number of samples this video frame must carry (drift-free) */
    real_sample_count = samples_per_frame (p->frame_count, o->frame_rate, o->audio_sample_rate, NULL, NULL);

    af = gegl_audio_fragment_new (gegl_audio_fragment_get_sample_rate (o->audio),
                                  gegl_audio_fragment_get_channels (o->audio),
                                  gegl_audio_fragment_get_channel_layout (o->audio),
                                  real_sample_count);
    gegl_audio_fragment_set_sample_count (af, real_sample_count);

    sample_count = gegl_audio_fragment_get_sample_count (o->audio);

    /* copy the provided samples, zero-padding if the fragment is shorter
     * than what this frame needs */
    for (i = 0; i < real_sample_count; i++)
      {
        af->data[0][i] = (i<sample_count)?o->audio->data[0][i]:0.0f;
        af->data[1][i] = (i<sample_count)?o->audio->data[1][i]:0.0f;
      }

    gegl_audio_fragment_set_pos (af, p->audio_pos);
    sample_count = real_sample_count;
    p->audio_pos += real_sample_count;
    p->audio_track = g_list_append (p->audio_track, af);
  }
  else
  {
    /* no incoming audio: queue silence for this frame */
    int i;
    GeglAudioFragment *af;
    sample_count = samples_per_frame (p->frame_count, o->frame_rate, o->audio_sample_rate, NULL, NULL);
    af = gegl_audio_fragment_new (sample_count, 2, 0, sample_count);
    gegl_audio_fragment_set_sample_count (af, sample_count);
    gegl_audio_fragment_set_pos (af, p->audio_pos);
    for (i = 0; i < sample_count; i++)
      {
        af->data[0][i] = 0.0;
        af->data[1][i] = 0.0;
      }
    p->audio_pos += sample_count;
    p->audio_track = g_list_append (p->audio_track, af);
  }

  /* fixed-frame-size codecs dictate how many samples go into each packet */
  if (!(c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
  {
    sample_count = c->frame_size;
  }

  encode_audio_fragments (p, oc, st, sample_count);
}

/* Close the audio encoder; `p` and `oc` are unused but kept for symmetry
 * with close_video(). */
void
close_audio (Priv * p, AVFormatContext * oc, AVStream * st)
{
  avcodec_close (st->codec);

}

/* add a video output stream */
static AVStream *
549
add_video_stream (GeglProperties *o, AVFormatContext * oc, int codec_id)
550
{
551
  Priv *p = (Priv*)o->user_data;
552 553 554 555

  AVCodecContext *c;
  AVStream *st;

556
  st = avformat_new_stream (oc, NULL);
557 558
  if (!st)
    {
559
      fprintf (stderr, "Could not alloc stream %p %p %i\n", o, oc, codec_id);
560 561 562 563 564
      exit (1);
    }

  c = st->codec;
  c->codec_id = codec_id;
565
  c->codec_type = AVMEDIA_TYPE_VIDEO;
566
  /* put sample propeters */
567
  c->bit_rate = o->video_bit_rate * 1000;
568
#ifdef USE_FINE_GRAINED_FFMPEG
569 570
  c->rc_min_rate = o->video_bit_rate_min * 1000;
  c->rc_max_rate = o->video_bit_rate_max * 1000;
571 572
  if (o->video_bit_rate_tolerance >= 0)
    c->bit_rate_tolerance = o->video_bit_rate_tolerance * 1000;
573
#endif
574 575 576 577
  /* resolution must be a multiple of two */
  c->width = p->width;
  c->height = p->height;
  /* frames per second */
578
  st->time_base =(AVRational){1000, o->frame_rate * 1000};
579
  c->time_base = st->time_base;
580

581
  c->pix_fmt = AV_PIX_FMT_YUV420P;
582

583
  if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO)
584
    {
585
      c->max_b_frames = 2;
586
    }
587

588 589
  if (c->codec_id == AV_CODEC_ID_H264)
   {
590 591 592 593
     c->qcompress = 0.6;  // qcomp=0.6
     c->me_range = 16;    // me_range=16
     c->gop_size = 250;   // g=250
     c->max_b_frames = 3; // bf=3
594 595
   }

596 597 598
  if (o->video_bufsize)
    c->rc_buffer_size = o->video_bufsize * 1000;
#if USE_FINE_GRAINED_FFMPEG
599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630
  if (o->global_quality)
     c->global_quality = o->global_quality;
  if (o->qcompress != 0.0)
     c->qcompress = o->qcompress;
  if (o->qblur != 0.0)
     c->qblur = o->qblur;
  if (o->max_qdiff != 0)
     c->max_qdiff = o->max_qdiff;
  if (o->me_subpel_quality != 0)
     c->me_subpel_quality = o->me_subpel_quality;
  if (o->i_quant_factor != 0.0)
     c->i_quant_factor = o->i_quant_factor;
  if (o->i_quant_offset != 0.0)
     c->i_quant_offset = o->i_quant_offset;
  if (o->max_b_frames)
    c->max_b_frames = o->max_b_frames;
  if (o->me_range)
    c->me_range = o->me_range;
  if (o->noise_reduction)
    c->noise_reduction = o->noise_reduction;
  if (o->scenechange_threshold)
    c->scenechange_threshold = o->scenechange_threshold;
  if (o->trellis)
    c->trellis = o->trellis;
  if (o->qmin)
    c->qmin = o->qmin;
  if (o->qmax)
    c->qmax = o->qmax;
  if (o->gop_size)
    c->gop_size = o->gop_size;
  if (o->keyint_min)
    c->keyint_min = o->keyint_min;
631
#endif
632

633 634 635
   if (oc->oformat->flags & AVFMT_GLOBALHEADER)
     c->flags |= CODEC_FLAG_GLOBAL_HEADER;

636 637 638 639 640 641 642 643 644 645 646
  return st;
}


/* Allocate an AVFrame plus a pixel buffer for the given format/size and
 * wire the buffer into the frame's data/linesize pointers.
 * Returns NULL on allocation failure.
 *
 * Fix: the buffer was obtained with malloc() but is released with
 * av_free() in close_video() — an allocator mismatch; use av_malloc()
 * so allocation and release pair correctly. */
static AVFrame *
alloc_picture (int pix_fmt, int width, int height)
{
  AVFrame  *picture;
  uint8_t  *picture_buf;
  int       size;

  picture = av_frame_alloc ();
  if (!picture)
    return NULL;
  /* NOTE(review): the +1 slack presumably guards against codecs reading
   * past the nominal dimensions — TODO confirm before removing */
  size = avpicture_get_size (pix_fmt, width + 1, height + 1);
  picture_buf = av_malloc (size);
  if (!picture_buf)
    {
      av_free (picture);
      return NULL;
    }
  avpicture_fill ((AVPicture *) picture, picture_buf, pix_fmt, width, height);
  return picture;
}

/* Open the video encoder for stream `st`: pick a pixel format the encoder
 * supports (preferring RGB24 to skip colorspace conversion), open the
 * codec, and allocate the output buffer plus the encode frame(s).
 * Aborts on failure. */
static void
open_video (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  Priv           *p = (Priv*)o->user_data;
  AVCodec  *codec;
  AVCodecContext *c;
  AVDictionary *codec_options = {0};

  c = st->codec;

  /* find the video encoder */
  codec = avcodec_find_encoder (c->codec_id);
  if (!codec)
    {
      fprintf (stderr, "codec not found\n");
      exit (1);
    }

  /* default to the encoder's first pixel format, but prefer RGB24 when
   * supported since our source data is RGB */
  if (codec->pix_fmts){
    int i = 0;
    c->pix_fmt = codec->pix_fmts[0];
    while (codec->pix_fmts[i] !=-1)
    {
      if (codec->pix_fmts[i] ==  AV_PIX_FMT_RGB24)
         c->pix_fmt = AV_PIX_FMT_RGB24;
      i++;
    }
  }
#if 0
  if (o->video_preset[0])
    av_dict_set (&codec_options, "preset", o->video_preset, 0);
#endif

  /* open the codec */
  if (avcodec_open2 (c, codec, &codec_options) < 0)
    {
      fprintf (stderr, "could not open codec\n");
      exit (1);
    }

  p->video_outbuf = NULL;
  if (!(oc->oformat->flags & AVFMT_RAWPICTURE))
    {
      /* allocate output buffer, 1 mb / frame, might fail for some codecs on UHD - but works for now */
      p->video_outbuf_size = 1024 * 1024;
      p->video_outbuf = malloc (p->video_outbuf_size);
    }

  /* allocate the encoded raw picture */
  p->picture = alloc_picture (c->pix_fmt, c->width, c->height);
  if (!p->picture)
    {
      fprintf (stderr, "Could not allocate picture\n");
      exit (1);
    }

  /* if the output format is not YUV420P, then a temporary YUV420P
     picture is needed too. It is then converted to the required
     output format */
  p->tmp_picture = NULL;
  if (c->pix_fmt != AV_PIX_FMT_RGB24)
    {
      p->tmp_picture = alloc_picture (AV_PIX_FMT_RGB24, c->width, c->height);
      if (!p->tmp_picture)
        {
          fprintf (stderr, "Could not allocate temporary picture\n");
          exit (1);
        }
    }
}

/* Close the video encoder and free the frames and output buffer allocated
 * in open_video().  `oc` is unused but kept for symmetry with open_video(). */
static void
close_video (Priv * p, AVFormatContext * oc, AVStream * st)
{
  avcodec_close (st->codec);
  av_free (p->picture->data[0]); /* pixel buffer from alloc_picture() */
  av_free (p->picture);
  if (p->tmp_picture)
    {
      av_free (p->tmp_picture->data[0]);
      av_free (p->tmp_picture);
    }
  free (p->video_outbuf); /* malloc()ed in open_video() */
}

#include "string.h"

/* prepare a dummy image */
static void
750
fill_rgb_image (GeglProperties *o,
751
                AVFrame *pict, int frame_index, int width, int height)
752
{
753
  Priv     *p = (Priv*)o->user_data;
754
  GeglRectangle rect={0,0,width,height};
755
  gegl_buffer_get (p->input, &rect, 1.0, babl_format ("R'G'B' u8"), pict->data[0], GEGL_AUTO_ROWSTRIDE, GEGL_ABYSS_NONE);
756 757 758
}

/* Encode and mux one video frame: fetch the GEGL input as RGB, convert to
 * the encoder's pixel format if needed (via swscale), then either write the
 * raw picture (AVFMT_RAWPICTURE formats) or run it through the encoder and
 * write the resulting packet.  Exits the process on write failure. */
static void
write_video_frame (GeglProperties *o,
                   AVFormatContext *oc, AVStream *st)
{
  Priv           *p = (Priv*)o->user_data;
  int             out_size, ret;
  AVCodecContext *c;
  AVFrame        *picture_ptr;

  c = st->codec;

  if (c->pix_fmt != AV_PIX_FMT_RGB24)
    {
      /* fetch into the RGB staging frame, then convert to c->pix_fmt */
      struct SwsContext *img_convert_ctx;
      fill_rgb_image (o, p->tmp_picture, p->frame_count, c->width,
                      c->height);

      img_convert_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_RGB24,
                                       c->width, c->height, c->pix_fmt,
                                       SWS_BICUBIC, NULL, NULL, NULL);

      if (img_convert_ctx == NULL)
        {
          fprintf(stderr, "ff_save: Cannot initialize conversion context.");
        }
      else
        {
          sws_scale(img_convert_ctx,
                    (void*)p->tmp_picture->data,
                    p->tmp_picture->linesize,
                    0,
                    c->height,
                    p->picture->data,
                    p->picture->linesize);
         p->picture->format = c->pix_fmt;
         p->picture->width = c->width;
         p->picture->height = c->height;
         sws_freeContext (img_convert_ctx);
        }
    }
  else
    {
      /* encoder consumes RGB24 directly; no conversion needed */
      fill_rgb_image (o, p->picture, p->frame_count, c->width, c->height);
    }

  picture_ptr      = p->picture;
  picture_ptr->pts = p->frame_count; /* pts in codec time base (1/frame_rate) */

  if (oc->oformat->flags & AVFMT_RAWPICTURE)
    {
      /* raw video case. The API will change slightly in the near
         future for that */
      AVPacket  pkt;
      av_init_packet (&pkt);

      pkt.flags |= AV_PKT_FLAG_KEY;
      pkt.stream_index = st->index;
      pkt.data = (uint8_t *) picture_ptr;
      pkt.size = sizeof (AVPicture);
      pkt.pts = picture_ptr->pts;
      pkt.dts = picture_ptr->pts;
      av_packet_rescale_ts (&pkt, c->time_base, st->time_base);

      ret = av_write_frame (oc, &pkt);
    }
  else
    {
      /* encode the image */
      AVPacket pkt2;
      int got_packet = 0;
      av_init_packet(&pkt2);
      pkt2.data = p->video_outbuf;
      pkt2.size = p->video_outbuf_size;

      out_size = avcodec_encode_video2(c, &pkt2, picture_ptr, &got_packet);

      /* mirror packet metadata onto coded_frame for legacy consumers */
      if (!out_size && got_packet && c->coded_frame)
        {
          c->coded_frame->pts       = pkt2.pts;
          c->coded_frame->key_frame = !!(pkt2.flags & AV_PKT_FLAG_KEY);
          if (c->codec->capabilities & AV_CODEC_CAP_INTRA_ONLY)
              c->coded_frame->pict_type = AV_PICTURE_TYPE_I;
        }

      /* drop side data we do not forward */
      if (pkt2.side_data_elems > 0)
        {
          int i;
          for (i = 0; i < pkt2.side_data_elems; i++)
            av_free(pkt2.side_data[i].data);
          av_freep(&pkt2.side_data);
          pkt2.side_data_elems = 0;
        }

      if (!out_size)
        out_size = pkt2.size;

      /* if zero size, it means the image was buffered */
      if (out_size != 0)
        {
          AVPacket  pkt;
          av_init_packet (&pkt);
          if (c->coded_frame->key_frame)
            pkt.flags |= AV_PKT_FLAG_KEY;
          pkt.stream_index = st->index;
          pkt.data = p->video_outbuf;
          pkt.size = out_size;
          pkt.pts = picture_ptr->pts;
          pkt.dts = picture_ptr->pts;
          av_packet_rescale_ts (&pkt, c->time_base, st->time_base);
          /* write the compressed frame in the media file */
          ret = av_write_frame (oc, &pkt);
        }
      else
        {
          ret = 0;
        }
    }
  if (ret != 0)
    {
      fprintf (stderr, "Error while writing video frame\n");
      exit (1);
    }
  p->frame_count++;
}

/* One-time file/muxer setup: pick the container from the path or property,
 * resolve explicit codec names, create the streams, open the encoders,
 * open the output file and write the container header.
 * Returns 0 on success, -1 on failure. */
static int
tfile (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;

  /* container: explicit name, or guess from the file extension */
  if (strcmp (o->container_format, "auto"))
    p->fmt = av_guess_format (o->container_format, o->path, NULL);
  else
    p->fmt = av_guess_format (NULL, o->path, NULL);

  if (!p->fmt)
    {
      fprintf (stderr,
               "ff_save couldn't deduce outputformat from file extension: using MPEG.\n%s",
               "");
      p->fmt = av_guess_format ("mpeg", NULL, NULL);
    }
  p->oc = avformat_alloc_context ();
  if (!p->oc)
    {
      fprintf (stderr, "memory error\n%s", "");
      return -1;
    }

  p->oc->oformat = p->fmt;

  snprintf (p->oc->filename, sizeof (p->oc->filename), "%s", o->path);

  p->video_st = NULL;
  p->audio_st = NULL;

  /* explicit video codec name overrides the container default; on an
   * unknown name the stream is disabled and available encoders listed */
  if (strcmp (o->video_codec, "auto"))
  {
    AVCodec *codec = avcodec_find_encoder_by_name (o->video_codec);
    p->fmt->video_codec = AV_CODEC_ID_NONE;
    if (codec)
      p->fmt->video_codec = codec->id;
    else
      {
        fprintf (stderr, "didn't find video encoder \"%s\"\navailable codecs: ", o->video_codec);
        while ((codec = av_codec_next (codec)))
          if (av_codec_is_encoder (codec) &&
              avcodec_get_type (codec->id) == AVMEDIA_TYPE_VIDEO)
          fprintf (stderr, "%s ", codec->name);
        fprintf (stderr, "\n");
      }
  }
  /* same for the audio codec */
  if (strcmp (o->audio_codec, "auto"))
  {
    AVCodec *codec = avcodec_find_encoder_by_name (o->audio_codec);
    p->fmt->audio_codec = AV_CODEC_ID_NONE;
    if (codec)
      p->fmt->audio_codec = codec->id;
    else
      {
        fprintf (stderr, "didn't find audio encoder \"%s\"\navailable codecs: ", o->audio_codec);
        while ((codec = av_codec_next (codec)))
          if (av_codec_is_encoder (codec) &&
              avcodec_get_type (codec->id) == AVMEDIA_TYPE_AUDIO)
                fprintf (stderr, "%s ", codec->name);
        fprintf (stderr, "\n");
      }
  }

  if (p->fmt->video_codec != AV_CODEC_ID_NONE)
    {
      p->video_st = add_video_stream (o, p->oc, p->fmt->video_codec);
    }
  if (p->fmt->audio_codec != AV_CODEC_ID_NONE)
    {
     p->audio_st = add_audio_stream (o, p->oc, p->fmt->audio_codec);
    }


  if (p->video_st)
    open_video (o, p->oc, p->video_st);

  if (p->audio_st)
    open_audio (o, p->oc, p->audio_st);

  av_dump_format (p->oc, 0, o->path, 1);

  if (avio_open (&p->oc->pb, o->path, AVIO_FLAG_WRITE) < 0)
    {
      fprintf (stderr, "couldn't open '%s'\n", o->path);
      return -1;
    }

  if (avformat_write_header (p->oc, NULL) < 0)
  {
    fprintf (stderr, "'%s' error writing header\n", o->path);
    return -1;
  }
  return 0;
}

979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005

/* Drain the audio encoder at end of stream: feed it NULL frames and mux
 * every packet it still holds.  No-op when there is no audio stream.
 *
 * Fix: the previous revision pulled only a single packet; an encoder can
 * buffer several, so we now loop until got_packet is 0 — mirroring
 * flush_video(). */
static void flush_audio (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;
  int got_packet;

  if (!p->audio_st)
    return;

  do
    {
      AVPacket  pkt = { 0 };
      int ret;

      got_packet = 0;
      av_init_packet (&pkt);
      ret = avcodec_encode_audio2 (p->audio_st->codec, &pkt, NULL, &got_packet);
      if (ret < 0)
        {
          fprintf (stderr, "audio enc trouble\n");
          return;
        }
      if (got_packet)
        {
          pkt.stream_index = p->audio_st->index;
          av_packet_rescale_ts (&pkt, p->audio_st->codec->time_base, p->audio_st->time_base);
          av_interleaved_write_frame (p->oc, &pkt);
          av_packet_unref (&pkt);
        }
    }
  while (got_packet);
}

1006 1007 1008
static gboolean
process (GeglOperation       *operation,
         GeglBuffer          *input,
1009 1010
         const GeglRectangle *result,
         gint                 level)
1011
{
1012
  GeglProperties *o = GEGL_PROPERTIES (operation);
1013
  Priv *p = (Priv*)o->user_data;
1014 1015 1016 1017 1018

  g_assert (input);

  if (p == NULL)
    init (o);
1019
  p = (Priv*)o->user_data;
1020 1021 1022 1023 1024

  p->width = result->width;
  p->height = result->height;
  p->input = input;

1025
  if (!p->file_inited)
1026 1027
    {
      tfile (o);
1028
      p->file_inited = 1;
1029 1030 1031 1032
    }

  write_video_frame (o, p->oc, p->video_st);
  if (p->audio_st)
1033
  {
1034
    write_audio_frame (o, p->oc, p->audio_st);
1035 1036
    //flush_audio (o);
  }
1037 1038 1039 1040

  return  TRUE;
}

1041 1042 1043 1044 1045

/* Drain the video encoder at end of stream: keep feeding it NULL frames
 * and muxing the delayed packets until none remain.
 *
 * Fix: the function dereferenced p->video_st->codec without a NULL check;
 * when no video stream was created (unknown codec name) this crashed in
 * finalize().  Guard with an early return, consistent with flush_audio(). */
static void flush_video (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;
  int got_packet = 0;
  long ts = p->frame_count;

  if (!p->video_st)
    return;

  do {
    AVPacket  pkt = { 0 };
    int ret;
    got_packet = 0;
    av_init_packet (&pkt);
    ret = avcodec_encode_video2 (p->video_st->codec, &pkt, NULL, &got_packet);
    if (ret < 0)
      return;

     if (got_packet)
     {
       pkt.stream_index = p->video_st->index;
       /* delayed packets: synthesize monotonically increasing timestamps */
       pkt.pts = ts;
       pkt.dts = ts++;
       av_packet_rescale_ts (&pkt, p->video_st->codec->time_base, p->video_st->time_base);
       av_interleaved_write_frame (p->oc, &pkt);
       av_packet_unref (&pkt);
     }
  } while (got_packet);
}

1068 1069 1070
/* GObject finalize: flush both encoders, write the container trailer,
 * close the streams and release all private state.
 *
 * Fixes: previously ran flush/trailer/close unconditionally, crashing when
 * the muxer was never created (process() never ran, so p->oc is NULL);
 * also leaked any still-queued audio fragments.  Both handled below. */
static void
finalize (GObject *object)
{
  GeglProperties *o = GEGL_PROPERTIES (object);
  if (o->user_data)
    {
      Priv *p = (Priv*)o->user_data;

      /* only finish the file if muxing was actually started in tfile() */
      if (p->oc)
        {
          flush_audio (o);
          flush_video (o);

          av_write_trailer (p->oc);

          if (p->video_st)
            close_video (p, p->oc, p->video_st);
          if (p->audio_st)
            close_audio (p, p->oc, p->audio_st);

          avio_closep (&p->oc->pb);
          avformat_free_context (p->oc);
        }

      clear_audio_track (o); /* release any fragments still queued */

      g_free (o->user_data);
      o->user_data = NULL;
    }

  G_OBJECT_CLASS (g_type_class_peek_parent (G_OBJECT_GET_CLASS (object)))->finalize (object);
}


static void
1097
gegl_op_class_init (GeglOpClass *klass)
1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109
{
  GeglOperationClass     *operation_class;
  GeglOperationSinkClass *sink_class;

  G_OBJECT_CLASS (klass)->finalize = finalize;

  operation_class = GEGL_OPERATION_CLASS (klass);
  sink_class      = GEGL_OPERATION_SINK_CLASS (klass);

  sink_class->process = process;
  sink_class->needs_full = TRUE;

1110 1111
  gegl_operation_class_set_keys (operation_class,
    "name"        , "gegl:ff-save",
1112
    "title"       , _("FFmpeg Frame Saver"),
1113 1114 1115
    "categories"  , "output:video",
    "description" , _("FFmpeg video output sink"),
    NULL);
1116 1117
}

1118
#endif