/* This file is an image processing operation for GEGL
 *
 * GEGL is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 3 of the License, or (at your option) any later version.
 *
 * GEGL is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GEGL; if not, see <https://www.gnu.org/licenses/>.
 *
 * Copyright 2003,2004,2007, 2015 Øyvind Kolås <pippin@gimp.org>
 */

#include "config.h"

#include <stdlib.h>

#include <glib/gi18n-lib.h>

/* #define USE_FINE_GRAINED_FFMPEG 1 */

#ifdef GEGL_PROPERTIES

property_string (path, _("File"), "/tmp/fnord.ogv")
    description (_("Target path and filename, use '-' for stdout."))

property_audio_fragment (audio, _("audio"), 0)
property_string (audio_codec, _("Audio codec"), "auto")
   description (_("Audio codec to use, or auto to use a good default based on container format."))
property_int (audio_sample_rate, _("audio sample rate"), -1)
    description (_("-1 means autodetect on first audio fragment"))

property_int (audio_bit_rate, _("audio bitrate in kb/s"), 64)
    description (_("Target encoded audio bitrate in kb/s"))

property_double (frame_rate, _("Frames/second"), 25.0)
    value_range (0.0, 100.0)

property_string (video_codec, _("Video codec"), "auto")
   description (_("Video codec to use, or auto to use a good default based on container format."))
property_int (video_bit_rate, _("video bitrate in kb/s"), 128)
    description (_("Target encoded video bitrate in kb/s"))
property_int (video_bufsize, _("Video bufsize"), 0)

property_string (container_format, _("Container format"), "auto")
   description (_("Container format to use, or auto to autodetect based on file extension."))

#ifdef USE_FINE_GRAINED_FFMPEG
property_int (global_quality, _("global quality"), 0)
property_int (noise_reduction, _("noise reduction"), 0)
property_int (scenechange_threshold, _("scenechange threshold"), 0)
property_int (video_bit_rate_min, _("video bitrate min"), 0)
property_int (video_bit_rate_max, _("video bitrate max"), 0)
property_int (video_bit_rate_tolerance, _("video bitrate tolerance"), -1)

property_int (keyint_min, _("keyint-min"), 0)
property_int (trellis, _("trellis"), 0)
property_int (qmin, _("qmin"), 0)
property_int (qmax, _("qmax"), 0)
property_int (max_qdiff, _("max_qdiff"), 0)
property_int (me_range, _("me_range"), 0)
property_int (max_b_frames, _("max_b_frames"), 0)
property_int (gop_size, _("gop-size"), 0)
property_double (qcompress, _("qcompress"), 0.0)
property_double (qblur, _("qblur"), 0.0)
property_double (i_quant_factor, _("i-quant-factor"), 0.0)
property_double (i_quant_offset, _("i-quant-offset"), 0.0)
property_int (me_subpel_quality, _("me-subpel-quality"), 0)
#endif


#else

#define GEGL_OP_SINK
#define GEGL_OP_NAME ff_save
#define GEGL_OP_C_SOURCE ff-save.c

#include "gegl-op.h"

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>

/* remove if libavcodec_required_version is changed to > 56.41.100 */
#if LIBAVCODEC_VERSION_INT <= AV_VERSION_INT(56,41,100)
# define AV_CODEC_FLAG_GLOBAL_HEADER	CODEC_FLAG_GLOBAL_HEADER
# define AV_CODEC_CAP_VARIABLE_FRAME_SIZE	CODEC_CAP_VARIABLE_FRAME_SIZE
# define AV_CODEC_CAP_INTRA_ONLY	CODEC_CAP_INTRA_ONLY
#endif

typedef struct
{
  gdouble    frame;
  gdouble    frames;
  gdouble    width;
  gdouble    height;
  GeglBuffer *input;

  AVOutputFormat *fmt;
  AVFormatContext *oc;
  AVStream *video_st;

  AVFrame  *picture, *tmp_picture;
  uint8_t  *video_outbuf;
  int       frame_count, video_outbuf_size;

    /** The rest is for audio handling within oxide; note that the interface
     * used passes all used functions in the oxide API through the reg_sym API
     * of gggl. This means that the ops should be usable by other applications
     * using gggl directly, without needing to link with the oxide library.
     */
  AVStream *audio_st;

  uint32_t  sample_rate;
  uint32_t  bits;
  uint32_t  channels;
  uint32_t  fragment_samples;
  uint32_t  fragment_size;

  int       bufsize;
  int       buffer_read_pos;
  int       buffer_write_pos;
  uint8_t  *buffer;

  int       audio_outbuf_size;
  int16_t  *samples;

  GList    *audio_track;
  long      audio_pos;
  long      audio_read_pos;

  int       next_apts;

  int       file_inited;
} Priv;

static void
clear_audio_track (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;
  while (p->audio_track)
    {
      g_object_unref (p->audio_track->data);
      p->audio_track = g_list_remove (p->audio_track, p->audio_track->data);
    }
}

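/* Compute how many audio samples belong to a given video frame; when the
 * sample rate is not an integer multiple of the frame rate, the remainder is
 * distributed by rounding the cumulative sample position per frame.
 */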
static int
samples_per_frame (int    frame,           /* frame no    */
                   double frame_rate,      /* frame rate  */
                   int    sample_rate,     /* sample rate */
                   int   *ceiled,          /* rounded up */
                   long  *start)           /* first sample no of frame */
{
  double osamples;
  double samples = 0;
  double samples_per_frame = sample_rate / frame_rate;

  if (fabs(fmod (sample_rate, frame_rate)) < 0.0001f)
  {
    if (start)
      *start = (samples_per_frame) * frame;
    if (ceiled)
      *ceiled = samples_per_frame;
    return samples_per_frame;
  }

  samples = samples_per_frame * frame;

  osamples = samples;
  samples += samples_per_frame;
  if (start)
    (*start) = ceil(osamples);
  if (ceiled)
    *ceiled = ceil(samples_per_frame);
  return ceil(samples)-ceil(osamples);
}

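/* Look up the stereo sample at an absolute position in the queued audio
 * fragments; yields silence if no fragment covers that position.
 */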
static void get_sample_data (Priv *p, long sample_no, float *left, float *right)
{
  int to_remove = 0;
  GList *l;
  if (sample_no < 0)
    return;
  for (l = p->audio_track; l; l = l->next)
  {
    GeglAudioFragment *af = l->data;
    int channels = gegl_audio_fragment_get_channels (af);
    int pos = gegl_audio_fragment_get_pos (af);
    int sample_count = gegl_audio_fragment_get_sample_count (af);
    if (sample_no > pos + sample_count)
    {
      to_remove ++;
    }

    if (pos <= sample_no &&
        sample_no < pos + sample_count)
      {
        int i = sample_no - pos;
        *left  = af->data[0][i];
        if (channels == 1)
          *right = af->data[0][i];
        else
          *right = af->data[1][i];

        if (0 && to_remove)  /* consuming audio track, currently disabled */
        {
          again:
          for (l = p->audio_track; l; l = l->next)
          {
            GeglAudioFragment *af = l->data;
            int pos = gegl_audio_fragment_get_pos (af);
            int sample_count = gegl_audio_fragment_get_sample_count (af);
            if (sample_no > pos + sample_count)
            {
              p->audio_track = g_list_remove (p->audio_track, af);
              g_object_unref (af);
              goto again;
            }    
          }
        }
        return;
      }
  }
  *left  = 0;
  *right = 0;
}

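/* Allocate the private state on first use, reset the queued audio track and
 * register the libav codecs and formats once per process.
 */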
static void
init (GeglProperties *o)
{
  static gint inited = 0; /*< this is actually meant to be static, only to be done once */
  Priv       *p = (Priv*)o->user_data;

  if (p == NULL)
    {
      p = g_new0 (Priv, 1);
      o->user_data = (void*) p;
    }

  if (!inited)
    {
      av_register_all ();
      avcodec_register_all ();
      inited = 1;
    }

  clear_audio_track (o);
  p->audio_pos = 0;
  p->audio_read_pos = 0;

  o->audio_sample_rate = -1; /* only do this if it hasn't been manually set? */

  av_log_set_level (AV_LOG_WARNING);
}

static void close_video       (Priv            *p,
                               AVFormatContext *oc,
                               AVStream        *st);
void        close_audio       (Priv            *p,
                               AVFormatContext *oc,
                               AVStream        *st);
static int  tfile             (GeglProperties  *o);
static void write_video_frame (GeglProperties  *o,
                               AVFormatContext *oc,
                               AVStream        *st);
static void write_audio_frame (GeglProperties      *o,
                               AVFormatContext *oc,
                               AVStream        *st);

#define STREAM_FRAME_RATE 25    /* 25 images/s */

#ifndef DISABLE_AUDIO
/* add an audio output stream */
static AVStream *
add_audio_stream (GeglProperties *o, AVFormatContext * oc, int codec_id)
{
  AVCodecContext *c;
  AVStream *st;

  st = avformat_new_stream (oc, NULL);
  if (!st)
    {
      fprintf (stderr, "Could not alloc stream\n");
      exit (1);
    }

  c = st->codec;
  c->codec_id = codec_id;
  c->codec_type = AVMEDIA_TYPE_AUDIO;

  if (oc->oformat->flags & AVFMT_GLOBALHEADER)
    c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

  return st;
}
#endif

static void
open_audio (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  AVCodecContext *c;
  AVCodec  *codec;
  int i;

  c = st->codec;

  /* find the audio encoder */
  codec = avcodec_find_encoder (c->codec_id);
  if (!codec)
    {
      fprintf (stderr, "codec not found\n");
      exit (1);
    }
  c->bit_rate = o->audio_bit_rate * 1000;
  c->sample_fmt = codec->sample_fmts ? codec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;

  if (o->audio_sample_rate == -1)
  {
    if (o->audio)
    {
      if (gegl_audio_fragment_get_sample_rate (o->audio) == 0)
      {
        gegl_audio_fragment_set_sample_rate (o->audio, 48000); // XXX: should skip adding audiostream instead
      }
      o->audio_sample_rate = gegl_audio_fragment_get_sample_rate (o->audio);
    }
  }
  c->sample_rate = o->audio_sample_rate;
  c->channel_layout = AV_CH_LAYOUT_STEREO;
  c->channels = 2;


  if (codec->supported_samplerates)
  {
    c->sample_rate = codec->supported_samplerates[0];
    for (i = 0; codec->supported_samplerates[i]; i++)
    {
      if (codec->supported_samplerates[i] == o->audio_sample_rate)
         c->sample_rate = o->audio_sample_rate;
    }
  }
  //st->time_base = (AVRational){1, c->sample_rate};
  st->time_base = (AVRational){1, o->audio_sample_rate};

  c->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL; // ffmpeg AAC is not quite stable yet

  /* open it */
  if (avcodec_open2 (c, codec, NULL) < 0)
    {
      fprintf (stderr, "could not open codec\n");
      exit (1);
    }
}

static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  uint64_t channel_layout,
                                  int sample_rate, int nb_samples)
{
  AVFrame *frame = av_frame_alloc();
  int ret;

  if (!frame) {
      fprintf(stderr, "Error allocating an audio frame\n");
      exit(1);
  }

  frame->format         = sample_fmt;
  frame->channel_layout = channel_layout;
  frame->sample_rate    = sample_rate;
  frame->nb_samples     = nb_samples;

  if (nb_samples) {
      ret = av_frame_get_buffer(frame, 0);
      if (ret < 0) {
          fprintf(stderr, "Error allocating an audio buffer\n");
          exit(1);
      }
  }
  return frame;
}

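/* Drain the queued audio track in chunks of frame_size samples, convert each
 * chunk to the codec's sample format, encode it and write the resulting
 * packets interleaved into the container.
 */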
static void encode_audio_fragments (Priv *p, AVFormatContext *oc, AVStream *st, int frame_size)
{
  while (p->audio_pos - p->audio_read_pos > frame_size)
  {
    AVCodecContext *c = st->codec;
    long i;
    int ret;
    int got_packet = 0;
    static AVPacket  pkt = { 0 };  /* XXX: static, should be stored in instance somehow */
    AVFrame *frame = alloc_audio_frame (c->sample_fmt, c->channel_layout,
                                        c->sample_rate, frame_size);

    if (pkt.size == 0)
    {
      av_init_packet (&pkt);
    }

    av_frame_make_writable (frame);
    switch (c->sample_fmt) {
      case AV_SAMPLE_FMT_FLT:
        for (i = 0; i < frame_size; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((float*)frame->data[0])[c->channels*i+0] = left;
          ((float*)frame->data[0])[c->channels*i+1] = right;
        }
        break;
      case AV_SAMPLE_FMT_FLTP:
        for (i = 0; i < frame_size; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((float*)frame->data[0])[i] = left;
          ((float*)frame->data[1])[i] = right;
        }
        break;
      case AV_SAMPLE_FMT_S16:
        for (i = 0; i < frame_size; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int16_t*)frame->data[0])[c->channels*i+0] = left * (1<<15);
          ((int16_t*)frame->data[0])[c->channels*i+1] = right * (1<<15);
        }
        break;
      case AV_SAMPLE_FMT_S32:
        for (i = 0; i < frame_size; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int32_t*)frame->data[0])[c->channels*i+0] = left * (1<<31);
          ((int32_t*)frame->data[0])[c->channels*i+1] = right * (1<<31);
        }
        break;
      case AV_SAMPLE_FMT_S32P:
        for (i = 0; i < frame_size; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int32_t*)frame->data[0])[i] = left * (1<<31);
          ((int32_t*)frame->data[1])[i] = right * (1<<31);
        }
        break;
      case AV_SAMPLE_FMT_S16P:
        for (i = 0; i < frame_size; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int16_t*)frame->data[0])[i] = left * (1<<15);
          ((int16_t*)frame->data[1])[i] = right * (1<<15);
        }
        break;
      default:
        fprintf (stderr, "eeeek unhandled audio format\n");
        break;
    }
    frame->pts = p->next_apts;
    p->next_apts += frame_size;

    //ret = avcodec_send_frame (c, frame);
    ret = avcodec_encode_audio2 (c, &pkt, frame, &got_packet);

    if (ret < 0) {
      fprintf (stderr, "Error encoding audio frame: %s\n", av_err2str (ret));
    }
    if (got_packet)
    {
      av_packet_rescale_ts (&pkt, st->codec->time_base, st->time_base);
      pkt.stream_index = st->index;
      av_interleaved_write_frame (oc, &pkt);
      av_packet_unref (&pkt);
    }
    av_frame_free (&frame);
    p->audio_read_pos += frame_size;
  }
  av_interleaved_write_frame (oc, NULL);
}

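/* Queue the audio belonging to the current video frame (or silence when no
 * fragment is provided) on the internal track, then encode as many full
 * codec frames as are available.
 */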
void
write_audio_frame (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  Priv *p = (Priv*)o->user_data;
  AVCodecContext *c = st->codec;
  int sample_count = 100000;

  if (o->audio)
  {
    int i;
    int real_sample_count;
    GeglAudioFragment *af;
    real_sample_count = samples_per_frame (p->frame_count, o->frame_rate, o->audio_sample_rate, NULL, NULL);

    af = gegl_audio_fragment_new (gegl_audio_fragment_get_sample_rate (o->audio),
                                  gegl_audio_fragment_get_channels (o->audio),
                                  gegl_audio_fragment_get_channel_layout (o->audio),
                                  real_sample_count);
    gegl_audio_fragment_set_sample_count (af, real_sample_count);

    sample_count = gegl_audio_fragment_get_sample_count (o->audio);

    for (i = 0; i < real_sample_count; i++)
      {
        af->data[0][i] = (i<sample_count)?o->audio->data[0][i]:0.0f;
        af->data[1][i] = (i<sample_count)?o->audio->data[1][i]:0.0f;
      }

    gegl_audio_fragment_set_pos (af, p->audio_pos);
    sample_count = real_sample_count;
    p->audio_pos += real_sample_count;
    p->audio_track = g_list_append (p->audio_track, af);
  }
  else
  {
    int i;
    GeglAudioFragment *af;
    sample_count = samples_per_frame (p->frame_count, o->frame_rate, o->audio_sample_rate, NULL, NULL);
    af = gegl_audio_fragment_new (sample_count, 2, 0, sample_count);
    gegl_audio_fragment_set_sample_count (af, sample_count);
    gegl_audio_fragment_set_pos (af, p->audio_pos);
    for (i = 0; i < sample_count; i++)
      {
        af->data[0][i] = 0.0;
        af->data[1][i] = 0.0;
      }
    p->audio_pos += sample_count;
    p->audio_track = g_list_append (p->audio_track, af);
  }

  if (!(c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
  {
    sample_count = c->frame_size;
  }

  encode_audio_fragments (p, oc, st, sample_count);
}

void
close_audio (Priv * p, AVFormatContext * oc, AVStream * st)
{
  avcodec_close (st->codec);

}

/* add a video output stream */
static AVStream *
add_video_stream (GeglProperties *o, AVFormatContext * oc, int codec_id)
{
  Priv *p = (Priv*)o->user_data;

  AVCodecContext *c;
  AVStream *st;

  st = avformat_new_stream (oc, NULL);
  if (!st)
    {
      fprintf (stderr, "Could not alloc stream %p %p %i\n", o, oc, codec_id);
      exit (1);
    }

  c = st->codec;
  c->codec_id = codec_id;
  c->codec_type = AVMEDIA_TYPE_VIDEO;
  /* put sample parameters */
  c->bit_rate = o->video_bit_rate * 1000;
#ifdef USE_FINE_GRAINED_FFMPEG
  c->rc_min_rate = o->video_bit_rate_min * 1000;
  c->rc_max_rate = o->video_bit_rate_max * 1000;
  if (o->video_bit_rate_tolerance >= 0)
    c->bit_rate_tolerance = o->video_bit_rate_tolerance * 1000;
#endif
  /* resolution must be a multiple of two */
  c->width = p->width;
  c->height = p->height;
  /* frames per second */
  st->time_base =(AVRational){1000, o->frame_rate * 1000};
  c->time_base = st->time_base;

  c->pix_fmt = AV_PIX_FMT_YUV420P;

  if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO)
    {
      c->max_b_frames = 2;
    }

  if (c->codec_id == AV_CODEC_ID_H264)
   {
     c->qcompress = 0.6;  // qcomp=0.6
     c->me_range = 16;    // me_range=16
     c->gop_size = 250;   // g=250
     c->max_b_frames = 3; // bf=3
   }

  if (o->video_bufsize)
    c->rc_buffer_size = o->video_bufsize * 1000;
#ifdef USE_FINE_GRAINED_FFMPEG
  if (o->global_quality)
     c->global_quality = o->global_quality;
  if (o->qcompress != 0.0)
     c->qcompress = o->qcompress;
  if (o->qblur != 0.0)
     c->qblur = o->qblur;
  if (o->max_qdiff != 0)
     c->max_qdiff = o->max_qdiff;
  if (o->me_subpel_quality != 0)
     c->me_subpel_quality = o->me_subpel_quality;
  if (o->i_quant_factor != 0.0)
     c->i_quant_factor = o->i_quant_factor;
  if (o->i_quant_offset != 0.0)
     c->i_quant_offset = o->i_quant_offset;
  if (o->max_b_frames)
    c->max_b_frames = o->max_b_frames;
  if (o->me_range)
    c->me_range = o->me_range;
  if (o->noise_reduction)
    c->noise_reduction = o->noise_reduction;
  if (o->scenechange_threshold)
    c->scenechange_threshold = o->scenechange_threshold;
  if (o->trellis)
    c->trellis = o->trellis;
  if (o->qmin)
    c->qmin = o->qmin;
  if (o->qmax)
    c->qmax = o->qmax;
  if (o->gop_size)
    c->gop_size = o->gop_size;
  if (o->keyint_min)
    c->keyint_min = o->keyint_min;
#endif

   if (oc->oformat->flags & AVFMT_GLOBALHEADER)
     c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

  return st;
}


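/* Allocate an AVFrame plus a backing pixel buffer for the given pixel format
 * and dimensions.
 */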
static AVFrame *
alloc_picture (int pix_fmt, int width, int height)
{
  AVFrame  *picture;
  uint8_t  *picture_buf;
  int       size;

  picture = av_frame_alloc ();
  if (!picture)
    return NULL;
  size = avpicture_get_size (pix_fmt, width + 1, height + 1);
  picture_buf = malloc (size);
  if (!picture_buf)
    {
      av_free (picture);
      return NULL;
    }
  avpicture_fill ((AVPicture *) picture, picture_buf, pix_fmt, width, height);
  return picture;
}

static void
open_video (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  Priv           *p = (Priv*)o->user_data;
  AVCodec  *codec;
  AVCodecContext *c;
  AVDictionary *codec_options = {0};

  c = st->codec;

  /* find the video encoder */
  codec = avcodec_find_encoder (c->codec_id);
  if (!codec)
    {
      fprintf (stderr, "codec not found\n");
      exit (1);
    }

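  /* pick the first pixel format the encoder supports, preferring RGB24 when
   * available so no colorspace conversion is needed */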
  if (codec->pix_fmts){
    int i = 0;
    c->pix_fmt = codec->pix_fmts[0];
    while (codec->pix_fmts[i] !=-1)
    {
      if (codec->pix_fmts[i] ==  AV_PIX_FMT_RGB24)
         c->pix_fmt = AV_PIX_FMT_RGB24;
      i++;
    }
  }
#if 0
  if (o->video_preset[0])
    av_dict_set (&codec_options, "preset", o->video_preset, 0);
#endif

  /* open the codec */
  if (avcodec_open2 (c, codec, &codec_options) < 0)
    {
      fprintf (stderr, "could not open codec\n");
      exit (1);
    }

  p->video_outbuf = NULL;
#if (LIBAVFORMAT_VERSION_MAJOR < 58) /* AVFMT_RAWPICTURE got removed from ffmpeg: "not used anymore" */
  if (!(oc->oformat->flags & AVFMT_RAWPICTURE))
#endif
    {
      /* allocate output buffer, 1 mb / frame, might fail for some codecs on UHD - but works for now */
      p->video_outbuf_size = 1024 * 1024;
      p->video_outbuf = malloc (p->video_outbuf_size);
    }

  /* allocate the encoded raw picture */
  p->picture = alloc_picture (c->pix_fmt, c->width, c->height);
  if (!p->picture)
    {
      fprintf (stderr, "Could not allocate picture\n");
      exit (1);
    }

  /* if the output format is not YUV420P, then a temporary YUV420P
     picture is needed too. It is then converted to the required
     output format */
  p->tmp_picture = NULL;
  if (c->pix_fmt != AV_PIX_FMT_RGB24)
    {
      p->tmp_picture = alloc_picture (AV_PIX_FMT_RGB24, c->width, c->height);
      if (!p->tmp_picture)
        {
          fprintf (stderr, "Could not allocate temporary picture\n");
          exit (1);
        }
    }
}

static void
close_video (Priv * p, AVFormatContext * oc, AVStream * st)
{
  avcodec_close (st->codec);
  av_free (p->picture->data[0]);
  av_free (p->picture);
  if (p->tmp_picture)
    {
      av_free (p->tmp_picture->data[0]);
      av_free (p->tmp_picture);
    }
  free (p->video_outbuf);
}

#include "string.h"

/* fetch the frame's pixels from the input GeglBuffer */
static void
fill_rgb_image (GeglProperties *o,
                AVFrame *pict, int frame_index, int width, int height)
{
  Priv     *p = (Priv*)o->user_data;
  GeglRectangle rect={0,0,width,height};
  gegl_buffer_get (p->input, &rect, 1.0, babl_format ("R'G'B' u8"), pict->data[0], GEGL_AUTO_ROWSTRIDE, GEGL_ABYSS_NONE);
}

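/* Fetch the current frame from GEGL, convert it to the encoder's pixel
 * format with swscale when necessary, encode it and write the packet with
 * timestamps rescaled to the stream time base.
 */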
static void
write_video_frame (GeglProperties *o,
                   AVFormatContext *oc, AVStream *st)
{
  Priv           *p = (Priv*)o->user_data;
  int             out_size, ret;
  AVCodecContext *c;
  AVFrame        *picture_ptr;

  c = st->codec;

  if (c->pix_fmt != AV_PIX_FMT_RGB24)
    {
      struct SwsContext *img_convert_ctx;
      fill_rgb_image (o, p->tmp_picture, p->frame_count, c->width,
                      c->height);

      img_convert_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_RGB24,
                                       c->width, c->height, c->pix_fmt,
                                       SWS_BICUBIC, NULL, NULL, NULL);

      if (img_convert_ctx == NULL)
        {
          fprintf(stderr, "ff_save: Cannot initialize conversion context.\n");
        }
      else
        {
          sws_scale(img_convert_ctx,
                    (void*)p->tmp_picture->data,
                    p->tmp_picture->linesize,
                    0,
                    c->height,
                    p->picture->data,
                    p->picture->linesize);
         p->picture->format = c->pix_fmt;
         p->picture->width = c->width;
         p->picture->height = c->height;
         sws_freeContext (img_convert_ctx);
        }
    }
  else
    {
      fill_rgb_image (o, p->picture, p->frame_count, c->width, c->height);
    }

  picture_ptr      = p->picture;
  picture_ptr->pts = p->frame_count;

#if (LIBAVFORMAT_VERSION_MAJOR < 58) /* AVFMT_RAWPICTURE got removed from ffmpeg: "not used anymore" */
  if (oc->oformat->flags & AVFMT_RAWPICTURE)
    {
      /* raw video case. The API will change slightly in the near
         future for that */
      AVPacket  pkt;
      av_init_packet (&pkt);

      pkt.flags |= AV_PKT_FLAG_KEY;
      pkt.stream_index = st->index;
      pkt.data = (uint8_t *) picture_ptr;
      pkt.size = sizeof (AVPicture);
      pkt.pts = picture_ptr->pts;
      pkt.dts = picture_ptr->pts;
      av_packet_rescale_ts (&pkt, c->time_base, st->time_base);

      ret = av_write_frame (oc, &pkt);
    }
  else
#endif
    {
      /* encode the image */
      AVPacket pkt2;
      int got_packet = 0;
      av_init_packet(&pkt2);
      pkt2.data = p->video_outbuf;
      pkt2.size = p->video_outbuf_size;

      out_size = avcodec_encode_video2(c, &pkt2, picture_ptr, &got_packet);

      if (!out_size && got_packet && c->coded_frame)
        {
          c->coded_frame->pts       = pkt2.pts;
          c->coded_frame->key_frame = !!(pkt2.flags & AV_PKT_FLAG_KEY);
          if (c->codec->capabilities & AV_CODEC_CAP_INTRA_ONLY)
              c->coded_frame->pict_type = AV_PICTURE_TYPE_I;
        }

      if (pkt2.side_data_elems > 0)
        {
          int i;
          for (i = 0; i < pkt2.side_data_elems; i++)
            av_free(pkt2.side_data[i].data);
          av_freep(&pkt2.side_data);
          pkt2.side_data_elems = 0;
        }

      if (!out_size)
        out_size = pkt2.size;

      /* if zero size, it means the image was buffered */
      if (out_size != 0)
        {
          AVPacket  pkt;
          av_init_packet (&pkt);
          if (c->coded_frame->key_frame)
            pkt.flags |= AV_PKT_FLAG_KEY;
          pkt.stream_index = st->index;
          pkt.data = p->video_outbuf;
          pkt.size = out_size;
          pkt.pts = picture_ptr->pts;
          pkt.dts = picture_ptr->pts;
          av_packet_rescale_ts (&pkt, c->time_base, st->time_base);
          /* write the compressed frame in the media file */
          ret = av_write_frame (oc, &pkt);
        }
      else
        {
          ret = 0;
        }
    }
  if (ret != 0)
    {
      fprintf (stderr, "Error while writing video frame\n");
      exit (1);
    }
  p->frame_count++;
}

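/* Set up the output file: guess the container from the container_format
 * property or the file extension, create and open the video and audio
 * streams, and write the container header.
 */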
static int
tfile (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;

  if (strcmp (o->container_format, "auto"))
    p->fmt = av_guess_format (o->container_format, o->path, NULL);
  else
    p->fmt = av_guess_format (NULL, o->path, NULL);

  if (!p->fmt)
    {
      fprintf (stderr,
               "ff_save couldn't deduce output format from file extension: using MPEG.\n%s",
               "");
      p->fmt = av_guess_format ("mpeg", NULL, NULL);
    }
  p->oc = avformat_alloc_context ();
  if (!p->oc)
    {
      fprintf (stderr, "memory error\n%s", "");
      return -1;
    }

  p->oc->oformat = p->fmt;

  snprintf (p->oc->filename, sizeof (p->oc->filename), "%s", o->path);

  p->video_st = NULL;
  p->audio_st = NULL;

  if (strcmp (o->video_codec, "auto"))
  {
    AVCodec *codec = avcodec_find_encoder_by_name (o->video_codec);
    p->fmt->video_codec = AV_CODEC_ID_NONE;
    if (codec)
      p->fmt->video_codec = codec->id;
    else
      {
        fprintf (stderr, "didn't find video encoder \"%s\"\navailable codecs: ", o->video_codec);
        while ((codec = av_codec_next (codec)))
          if (av_codec_is_encoder (codec) &&
              avcodec_get_type (codec->id) == AVMEDIA_TYPE_VIDEO)
          fprintf (stderr, "%s ", codec->name);
        fprintf (stderr, "\n");
      }
  }
  if (strcmp (o->audio_codec, "auto"))
  {
    AVCodec *codec = avcodec_find_encoder_by_name (o->audio_codec);
    p->fmt->audio_codec = AV_CODEC_ID_NONE;
    if (codec)
      p->fmt->audio_codec = codec->id;
    else
      {
        fprintf (stderr, "didn't find audio encoder \"%s\"\navailable codecs: ", o->audio_codec);
        while ((codec = av_codec_next (codec)))
          if (av_codec_is_encoder (codec) &&
              avcodec_get_type (codec->id) == AVMEDIA_TYPE_AUDIO)
                fprintf (stderr, "%s ", codec->name);
        fprintf (stderr, "\n");
      }
  }

  if (p->fmt->video_codec != AV_CODEC_ID_NONE)
    {
      p->video_st = add_video_stream (o, p->oc, p->fmt->video_codec);
    }
  if (p->fmt->audio_codec != AV_CODEC_ID_NONE)
    {
     p->audio_st = add_audio_stream (o, p->oc, p->fmt->audio_codec);
    }


  if (p->video_st)
    open_video (o, p->oc, p->video_st);

  if (p->audio_st)
    open_audio (o, p->oc, p->audio_st);

  av_dump_format (p->oc, 0, o->path, 1);

  if (avio_open (&p->oc->pb, o->path, AVIO_FLAG_WRITE) < 0)
    {
      fprintf (stderr, "couldn't open '%s'\n", o->path);
      return -1;
    }

  if (avformat_write_header (p->oc, NULL) < 0)
  {
    fprintf (stderr, "'%s' error writing header\n", o->path);
    return -1;
  }
  return 0;
}


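/* Ask the audio encoder for a delayed packet and write it to the container
 * if one is returned.
 */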
static void flush_audio (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;
  AVPacket  pkt = { 0 };
  int ret;

  int got_packet = 0;
  if (!p->audio_st)
    return;

  got_packet = 0;
  av_init_packet (&pkt);
  ret = avcodec_encode_audio2 (p->audio_st->codec, &pkt, NULL, &got_packet);
  if (ret < 0)
  {
    fprintf (stderr, "audio enc trouble\n");
  }
  if (got_packet)
    {
      pkt.stream_index = p->audio_st->index;
      av_packet_rescale_ts (&pkt, p->audio_st->codec->time_base, p->audio_st->time_base);
      av_interleaved_write_frame (p->oc, &pkt);
      av_packet_unref (&pkt);
    }
}

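/* GEGL sink process callback: sets up the output file on the first frame,
 * then encodes one video frame (and its accompanying audio) per invocation.
 */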
static gboolean
process (GeglOperation       *operation,
         GeglBuffer          *input,
         const GeglRectangle *result,
         gint                 level)
{
  GeglProperties *o = GEGL_PROPERTIES (operation);
  Priv *p = (Priv*)o->user_data;

  g_assert (input);

  if (p == NULL)
    init (o);
  p = (Priv*)o->user_data;

  p->width = result->width;
  p->height = result->height;
  p->input = input;

  if (!p->file_inited)
    {
      tfile (o);
      p->file_inited = 1;
    }

  write_video_frame (o, p->oc, p->video_st);
  if (p->audio_st)
  {
    write_audio_frame (o, p->oc, p->audio_st);
    //flush_audio (o);
  }

  return  TRUE;
}


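/* Drain delayed packets from the video encoder at the end of the stream
 * until it has nothing left to emit.
 */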
static void flush_video (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;
  int got_packet = 0;
  long ts = p->frame_count;
  do {
    AVPacket  pkt = { 0 };
    int ret;
    got_packet = 0;
    av_init_packet (&pkt);
    ret = avcodec_encode_video2 (p->video_st->codec, &pkt, NULL, &got_packet);
    if (ret < 0)
      return;

     if (got_packet)
     {
       pkt.stream_index = p->video_st->index;
       pkt.pts = ts;
       pkt.dts = ts++;
       av_packet_rescale_ts (&pkt, p->video_st->codec->time_base, p->video_st->time_base);
       av_interleaved_write_frame (p->oc, &pkt);
       av_packet_unref (&pkt);
     }
  } while (got_packet);
}

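/* Flush both encoders, write the container trailer and release the libav
 * and private state.
 */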
static void
finalize (GObject *object)
{
  GeglProperties *o = GEGL_PROPERTIES (object);
  if (o->user_data)
    {
      Priv *p = (Priv*)o->user_data;
      flush_audio (o);
      flush_video (o);

      av_write_trailer (p->oc);

      if (p->video_st)
        close_video (p, p->oc, p->video_st);
      if (p->audio_st)
        close_audio (p, p->oc, p->audio_st);

      avio_closep (&p->oc->pb);
      avformat_free_context (p->oc);

      g_clear_pointer (&o->user_data, g_free);
    }

  G_OBJECT_CLASS (g_type_class_peek_parent (G_OBJECT_GET_CLASS (object)))->finalize (object);
}


static void
gegl_op_class_init (GeglOpClass *klass)
{
  GeglOperationClass     *operation_class;
  GeglOperationSinkClass *sink_class;

  G_OBJECT_CLASS (klass)->finalize = finalize;

  operation_class = GEGL_OPERATION_CLASS (klass);
  sink_class      = GEGL_OPERATION_SINK_CLASS (klass);

  sink_class->process = process;
  sink_class->needs_full = TRUE;

  gegl_operation_class_set_keys (operation_class,
    "name"        , "gegl:ff-save",
    "title"       , _("FFmpeg Frame Saver"),
    "categories"  , "output:video",
    "description" , _("FFmpeg video output sink"),
    NULL);
}

#endif