/* This file is an image processing operation for GEGL
 *
 * GEGL is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 3 of the License, or (at your option) any later version.
 *
 * GEGL is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GEGL; if not, see <https://www.gnu.org/licenses/>.
 *
 * Copyright 2003,2004,2007, 2015 Øyvind Kolås <pippin@gimp.org>
 */

#include "config.h"

#include <stdlib.h>

#include <glib/gi18n-lib.h>

/* #define USE_FINE_GRAINED_FFMPEG 1 */

#ifdef GEGL_PROPERTIES

property_string (path, _("File"), "/tmp/fnord.ogv")
    description (_("Target path and filename, use '-' for stdout."))

property_audio_fragment (audio, _("audio"), 0)
property_string (audio_codec, _("Audio codec"), "auto")
   description (_("Audio codec to use, or auto to use a good default based on container format."))
property_int (audio_sample_rate, _("audio sample rate"), -1)
    description (_("-1 means autodetect on first audio fragment"))

property_int (audio_bit_rate, _("audio bitrate in kb/s"), 64)
    description (_("Target encoded audio bitrate in kb/s"))

property_double (frame_rate, _("Frames/second"), 25.0)
    value_range (0.0, 100.0)

property_string (video_codec, _("Video codec"), "auto")
   description (_("Video codec to use, or auto to use a good default based on container format."))
property_int (video_bit_rate, _("video bitrate in kb/s"), 128)
    description (_("Target encoded video bitrate in kb/s"))
property_int (video_bufsize, _("Video bufsize"), 0)

property_string (container_format, _("Container format"), "auto")
   description (_("Container format to use, or auto to autodetect based on file extension."))

#ifdef USE_FINE_GRAINED_FFMPEG
property_int (global_quality, _("global quality"), 0)
property_int (noise_reduction, _("noise reduction"), 0)
property_int (scenechange_threshold, _("scenechange threshold"), 0)
property_int (video_bit_rate_min, _("video bitrate min"), 0)
property_int (video_bit_rate_max, _("video bitrate max"), 0)
property_int (video_bit_rate_tolerance, _("video bitrate tolerance"), -1)

property_int (keyint_min, _("keyint-min"), 0)
property_int (trellis, _("trellis"), 0)
property_int (qmin, _("qmin"), 0)
property_int (qmax, _("qmax"), 0)
property_int (max_qdiff, _("max_qdiff"), 0)
property_int (me_range, _("me_range"), 0)
property_int (max_b_frames, _("max_b_frames"), 0)
property_int (gop_size, _("gop-size"), 0)
property_double (qcompress, _("qcompress"), 0.0)
property_double (qblur, _("qblur"), 0.0)
property_double (i_quant_factor, _("i-quant-factor"), 0.0)
property_double (i_quant_offset, _("i-quant-offset"), 0.0)
property_int (me_subpel_quality, _("me-subpel-quality"), 0)
#endif

#else

#define GEGL_OP_SINK
#define GEGL_OP_NAME ff_save
#define GEGL_OP_C_SOURCE ff-save.c

#include "gegl-op.h"

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>

/* remove if libavcodec_required_version is changed to > 56.41.100 */
#if LIBAVCODEC_VERSION_INT <= AV_VERSION_INT(56,41,100)
# define AV_CODEC_FLAG_GLOBAL_HEADER        CODEC_FLAG_GLOBAL_HEADER
# define AV_CODEC_CAP_VARIABLE_FRAME_SIZE   CODEC_CAP_VARIABLE_FRAME_SIZE
# define AV_CODEC_CAP_INTRA_ONLY            CODEC_CAP_INTRA_ONLY
#endif

typedef struct
{
  gdouble    frame;
  gdouble    frames;
  gdouble    width;
  gdouble    height;
  GeglBuffer *input;

  AVOutputFormat *fmt;
  AVFormatContext *oc;
  AVStream *video_st;

  AVFrame  *picture, *tmp_picture;
  uint8_t  *video_outbuf;
  int       frame_count, video_outbuf_size;

    /** The rest is for audio handling within oxide; note that the interface
     * used passes all used functions in the oxide API through the reg_sym API
     * of gggl. This means that the ops should be usable by other applications
     * using gggl directly, without needing to link with the oxide library.
     */
  AVStream *audio_st;

  uint32_t  sample_rate;
  uint32_t  bits;
  uint32_t  channels;
  uint32_t  fragment_samples;
  uint32_t  fragment_size;

  int       bufsize;
  int       buffer_read_pos;
  int       buffer_write_pos;
  uint8_t  *buffer;

  int       audio_outbuf_size;
  int16_t  *samples;

  GList    *audio_track;
  long      audio_pos;
  long      audio_read_pos;

  int       next_apts;

  int       file_inited;
} Priv;

static void
clear_audio_track (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;
  while (p->audio_track)
    {
      g_object_unref (p->audio_track->data);
      p->audio_track = g_list_remove (p->audio_track, p->audio_track->data);
    }
}

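/* Return the number of audio samples that belong to video frame 'frame'.
 * When the sample rate is not an integer multiple of the frame rate the
 * per-frame count alternates so rounding errors do not accumulate;
 * 'start' receives the absolute index of the frame's first sample and
 * 'ceiled' the rounded-up per-frame sample count.
 */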
static int
samples_per_frame (int    frame,           /* frame no    */
                   double frame_rate,      /* frame rate  */
                   int    sample_rate,     /* sample rate */
                   int   *ceiled,          /* rounded up */
                   long  *start)           /* */
{
  double osamples;
  double samples = 0;
  double samples_per_frame = sample_rate / frame_rate;

  if (fabs(fmod (sample_rate, frame_rate)) < 0.0001f)
  {
    if (start)
      *start = (samples_per_frame) * frame;
    if (ceiled)
      *ceiled = samples_per_frame;
    return samples_per_frame;
  }

  samples = samples_per_frame * frame;

  osamples = samples;
  samples += samples_per_frame;
  if (start)
    (*start) = ceil(osamples);
  if (ceiled)
    *ceiled = ceil(samples_per_frame);
  return ceil(samples)-ceil(osamples);
}

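/* Look up the left/right sample at absolute position 'sample_no' in the
 * queued audio fragments; mono fragments are duplicated to both channels
 * and silence is returned when no fragment covers the position.
 */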
static void get_sample_data (Priv *p, long sample_no, float *left, float *right)
{
  int to_remove = 0;
  GList *l;
  if (sample_no < 0)
    return;
  for (l = p->audio_track; l; l = l->next)
  {
    GeglAudioFragment *af = l->data;
    int channels = gegl_audio_fragment_get_channels (af);
    int pos = gegl_audio_fragment_get_pos (af);
    int sample_count = gegl_audio_fragment_get_sample_count (af);
    if (sample_no > pos + sample_count)
    {
      to_remove ++;
    }

    if (pos <= sample_no &&
        sample_no < pos + sample_count)
      {
        int i = sample_no - pos;
        *left  = af->data[0][i];
        if (channels == 1)
          *right = af->data[0][i];
        else
          *right = af->data[1][i];

        if (0 && to_remove)  /* consuming audiotrack */
        {
          again:
          for (l = p->audio_track; l; l = l->next)
          {
            GeglAudioFragment *af = l->data;
            int pos = gegl_audio_fragment_get_pos (af);
            int sample_count = gegl_audio_fragment_get_sample_count (af);
            if (sample_no > pos + sample_count)
            {
              p->audio_track = g_list_remove (p->audio_track, af);
              g_object_unref (af);
              goto again;
            }
          }
        }
        return;
      }
  }
  *left  = 0;
  *right = 0;
}

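/* Allocate the Priv state on first use and reset the audio bookkeeping;
 * FFmpeg codec/format registration is only performed once per process.
 */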
static void
init (GeglProperties *o)
{
  static gint inited = 0; /*< this is actually meant to be static, only to be done once */
  Priv       *p = (Priv*)o->user_data;

  if (p == NULL)
    {
      p = g_new0 (Priv, 1);
      o->user_data = (void*) p;
    }

  if (!inited)
    {
      av_register_all ();
      avcodec_register_all ();
      inited = 1;
    }

  clear_audio_track (o);
  p->audio_pos = 0;
  p->audio_read_pos = 0;

  o->audio_sample_rate = -1; /* only do this if it hasn't been manually set? */

  av_log_set_level (AV_LOG_WARNING);
}

static void close_video       (Priv            *p,
                               AVFormatContext *oc,
                               AVStream        *st);
void        close_audio       (Priv            *p,
                               AVFormatContext *oc,
                               AVStream        *st);
static int  tfile             (GeglProperties  *o);
static void write_video_frame (GeglProperties  *o,
                               AVFormatContext *oc,
                               AVStream        *st);
static void write_audio_frame (GeglProperties  *o,
                               AVFormatContext *oc,
                               AVStream        *st);

#define STREAM_FRAME_RATE 25    /* 25 images/s */

#ifndef DISABLE_AUDIO
/* add an audio output stream */
static AVStream *
add_audio_stream (GeglProperties *o, AVFormatContext * oc, int codec_id)
{
  AVCodecContext *c;
  AVStream *st;

  st = avformat_new_stream (oc, NULL);
  if (!st)
    {
      fprintf (stderr, "Could not alloc stream\n");
      exit (1);
    }

  c = st->codec;
  c->codec_id = codec_id;
  c->codec_type = AVMEDIA_TYPE_AUDIO;

  if (oc->oformat->flags & AVFMT_GLOBALHEADER)
    c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

  return st;
}
#endif

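/* Configure and open the audio encoder: pick a sample format the codec
 * supports, negotiate the sample rate (defaulting to 48 kHz when the first
 * fragment does not report one) and use a stereo channel layout.
 */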
static gboolean
open_audio (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  AVCodecContext *c;
  AVCodec  *codec;
  int i;

  c = st->codec;

  /* find the audio encoder */
  codec = avcodec_find_encoder (c->codec_id);
  if (!codec)
    {
      fprintf (stderr, "codec not found\n");
      return FALSE;
    }
  c->bit_rate = o->audio_bit_rate * 1000;
  c->sample_fmt = codec->sample_fmts ? codec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;

  if (o->audio_sample_rate == -1)
  {
    if (o->audio)
    {
      if (gegl_audio_fragment_get_sample_rate (o->audio) == 0)
      {
        gegl_audio_fragment_set_sample_rate (o->audio, 48000); // XXX: should skip adding audiostream instead
      }
      o->audio_sample_rate = gegl_audio_fragment_get_sample_rate (o->audio);
    }
  }
  c->sample_rate = o->audio_sample_rate;
  c->channel_layout = AV_CH_LAYOUT_STEREO;
  c->channels = 2;

  if (codec->supported_samplerates)
  {
    c->sample_rate = codec->supported_samplerates[0];
    for (i = 0; codec->supported_samplerates[i]; i++)
    {
      if (codec->supported_samplerates[i] == o->audio_sample_rate)
         c->sample_rate = o->audio_sample_rate;
    }
  }
  //st->time_base = (AVRational){1, c->sample_rate};
  st->time_base = (AVRational){1, o->audio_sample_rate};

  c->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL; // ffmpeg AAC is not quite stable yet

  /* open it */
  if (avcodec_open2 (c, codec, NULL) < 0)
    {
      fprintf (stderr, "could not open codec\n");
      return FALSE;
    }

  return TRUE;
}

static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  uint64_t channel_layout,
                                  int sample_rate, int nb_samples)
{
  AVFrame *frame = av_frame_alloc();
  int ret;

  if (!frame) {
      fprintf(stderr, "Error allocating an audio frame\n");
      exit(1);
  }

  frame->format         = sample_fmt;
  frame->channel_layout = channel_layout;
  frame->sample_rate    = sample_rate;
  frame->nb_samples     = nb_samples;

  if (nb_samples) {
      ret = av_frame_get_buffer(frame, 0);
      if (ret < 0) {
          fprintf(stderr, "Error allocating an audio buffer\n");
          exit(1);
      }
  }
  return frame;
}

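/* Encode buffered samples from the audio track in chunks of 'frame_size'
 * samples, converting to the codec's sample format and writing each
 * resulting packet interleaved into the output file.
 */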
static void encode_audio_fragments (Priv *p, AVFormatContext *oc, AVStream *st, int frame_size)
{
  while (p->audio_pos - p->audio_read_pos > frame_size)
  {
    AVCodecContext *c = st->codec;
    long i;
    int ret;
    int got_packet = 0;
    static AVPacket  pkt = { 0 };  /* XXX: static, should be stored in instance somehow */
    AVFrame *frame = alloc_audio_frame (c->sample_fmt, c->channel_layout,
                                        c->sample_rate, frame_size);

    if (pkt.size == 0)
    {
      av_init_packet (&pkt);
    }

    av_frame_make_writable (frame);
    switch (c->sample_fmt) {
      case AV_SAMPLE_FMT_FLT:
        for (i = 0; i < frame_size; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((float*)frame->data[0])[c->channels*i+0] = left;
          ((float*)frame->data[0])[c->channels*i+1] = right;
        }
        break;
      case AV_SAMPLE_FMT_FLTP:
        for (i = 0; i < frame_size; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((float*)frame->data[0])[i] = left;
          ((float*)frame->data[1])[i] = right;
        }
        break;
      case AV_SAMPLE_FMT_S16:
        for (i = 0; i < frame_size; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int16_t*)frame->data[0])[c->channels*i+0] = left * (1<<15);
          ((int16_t*)frame->data[0])[c->channels*i+1] = right * (1<<15);
        }
        break;
      case AV_SAMPLE_FMT_S32:
        for (i = 0; i < frame_size; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int32_t*)frame->data[0])[c->channels*i+0] = left * (1<<31);
          ((int32_t*)frame->data[0])[c->channels*i+1] = right * (1<<31);
        }
        break;
      case AV_SAMPLE_FMT_S32P:
        for (i = 0; i < frame_size; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int32_t*)frame->data[0])[i] = left * (1<<31);
          ((int32_t*)frame->data[1])[i] = right * (1<<31);
        }
        break;
      case AV_SAMPLE_FMT_S16P:
        for (i = 0; i < frame_size; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int16_t*)frame->data[0])[i] = left * (1<<15);
          ((int16_t*)frame->data[1])[i] = right * (1<<15);
        }
        break;
      default:
        fprintf (stderr, "eeeek unhandled audio format\n");
        break;
    }
    frame->pts = p->next_apts;
    p->next_apts += frame_size;

    //ret = avcodec_send_frame (c, frame);
    ret = avcodec_encode_audio2 (c, &pkt, frame, &got_packet);

    if (ret < 0) {
      fprintf (stderr, "Error encoding audio frame: %s\n", av_err2str (ret));
    }
    if (got_packet)
    {
      av_packet_rescale_ts (&pkt, st->codec->time_base, st->time_base);
      pkt.stream_index = st->index;
      av_interleaved_write_frame (oc, &pkt);
      av_packet_unref (&pkt);
    }
    av_frame_free (&frame);
    p->audio_read_pos += frame_size;
  }
  av_interleaved_write_frame (oc, NULL);
}

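/* Queue one video frame's worth of audio (from o->audio, or silence when
 * none is provided) onto the internal audio track, then encode it using
 * the codec's fixed frame size where required.
 */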
void
write_audio_frame (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  Priv *p = (Priv*)o->user_data;
  AVCodecContext *c = st->codec;
  int sample_count = 100000;

  if (o->audio)
  {
    int i;
    int real_sample_count;
    GeglAudioFragment *af;
    real_sample_count = samples_per_frame (p->frame_count, o->frame_rate, o->audio_sample_rate, NULL, NULL);

    af = gegl_audio_fragment_new (gegl_audio_fragment_get_sample_rate (o->audio),
                                  gegl_audio_fragment_get_channels (o->audio),
                                  gegl_audio_fragment_get_channel_layout (o->audio),
                                  real_sample_count);
    gegl_audio_fragment_set_sample_count (af, real_sample_count);

    sample_count = gegl_audio_fragment_get_sample_count (o->audio);

    for (i = 0; i < real_sample_count; i++)
      {
        af->data[0][i] = (i<sample_count)?o->audio->data[0][i]:0.0f;
        af->data[1][i] = (i<sample_count)?o->audio->data[1][i]:0.0f;
      }

    gegl_audio_fragment_set_pos (af, p->audio_pos);
    sample_count = real_sample_count;
    p->audio_pos += real_sample_count;
    p->audio_track = g_list_append (p->audio_track, af);
  }
  else
  {
    int i;
    GeglAudioFragment *af;
    sample_count = samples_per_frame (p->frame_count, o->frame_rate, o->audio_sample_rate, NULL, NULL);
    af = gegl_audio_fragment_new (sample_count, 2, 0, sample_count);
    gegl_audio_fragment_set_sample_count (af, sample_count);
    gegl_audio_fragment_set_pos (af, p->audio_pos);
    for (i = 0; i < sample_count; i++)
      {
        af->data[0][i] = 0.0;
        af->data[1][i] = 0.0;
      }
    p->audio_pos += sample_count;
    p->audio_track = g_list_append (p->audio_track, af);
  }

  if (!(c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
  {
    sample_count = c->frame_size;
  }

  encode_audio_fragments (p, oc, st, sample_count);
}

void
close_audio (Priv * p, AVFormatContext * oc, AVStream * st)
{
  avcodec_close (st->codec);

}

/* add a video output stream */
static AVStream *
add_video_stream (GeglProperties *o, AVFormatContext * oc, int codec_id)
{
  Priv *p = (Priv*)o->user_data;

  AVCodecContext *c;
  AVStream *st;

  st = avformat_new_stream (oc, NULL);
  if (!st)
    {
      fprintf (stderr, "Could not alloc stream %p %p %i\n", o, oc, codec_id);
      exit (1);
    }

  c = st->codec;
  c->codec_id = codec_id;
  c->codec_type = AVMEDIA_TYPE_VIDEO;
  /* put sample parameters */
  c->bit_rate = o->video_bit_rate * 1000;
#ifdef USE_FINE_GRAINED_FFMPEG
  c->rc_min_rate = o->video_bit_rate_min * 1000;
  c->rc_max_rate = o->video_bit_rate_max * 1000;
  if (o->video_bit_rate_tolerance >= 0)
    c->bit_rate_tolerance = o->video_bit_rate_tolerance * 1000;
#endif
  /* resolution must be a multiple of two */
  c->width = p->width;
  c->height = p->height;
  /* frames per second */
  st->time_base = (AVRational){1000, o->frame_rate * 1000};
  c->time_base = st->time_base;

  c->pix_fmt = AV_PIX_FMT_YUV420P;

  if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO)
    {
      c->max_b_frames = 2;
    }

  if (c->codec_id == AV_CODEC_ID_H264)
    {
      c->qcompress = 0.6;  // qcomp=0.6
      c->me_range = 16;    // me_range=16
      c->gop_size = 250;   // g=250
      c->max_b_frames = 3; // bf=3
    }

  if (o->video_bufsize)
    c->rc_buffer_size = o->video_bufsize * 1000;
#if USE_FINE_GRAINED_FFMPEG
  if (o->global_quality)
    c->global_quality = o->global_quality;
  if (o->qcompress != 0.0)
    c->qcompress = o->qcompress;
  if (o->qblur != 0.0)
    c->qblur = o->qblur;
  if (o->max_qdiff != 0)
    c->max_qdiff = o->max_qdiff;
  if (o->me_subpel_quality != 0)
    c->me_subpel_quality = o->me_subpel_quality;
  if (o->i_quant_factor != 0.0)
    c->i_quant_factor = o->i_quant_factor;
  if (o->i_quant_offset != 0.0)
    c->i_quant_offset = o->i_quant_offset;
  if (o->max_b_frames)
    c->max_b_frames = o->max_b_frames;
  if (o->me_range)
    c->me_range = o->me_range;
  if (o->noise_reduction)
    c->noise_reduction = o->noise_reduction;
  if (o->scenechange_threshold)
    c->scenechange_threshold = o->scenechange_threshold;
  if (o->trellis)
    c->trellis = o->trellis;
  if (o->qmin)
    c->qmin = o->qmin;
  if (o->qmax)
    c->qmax = o->qmax;
  if (o->gop_size)
    c->gop_size = o->gop_size;
  if (o->keyint_min)
    c->keyint_min = o->keyint_min;
#endif

  if (oc->oformat->flags & AVFMT_GLOBALHEADER)
    c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

  return st;
}


static AVFrame *
alloc_picture (int pix_fmt, int width, int height)
{
  AVFrame  *picture;
  uint8_t  *picture_buf;
  int       size;

  picture = av_frame_alloc ();
  if (!picture)
    return NULL;
  size = avpicture_get_size (pix_fmt, width + 1, height + 1);
  picture_buf = malloc (size);
  if (!picture_buf)
    {
      av_free (picture);
      return NULL;
    }
  avpicture_fill ((AVPicture *) picture, picture_buf, pix_fmt, width, height);
  return picture;
}

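/* Configure and open the video encoder; RGB24 is used directly when the
 * codec supports it, otherwise frames are converted from RGB24 with
 * swscale at encode time.
 */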
static gboolean
open_video (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  Priv           *p = (Priv*)o->user_data;
  AVCodec  *codec;
  AVCodecContext *c;
  AVDictionary *codec_options = {0};
  int           ret;

  c = st->codec;

  /* find the video encoder */
  codec = avcodec_find_encoder (c->codec_id);
  if (!codec)
    {
      fprintf (stderr, "codec not found\n");
      return FALSE;
    }

  if (codec->pix_fmts) {
    int i = 0;
    c->pix_fmt = codec->pix_fmts[0];
    while (codec->pix_fmts[i] != -1)
    {
      if (codec->pix_fmts[i] == AV_PIX_FMT_RGB24)
         c->pix_fmt = AV_PIX_FMT_RGB24;
      i++;
    }
  }
#if 0
  if (o->video_preset[0])
    av_dict_set (&codec_options, "preset", o->video_preset, 0);
#endif

  /* open the codec */
  if ((ret = avcodec_open2 (c, codec, &codec_options)) < 0)
    {
      fprintf (stderr, "could not open codec: %s\n", av_err2str (ret));
      return FALSE;
    }

  p->video_outbuf = NULL;
#if (LIBAVFORMAT_VERSION_MAJOR < 58) /* AVFMT_RAWPICTURE got removed from ffmpeg: "not used anymore" */
  if (!(oc->oformat->flags & AVFMT_RAWPICTURE))
#endif
    {
      /* allocate output buffer, 1 MB / frame; might fail for some codecs on UHD - but works for now */
      p->video_outbuf_size = 1024 * 1024;
      p->video_outbuf = malloc (p->video_outbuf_size);
    }

  /* allocate the encoded raw picture */
  p->picture = alloc_picture (c->pix_fmt, c->width, c->height);
  if (!p->picture)
    {
      fprintf (stderr, "Could not allocate picture\n");
      return FALSE;
    }

  /* if the codec does not take RGB24 input, a temporary RGB24
     picture is needed too; it is converted to the codec's pixel
     format at encode time */
  p->tmp_picture = NULL;
  if (c->pix_fmt != AV_PIX_FMT_RGB24)
    {
      p->tmp_picture = alloc_picture (AV_PIX_FMT_RGB24, c->width, c->height);
      if (!p->tmp_picture)
        {
          fprintf (stderr, "Could not allocate temporary picture\n");
          return FALSE;
        }
    }

  return TRUE;
}

static void
close_video (Priv * p, AVFormatContext * oc, AVStream * st)
{
  avcodec_close (st->codec);
  av_free (p->picture->data[0]);
  av_free (p->picture);
  if (p->tmp_picture)
    {
      av_free (p->tmp_picture->data[0]);
      av_free (p->tmp_picture);
    }
  free (p->video_outbuf);
}

#include <string.h>

/* fill the frame with RGB pixels fetched from the GEGL input buffer */
static void
fill_rgb_image (GeglProperties *o,
                AVFrame *pict, int frame_index, int width, int height)
{
  Priv     *p = (Priv*)o->user_data;
  GeglRectangle rect={0,0,width,height};
  gegl_buffer_get (p->input, &rect, 1.0, babl_format ("R'G'B' u8"), pict->data[0], GEGL_AUTO_ROWSTRIDE, GEGL_ABYSS_NONE);
}

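/* Fetch the current input buffer as RGB24, convert it to the codec's pixel
 * format if necessary, encode the frame and write the packet to the
 * container.
 */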
static void
write_video_frame (GeglProperties *o,
                   AVFormatContext *oc, AVStream *st)
{
  Priv           *p = (Priv*)o->user_data;
  int             out_size, ret;
  AVCodecContext *c;
  AVFrame        *picture_ptr;

  c = st->codec;

  if (c->pix_fmt != AV_PIX_FMT_RGB24)
    {
      struct SwsContext *img_convert_ctx;
      fill_rgb_image (o, p->tmp_picture, p->frame_count, c->width,
                      c->height);

      img_convert_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_RGB24,
                                       c->width, c->height, c->pix_fmt,
                                       SWS_BICUBIC, NULL, NULL, NULL);

      if (img_convert_ctx == NULL)
        {
          fprintf(stderr, "ff_save: Cannot initialize conversion context.\n");
        }
      else
        {
          sws_scale(img_convert_ctx,
                    (void*)p->tmp_picture->data,
                    p->tmp_picture->linesize,
                    0,
                    c->height,
                    p->picture->data,
                    p->picture->linesize);
          p->picture->format = c->pix_fmt;
          p->picture->width = c->width;
          p->picture->height = c->height;
          sws_freeContext (img_convert_ctx);
        }
    }
  else
    {
      fill_rgb_image (o, p->picture, p->frame_count, c->width, c->height);
    }

  picture_ptr      = p->picture;
  picture_ptr->pts = p->frame_count;

#if (LIBAVFORMAT_VERSION_MAJOR < 58) /* AVFMT_RAWPICTURE got removed from ffmpeg: "not used anymore" */
  if (oc->oformat->flags & AVFMT_RAWPICTURE)
    {
      /* raw video case. The API will change slightly in the near
         future for that */
      AVPacket  pkt;
      av_init_packet (&pkt);

      pkt.flags |= AV_PKT_FLAG_KEY;
      pkt.stream_index = st->index;
      pkt.data = (uint8_t *) picture_ptr;
      pkt.size = sizeof (AVPicture);
      pkt.pts = picture_ptr->pts;
      pkt.dts = picture_ptr->pts;
      av_packet_rescale_ts (&pkt, c->time_base, st->time_base);

      ret = av_write_frame (oc, &pkt);
    }
  else
#endif
    {
      /* encode the image */
      AVPacket pkt2;
      int got_packet = 0;
      av_init_packet(&pkt2);
      pkt2.data = p->video_outbuf;
      pkt2.size = p->video_outbuf_size;

      out_size = avcodec_encode_video2(c, &pkt2, picture_ptr, &got_packet);

      if (!out_size && got_packet && c->coded_frame)
        {
          c->coded_frame->pts       = pkt2.pts;
          c->coded_frame->key_frame = !!(pkt2.flags & AV_PKT_FLAG_KEY);
          if (c->codec->capabilities & AV_CODEC_CAP_INTRA_ONLY)
              c->coded_frame->pict_type = AV_PICTURE_TYPE_I;
        }

      if (pkt2.side_data_elems > 0)
        {
          int i;
          for (i = 0; i < pkt2.side_data_elems; i++)
            av_free(pkt2.side_data[i].data);
          av_freep(&pkt2.side_data);
          pkt2.side_data_elems = 0;
        }

      if (!out_size)
        out_size = pkt2.size;

      /* if zero size, it means the image was buffered */
      if (out_size != 0)
        {
          AVPacket  pkt;
          av_init_packet (&pkt);
          if (c->coded_frame->key_frame)
            pkt.flags |= AV_PKT_FLAG_KEY;
          pkt.stream_index = st->index;
          pkt.data = p->video_outbuf;
          pkt.size = out_size;
          pkt.pts = picture_ptr->pts;
          pkt.dts = picture_ptr->pts;
          av_packet_rescale_ts (&pkt, c->time_base, st->time_base);
          /* write the compressed frame in the media file */
          ret = av_write_frame (oc, &pkt);
        }
      else
        {
          ret = 0;
        }
    }
  if (ret != 0)
    {
      fprintf (stderr, "Error while writing video frame\n");
      exit (1);
    }
  p->frame_count++;
}

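/* Open the target file: guess or look up the container format and codecs,
 * create the video/audio streams, open their encoders and write the
 * container header.  Returns 0 on success, -1 on failure.
 */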
static int
tfile (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;

  if (strcmp (o->container_format, "auto"))
    p->fmt = av_guess_format (o->container_format, o->path, NULL);
  else
    p->fmt = av_guess_format (NULL, o->path, NULL);

  if (!p->fmt)
    {
      fprintf (stderr,
               "ff_save couldn't deduce output format from file extension: using MPEG.\n");
      p->fmt = av_guess_format ("mpeg", NULL, NULL);
    }
  p->oc = avformat_alloc_context ();
  if (!p->oc)
    {
      fprintf (stderr, "memory error\n");
      return -1;
    }

  p->oc->oformat = p->fmt;

  snprintf (p->oc->filename, sizeof (p->oc->filename), "%s", o->path);

  p->video_st = NULL;
  p->audio_st = NULL;

  if (strcmp (o->video_codec, "auto"))
  {
    AVCodec *codec = avcodec_find_encoder_by_name (o->video_codec);
    p->fmt->video_codec = AV_CODEC_ID_NONE;
    if (codec)
      p->fmt->video_codec = codec->id;
    else
      {
        fprintf (stderr, "didn't find video encoder \"%s\"\navailable codecs: ", o->video_codec);
        while ((codec = av_codec_next (codec)))
          if (av_codec_is_encoder (codec) &&
              avcodec_get_type (codec->id) == AVMEDIA_TYPE_VIDEO)
            fprintf (stderr, "%s ", codec->name);
        fprintf (stderr, "\n");
      }
  }
  if (strcmp (o->audio_codec, "auto"))
  {
    AVCodec *codec = avcodec_find_encoder_by_name (o->audio_codec);
    p->fmt->audio_codec = AV_CODEC_ID_NONE;
    if (codec)
      p->fmt->audio_codec = codec->id;
    else
      {
        fprintf (stderr, "didn't find audio encoder \"%s\"\navailable codecs: ", o->audio_codec);
        while ((codec = av_codec_next (codec)))
          if (av_codec_is_encoder (codec) &&
              avcodec_get_type (codec->id) == AVMEDIA_TYPE_AUDIO)
            fprintf (stderr, "%s ", codec->name);
        fprintf (stderr, "\n");
      }
  }

  if (p->fmt->video_codec != AV_CODEC_ID_NONE)
    {
      p->video_st = add_video_stream (o, p->oc, p->fmt->video_codec);
    }
  if (p->fmt->audio_codec != AV_CODEC_ID_NONE)
    {
      p->audio_st = add_audio_stream (o, p->oc, p->fmt->audio_codec);
    }

  if (p->video_st && ! open_video (o, p->oc, p->video_st))
    return -1;

  if (p->audio_st && ! open_audio (o, p->oc, p->audio_st))
    return -1;

  av_dump_format (p->oc, 0, o->path, 1);

  if (avio_open (&p->oc->pb, o->path, AVIO_FLAG_WRITE) < 0)
    {
      fprintf (stderr, "couldn't open '%s'\n", o->path);
      return -1;
    }

  if (avformat_write_header (p->oc, NULL) < 0)
  {
    fprintf (stderr, "'%s' error writing header\n", o->path);
    return -1;
  }
  return 0;
}


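/* Drain a delayed packet from the audio encoder, if any. */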
static void flush_audio (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;
  AVPacket  pkt = { 0 };
  int ret;

  int got_packet = 0;
  if (!p->audio_st)
    return;

  got_packet = 0;
  av_init_packet (&pkt);
  ret = avcodec_encode_audio2 (p->audio_st->codec, &pkt, NULL, &got_packet);
  if (ret < 0)
  {
    fprintf (stderr, "audio enc trouble\n");
  }
  if (got_packet)
    {
      pkt.stream_index = p->audio_st->index;
      av_packet_rescale_ts (&pkt, p->audio_st->codec->time_base, p->audio_st->time_base);
      av_interleaved_write_frame (p->oc, &pkt);
      av_packet_unref (&pkt);
    }
}

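/* Sink process: lazily sets up the output file on the first frame, then
 * encodes one video frame (plus the matching audio) per invocation.
 */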
static gboolean
process (GeglOperation       *operation,
         GeglBuffer          *input,
         const GeglRectangle *result,
         gint                 level)
{
  GeglProperties *o = GEGL_PROPERTIES (operation);
  Priv *p = (Priv*)o->user_data;

  g_assert (input);

  if (p == NULL)
    init (o);
  p = (Priv*)o->user_data;

  p->width = result->width;
  p->height = result->height;
  p->input = input;

  if (!p->file_inited)
    {
      if (tfile (o) == 0)
        p->file_inited = 1;
    }

  if (p->file_inited)
    {
      write_video_frame (o, p->oc, p->video_st);
      if (p->audio_st)
        {
          write_audio_frame (o, p->oc, p->audio_st);
          //flush_audio (o);
        }

      return TRUE;
    }
  return FALSE;
}


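/* Drain all delayed frames from the video encoder at the end of the stream. */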
static void flush_video (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;
  int got_packet = 0;
  long ts = p->frame_count;
  do {
    AVPacket  pkt = { 0 };
    int ret;
    got_packet = 0;
    av_init_packet (&pkt);
    ret = avcodec_encode_video2 (p->video_st->codec, &pkt, NULL, &got_packet);
    if (ret < 0)
      return;

     if (got_packet)
     {
       pkt.stream_index = p->video_st->index;
       pkt.pts = ts;
       pkt.dts = ts++;
       av_packet_rescale_ts (&pkt, p->video_st->codec->time_base, p->video_st->time_base);
       av_interleaved_write_frame (p->oc, &pkt);
       av_packet_unref (&pkt);
     }
  } while (got_packet);
}

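/* Flush the encoders, write the container trailer and release the FFmpeg
 * and Priv state.
 */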
static void
finalize (GObject *object)
{
  GeglProperties *o = GEGL_PROPERTIES (object);
  if (o->user_data)
    {
      Priv *p = (Priv*)o->user_data;

      if (p->file_inited)
        {
          flush_audio (o);
          flush_video (o);

          av_write_trailer (p->oc);

          if (p->video_st)
            close_video (p, p->oc, p->video_st);
          if (p->audio_st)
            close_audio (p, p->oc, p->audio_st);
        }

      avio_closep (&p->oc->pb);
      avformat_free_context (p->oc);

      g_clear_pointer (&o->user_data, g_free);
    }

  G_OBJECT_CLASS (g_type_class_peek_parent (G_OBJECT_GET_CLASS (object)))->finalize (object);
}


static void
gegl_op_class_init (GeglOpClass *klass)
{
  GeglOperationClass     *operation_class;
  GeglOperationSinkClass *sink_class;

  G_OBJECT_CLASS (klass)->finalize = finalize;

  operation_class = GEGL_OPERATION_CLASS (klass);
  sink_class      = GEGL_OPERATION_SINK_CLASS (klass);

  sink_class->process = process;
  sink_class->needs_full = TRUE;

  gegl_operation_class_set_keys (operation_class,
    "name"        , "gegl:ff-save",
    "title"       , _("FFmpeg Frame Saver"),
    "categories"  , "output:video",
    "description" , _("FFmpeg video output sink"),
    NULL);
}

#endif