/* This file is an image processing operation for GEGL
 *
 * GEGL is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 3 of the License, or (at your option) any later version.
 *
 * GEGL is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GEGL; if not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright 2003,2004,2007, 2015 Øyvind Kolås <pippin@gimp.org>
 */

#include "config.h"
Martin Nordholts's avatar
Martin Nordholts committed
20 21 22

#include <stdlib.h>

#include <glib/gi18n-lib.h>

//#define USE_FINE_GRAINED_FFMPEG 1

#ifdef GEGL_PROPERTIES

property_string (path, _("File"), "/tmp/fnord.ogv")
    description (_("Target path and filename, use '-' for stdout."))

property_audio_fragment (audio, _("audio"), 0)
property_string (audio_codec, _("Audio codec"), "auto")
   description (_("Audio codec to use, or auto to use a good default based on container format."))
property_int (audio_sample_rate, _("audio sample rate"), -1)
    description (_("-1 means autodetect on first audio fragment"))

property_int (audio_bit_rate, _("audio bitrate in kb/s"), 64)
    description (_("Target encoded audio bitrate in kb/s"))

property_double (frame_rate, _("Frames/second"), 25.0)
    value_range (0.0, 100.0)

property_string (video_codec, _("Video codec"), "auto")
   description (_("Video codec to use, or auto to use a good default based on container format."))
property_int (video_bit_rate, _("video bitrate in kb/s"), 128)
    description (_("Target encoded video bitrate in kb/s"))
property_int (video_bufsize, _("Video bufsize"), 0)

property_string (container_format, _("Container format"), "auto")
   description (_("Container format to use, or auto to autodetect based on file extension."))

#ifdef USE_FINE_GRAINED_FFMPEG
property_int (global_quality, _("global quality"), 0)
property_int (noise_reduction, _("noise reduction"), 0)
property_int (scenechange_threshold, _("scenechange threshold"), 0)
property_int (video_bit_rate_min, _("video bitrate min"), 0)
property_int (video_bit_rate_max, _("video bitrate max"), 0)
property_int (video_bit_rate_tolerance, _("video bitrate tolerance"), -1)

property_int (keyint_min, _("keyint-min"), 0)
property_int (trellis, _("trellis"), 0)
property_int (qmin, _("qmin"), 0)
property_int (qmax, _("qmax"), 0)
property_int (max_qdiff, _("max_qdiff"), 0)
property_int (me_range, _("me_range"), 0)
property_int (max_b_frames, _("max_b_frames"), 0)
property_int (gop_size, _("gop-size"), 0)
property_double (qcompress, _("qcompress"), 0.0)
property_double (qblur, _("qblur"), 0.0)
property_double (i_quant_factor, _("i-quant-factor"), 0.0)
property_double (i_quant_offset, _("i-quant-offset"), 0.0)
property_int (me_subpel_quality, _("me-subpel-quality"), 0)
#endif


#else

#define GEGL_OP_SINK
#define GEGL_OP_C_SOURCE ff-save.c

#include "gegl-op.h"

#include <math.h>

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>

typedef struct
{
  gdouble    frame;
  gdouble    frames;
  gdouble    width;
  gdouble    height;
  GeglBuffer *input;

  AVOutputFormat *fmt;
  AVFormatContext *oc;
  AVStream *video_st;

  AVFrame  *picture, *tmp_picture;
  uint8_t  *video_outbuf;
  int       frame_count, video_outbuf_size;

    /** the rest is for audio handling within oxide, note that the interface
     * used passes all used functions in the oxide api through the reg_sym api
     * of gggl, this means that the ops should be usable by other applications
     * using gggl directly, without needing to link with the oxide library
     */
  AVStream *audio_st;

  uint32_t  sample_rate;
  uint32_t  bits;
  uint32_t  channels;
  uint32_t  fragment_samples;
  uint32_t  fragment_size;

  int       bufsize;
  int       buffer_read_pos;
  int       buffer_write_pos;
  uint8_t  *buffer;

  int       audio_outbuf_size;
  int16_t  *samples;

  GList    *audio_track;
  long      audio_pos;
  long      audio_read_pos;

  int       next_apts;
} Priv;

static void
clear_audio_track (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;
  while (p->audio_track)
    {
      g_object_unref (p->audio_track->data);
      p->audio_track = g_list_remove (p->audio_track, p->audio_track->data);
    }
}

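/* samples_per_frame() returns the number of audio samples that belong to
 * video frame 'frame' at the given frame rate and sample rate, optionally
 * returning the sample index the frame starts at; when the sample rate is
 * not an integer multiple of the frame rate the remainder is spread over
 * successive frames. */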
static int
samples_per_frame (int    frame,
                   double frame_rate,
                   int    sample_rate,
                   long  *start)
{
  double osamples;
  double samples = 0;
  int f = 0;

  if (fabs(fmod (sample_rate, frame_rate)) < 0.0001)
  {
    if (start)
      *start = (sample_rate / frame_rate) * frame;
    return sample_rate / frame_rate;
  }

  for (f = 0; f < frame; f++)
  {
    samples += sample_rate / frame_rate;
  }
  osamples = samples;
  samples += sample_rate / frame_rate;
  if (start)
     (*start) = ceil(osamples);
  return ceil(samples)-ceil(osamples);
}

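/* get_sample_data() fetches the stereo sample at absolute position
 * 'sample_no' from the queued audio fragments, duplicating mono data into
 * both channels, and prunes fragments that end before the requested
 * sample. */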
static void get_sample_data (Priv *p, long sample_no, float *left, float *right)
{
  int to_remove = 0;
  GList *l;
  if (sample_no < 0)
    return;
  for (l = p->audio_track; l; l = l->next)
  {
    GeglAudioFragment *af = l->data;
    int channels = gegl_audio_fragment_get_channels (af);
    int pos = gegl_audio_fragment_get_pos (af);
    int sample_count = gegl_audio_fragment_get_sample_count (af);
    if (sample_no > pos + sample_count)
    {
      to_remove ++;
    }

    if (pos <= sample_no &&
        sample_no < pos + sample_count)
      {
        int i = sample_no - pos;
        *left  = af->data[0][i];
        if (channels == 1)
          *right = af->data[0][i];
        else
          *right = af->data[1][i];

        if (to_remove)  /* consuming audiotrack */
        {
          again:
          for (l = p->audio_track; l; l = l->next)
          {
            GeglAudioFragment *af = l->data;
            int pos = gegl_audio_fragment_get_pos (af);
            int sample_count = gegl_audio_fragment_get_sample_count (af);
            if (sample_no > pos + sample_count)
            {
              p->audio_track = g_list_remove (p->audio_track, af);
              g_object_unref (af);
              goto again;
            }
          }
        }
        return;
      }
  }
  *left  = 0;
  *right = 0;
}

static void
init (GeglProperties *o)
{
  static gint inited = 0; /*< this is actually meant to be static, only to be done once */
  Priv       *p = (Priv*)o->user_data;

  if (p == NULL)
    {
      p = g_new0 (Priv, 1);
      o->user_data = (void*) p;
    }

  if (!inited)
    {
      av_register_all ();
      avcodec_register_all ();
      inited = 1;
    }

  clear_audio_track (o);
  p->audio_pos = 0;
  p->audio_read_pos = 0;

  o->audio_sample_rate = -1; /* only do this if it hasn't been manually set? */

  av_log_set_level (AV_LOG_WARNING);
}

static void close_video       (Priv            *p,
                               AVFormatContext *oc,
                               AVStream        *st);
void        close_audio       (Priv            *p,
                               AVFormatContext *oc,
                               AVStream        *st);
static int  tfile             (GeglProperties  *o);
static void write_video_frame (GeglProperties  *o,
                               AVFormatContext *oc,
                               AVStream        *st);
static void write_audio_frame (GeglProperties  *o,
                               AVFormatContext *oc,
                               AVStream        *st);

#define STREAM_FRAME_RATE 25    /* 25 images/s */

#ifndef DISABLE_AUDIO
/* add an audio output stream */
static AVStream *
add_audio_stream (GeglProperties *o, AVFormatContext * oc, int codec_id)
{
  AVCodecContext *c;
  AVStream *st;

  st = avformat_new_stream (oc, NULL);
  if (!st)
    {
      fprintf (stderr, "Could not alloc stream\n");
      exit (1);
    }

  c = st->codec;
  c->codec_id = codec_id;
  c->codec_type = AVMEDIA_TYPE_AUDIO;

  if (oc->oformat->flags & AVFMT_GLOBALHEADER)
    c->flags |= CODEC_FLAG_GLOBAL_HEADER;

  return st;
}
#endif

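/* open_audio() picks the encoder's preferred sample format, chooses a
 * supported sample rate (falling back to 48000 Hz when the incoming fragment
 * does not specify one), configures a stereo layout and opens the audio
 * codec. */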
static void
open_audio (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  AVCodecContext *c;
  AVCodec  *codec;
  int i;

  c = st->codec;

  /* find the audio encoder */
  codec = avcodec_find_encoder (c->codec_id);
  if (!codec)
    {
      fprintf (stderr, "codec not found\n");
      exit (1);
    }
  c->bit_rate = o->audio_bit_rate * 1000;
  c->sample_fmt = codec->sample_fmts ? codec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;

  if (o->audio_sample_rate == -1)
  {
    if (o->audio)
    {
      if (gegl_audio_fragment_get_sample_rate (o->audio) == 0)
      {
        gegl_audio_fragment_set_sample_rate (o->audio, 48000); // XXX: should skip adding audiostream instead
      }
      o->audio_sample_rate = gegl_audio_fragment_get_sample_rate (o->audio);
    }
  }
  c->sample_rate = o->audio_sample_rate;
  c->channel_layout = AV_CH_LAYOUT_STEREO;
  c->channels = 2;

  if (codec->supported_samplerates)
  {
    c->sample_rate = codec->supported_samplerates[0];
    for (i = 0; codec->supported_samplerates[i]; i++)
    {
      if (codec->supported_samplerates[i] == o->audio_sample_rate)
         c->sample_rate = o->audio_sample_rate;
    }
  }
  //st->time_base = (AVRational){1, c->sample_rate};
  st->time_base = (AVRational){1, o->audio_sample_rate};

  c->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL; // ffmpeg AAC is not quite stable yet

  /* open it */
  if (avcodec_open2 (c, codec, NULL) < 0)
    {
      fprintf (stderr, "could not open codec\n");
      exit (1);
    }
}

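/* alloc_audio_frame() allocates an AVFrame configured with the given sample
 * format, channel layout, sample rate and frame size, including the backing
 * sample buffers when nb_samples is non-zero. */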
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  uint64_t channel_layout,
                                  int sample_rate, int nb_samples)
{
  AVFrame *frame = av_frame_alloc();
  int ret;

  if (!frame) {
      fprintf(stderr, "Error allocating an audio frame\n");
      exit(1);
  }

  frame->format         = sample_fmt;
  frame->channel_layout = channel_layout;
  frame->sample_rate    = sample_rate;
  frame->nb_samples     = nb_samples;

  if (nb_samples) {
      ret = av_frame_get_buffer(frame, 0);
      if (ret < 0) {
          fprintf(stderr, "Error allocating an audio buffer\n");
          exit(1);
      }
  }
  return frame;
}

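/* write_audio_frame() appends the samples of the incoming audio fragment
 * (or silence when no audio is connected) to the internal audio track, then
 * encodes and muxes as many codec-frame-sized chunks as are available. */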
void
write_audio_frame (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  Priv *p = (Priv*)o->user_data;
  AVCodecContext *c = st->codec;
  int sample_count = 100000;
  static AVPacket  pkt = { 0 };

  if (pkt.size == 0)
  {
    av_init_packet (&pkt);
  }

  /* first we add incoming frames audio samples */
  if (o->audio)
  {
    int i;
    GeglAudioFragment *af;
    sample_count = gegl_audio_fragment_get_sample_count (o->audio);
    af = gegl_audio_fragment_new (gegl_audio_fragment_get_sample_rate (o->audio),
                                  gegl_audio_fragment_get_channels (o->audio),
                                  gegl_audio_fragment_get_channel_layout (o->audio),
                                  sample_count);
    gegl_audio_fragment_set_sample_count (af, sample_count);
    for (i = 0; i < sample_count; i++)
      {
        af->data[0][i] = o->audio->data[0][i];
        af->data[1][i] = o->audio->data[1][i];
      }
    gegl_audio_fragment_set_pos (af, p->audio_pos);
    p->audio_pos += sample_count;
    p->audio_track = g_list_append (p->audio_track, af);
  }
  else
  {
    int i;
    GeglAudioFragment *af;
    sample_count = samples_per_frame (p->frame_count, o->frame_rate, o->audio_sample_rate, NULL);
    af = gegl_audio_fragment_new (sample_count,
                                  2,
                                  0,
                                  sample_count);
    gegl_audio_fragment_set_sample_count (af, sample_count);
    gegl_audio_fragment_set_pos (af, p->audio_pos);
    for (i = 0; i < sample_count; i++)
      {
        af->data[0][i] = 0.0;
        af->data[1][i] = 0.0;
      }
    p->audio_pos += sample_count;
    p->audio_track = g_list_append (p->audio_track, af);
  }
  if (!(c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
    sample_count = c->frame_size;

  /* then we encode as much as we can in a loop using the codec frame size */
  while (p->audio_pos - p->audio_read_pos > sample_count)
  {
    long i;
    int ret;
    int got_packet = 0;
    AVFrame *frame = alloc_audio_frame (c->sample_fmt, c->channel_layout,
                                        c->sample_rate, sample_count);

    switch (c->sample_fmt) {
      case AV_SAMPLE_FMT_FLT:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((float*)frame->data[0])[c->channels*i+0] = left;
          ((float*)frame->data[0])[c->channels*i+1] = right;
        }
        break;
      case AV_SAMPLE_FMT_FLTP:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((float*)frame->data[0])[i] = left;
          ((float*)frame->data[1])[i] = right;
        }
        break;
      case AV_SAMPLE_FMT_S16:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int16_t*)frame->data[0])[c->channels*i+0] = left * (1<<15);
          ((int16_t*)frame->data[0])[c->channels*i+1] = right * (1<<15);
        }
        break;
      case AV_SAMPLE_FMT_S32:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int32_t*)frame->data[0])[c->channels*i+0] = left * (1<<31);
          ((int32_t*)frame->data[0])[c->channels*i+1] = right * (1<<31);
        }
        break;
      case AV_SAMPLE_FMT_S32P:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int32_t*)frame->data[0])[i] = left * (1<<31);
          ((int32_t*)frame->data[1])[i] = right * (1<<31);
        }
        break;
      case AV_SAMPLE_FMT_S16P:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int16_t*)frame->data[0])[i] = left * (1<<15);
          ((int16_t*)frame->data[1])[i] = right * (1<<15);
        }
        break;
      default:
        fprintf (stderr, "eeeek unhandled audio format\n");
        break;
    }
    frame->pts = p->next_apts;
    p->next_apts += sample_count;

    av_frame_make_writable (frame);
    ret = avcodec_encode_audio2 (c, &pkt, frame, &got_packet);

    av_packet_rescale_ts (&pkt, st->codec->time_base, st->time_base);
    if (ret < 0) {
      fprintf (stderr, "Error encoding audio frame: %s\n", av_err2str (ret));
    }

    if (got_packet)
    {
      pkt.stream_index = st->index;
      av_interleaved_write_frame (oc, &pkt);
      av_free_packet (&pkt);
    }

    av_frame_free (&frame);
    p->audio_read_pos += sample_count;
  }
}

void
close_audio (Priv * p, AVFormatContext * oc, AVStream * st)
{
  avcodec_close (st->codec);

}

/* add a video output stream */
static AVStream *
add_video_stream (GeglProperties *o, AVFormatContext * oc, int codec_id)
{
  Priv *p = (Priv*)o->user_data;

  AVCodecContext *c;
  AVStream *st;

  st = avformat_new_stream (oc, NULL);
  if (!st)
    {
      fprintf (stderr, "Could not alloc stream %p %p %i\n", o, oc, codec_id);
      exit (1);
    }

  c = st->codec;
  c->codec_id = codec_id;
  c->codec_type = AVMEDIA_TYPE_VIDEO;
  /* put sample parameters */
  c->bit_rate = o->video_bit_rate * 1000;
#ifdef USE_FINE_GRAINED_FFMPEG
  c->rc_min_rate = o->video_bit_rate_min * 1000;
  c->rc_max_rate = o->video_bit_rate_max * 1000;
  if (o->video_bit_rate_tolerance >= 0)
    c->bit_rate_tolerance = o->video_bit_rate_tolerance * 1000;
#endif
  /* resolution must be a multiple of two */
  c->width = p->width;
  c->height = p->height;
  /* frames per second */
  st->time_base = (AVRational){1000, o->frame_rate * 1000};
  c->time_base = st->time_base;

  c->pix_fmt = AV_PIX_FMT_YUV420P;

  if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO)
    {
      c->max_b_frames = 2;
    }

  if (c->codec_id == AV_CODEC_ID_H264)
   {
     c->qcompress = 0.6;  // qcomp=0.6
     c->me_range = 16;    // me_range=16
     c->gop_size = 250;   // g=250
     c->max_b_frames = 3; // bf=3
   }

  if (o->video_bufsize)
    c->rc_buffer_size = o->video_bufsize * 1000;
#ifdef USE_FINE_GRAINED_FFMPEG
  if (o->global_quality)
     c->global_quality = o->global_quality;
  if (o->qcompress != 0.0)
     c->qcompress = o->qcompress;
  if (o->qblur != 0.0)
     c->qblur = o->qblur;
  if (o->max_qdiff != 0)
     c->max_qdiff = o->max_qdiff;
  if (o->me_subpel_quality != 0)
     c->me_subpel_quality = o->me_subpel_quality;
  if (o->i_quant_factor != 0.0)
     c->i_quant_factor = o->i_quant_factor;
  if (o->i_quant_offset != 0.0)
     c->i_quant_offset = o->i_quant_offset;
  if (o->max_b_frames)
    c->max_b_frames = o->max_b_frames;
  if (o->me_range)
    c->me_range = o->me_range;
  if (o->noise_reduction)
    c->noise_reduction = o->noise_reduction;
  if (o->scenechange_threshold)
    c->scenechange_threshold = o->scenechange_threshold;
  if (o->trellis)
    c->trellis = o->trellis;
  if (o->qmin)
    c->qmin = o->qmin;
  if (o->qmax)
    c->qmax = o->qmax;
  if (o->gop_size)
    c->gop_size = o->gop_size;
  if (o->keyint_min)
    c->keyint_min = o->keyint_min;
#endif

   if (oc->oformat->flags & AVFMT_GLOBALHEADER)
     c->flags |= CODEC_FLAG_GLOBAL_HEADER;

  return st;
}


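/* alloc_picture() allocates an AVFrame plus a pixel buffer sized for the
 * given pixel format and dimensions, and wires the buffer into the frame's
 * data/linesize fields. */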
static AVFrame *
alloc_picture (int pix_fmt, int width, int height)
{
  AVFrame  *picture;
  uint8_t  *picture_buf;
  int       size;

  picture = av_frame_alloc ();
  if (!picture)
    return NULL;
  size = avpicture_get_size (pix_fmt, width + 1, height + 1);
  picture_buf = malloc (size);
  if (!picture_buf)
    {
      av_free (picture);
      return NULL;
    }
  avpicture_fill ((AVPicture *) picture, picture_buf, pix_fmt, width, height);
  return picture;
}

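/* open_video() selects a pixel format supported by the encoder (preferring
 * RGB24 when available), opens the video codec and allocates the frames and
 * output buffer used while encoding. */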
static void
open_video (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  Priv           *p = (Priv*)o->user_data;
  AVCodec  *codec;
  AVCodecContext *c;
  AVDictionary *codec_options = {0};

  c = st->codec;

  /* find the video encoder */
  codec = avcodec_find_encoder (c->codec_id);
  if (!codec)
    {
      fprintf (stderr, "codec not found\n");
      exit (1);
    }

  if (codec->pix_fmts){
    int i = 0;
    c->pix_fmt = codec->pix_fmts[0];
    while (codec->pix_fmts[i] !=-1)
    {
      if (codec->pix_fmts[i] ==  AV_PIX_FMT_RGB24)
         c->pix_fmt = AV_PIX_FMT_RGB24;
      i++;
    }
  }
#if 0
  if (o->video_preset[0])
    av_dict_set (&codec_options, "preset", o->video_preset, 0);
#endif

  /* open the codec */
  if (avcodec_open2 (c, codec, &codec_options) < 0)
    {
      fprintf (stderr, "could not open codec\n");
      exit (1);
    }

  p->video_outbuf = NULL;
  if (!(oc->oformat->flags & AVFMT_RAWPICTURE))
    {
      /* allocate output buffer */
      /* XXX: API change will be done */
      p->video_outbuf_size = 200000;
      p->video_outbuf = malloc (p->video_outbuf_size);
    }

  /* allocate the encoded raw picture */
  p->picture = alloc_picture (c->pix_fmt, c->width, c->height);
  if (!p->picture)
    {
      fprintf (stderr, "Could not allocate picture\n");
      exit (1);
    }

  /* if the codec's pixel format is not RGB24, a temporary RGB24 picture is
     needed too. It is then converted to the required output format */
  p->tmp_picture = NULL;
  if (c->pix_fmt != AV_PIX_FMT_RGB24)
    {
      p->tmp_picture = alloc_picture (AV_PIX_FMT_RGB24, c->width, c->height);
      if (!p->tmp_picture)
        {
          fprintf (stderr, "Could not allocate temporary picture\n");
          exit (1);
        }
    }
}

static void
close_video (Priv * p, AVFormatContext * oc, AVStream * st)
{
  avcodec_close (st->codec);
  av_free (p->picture->data[0]);
  av_free (p->picture);
  if (p->tmp_picture)
    {
      av_free (p->tmp_picture->data[0]);
      av_free (p->tmp_picture);
    }
  av_free (p->video_outbuf);
}

#include "string.h"

/* fill the frame with the input GeglBuffer contents as R'G'B' u8 */
static void
fill_rgb_image (GeglProperties *o,
                AVFrame *pict, int frame_index, int width, int height)
{
  Priv     *p = (Priv*)o->user_data;
  GeglRectangle rect={0,0,width,height};
  gegl_buffer_get (p->input, &rect, 1.0, babl_format ("R'G'B' u8"), pict->data[0], GEGL_AUTO_ROWSTRIDE, GEGL_ABYSS_NONE);
}

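/* write_video_frame() fills the current frame from the GEGL input buffer,
 * converts it to the codec's pixel format with swscale when that format is
 * not RGB24, then encodes the frame and writes the resulting packet. */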
static void
write_video_frame (GeglProperties *o,
                   AVFormatContext *oc, AVStream *st)
{
  Priv           *p = (Priv*)o->user_data;
  int             out_size, ret;
  AVCodecContext *c;
  AVFrame        *picture_ptr;

  c = st->codec;

  if (c->pix_fmt != AV_PIX_FMT_RGB24)
    {
      struct SwsContext *img_convert_ctx;
      fill_rgb_image (o, p->tmp_picture, p->frame_count, c->width,
                      c->height);

      img_convert_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_RGB24,
                                       c->width, c->height, c->pix_fmt,
                                       SWS_BICUBIC, NULL, NULL, NULL);

      if (img_convert_ctx == NULL)
        {
          fprintf(stderr, "ff_save: Cannot initialize conversion context.");
        }
      else
        {
          sws_scale(img_convert_ctx,
                    (void*)p->tmp_picture->data,
                    p->tmp_picture->linesize,
                    0,
                    c->height,
                    p->picture->data,
                    p->picture->linesize);
          p->picture->format = c->pix_fmt;
          p->picture->width = c->width;
          p->picture->height = c->height;
          sws_freeContext (img_convert_ctx);
        }
    }
  else
    {
      fill_rgb_image (o, p->picture, p->frame_count, c->width, c->height);
    }

  picture_ptr      = p->picture;
  picture_ptr->pts = p->frame_count;

  if (oc->oformat->flags & AVFMT_RAWPICTURE)
    {
      /* raw video case. The API will change slightly in the near
         future for that */
      AVPacket  pkt;
      av_init_packet (&pkt);

      pkt.flags |= AV_PKT_FLAG_KEY;
      pkt.stream_index = st->index;
      pkt.data = (uint8_t *) picture_ptr;
      pkt.size = sizeof (AVPicture);
      pkt.pts = picture_ptr->pts;
      pkt.dts = picture_ptr->pts;
      av_packet_rescale_ts (&pkt, c->time_base, st->time_base);

      ret = av_write_frame (oc, &pkt);
    }
  else
    {
      /* encode the image */
      out_size =
        avcodec_encode_video (c,
                              p->video_outbuf,
                              p->video_outbuf_size, picture_ptr);

      /* if zero size, it means the image was buffered */
      if (out_size != 0)
        {
          AVPacket  pkt;
          av_init_packet (&pkt);
          if (c->coded_frame->key_frame)
            pkt.flags |= AV_PKT_FLAG_KEY;
          pkt.stream_index = st->index;
          pkt.data = p->video_outbuf;
          pkt.size = out_size;
          pkt.pts = picture_ptr->pts;
          pkt.dts = picture_ptr->pts;
          av_packet_rescale_ts (&pkt, c->time_base, st->time_base);
          /* write the compressed frame in the media file */
          ret = av_write_frame (oc, &pkt);
        }
      else
        {
          ret = 0;
        }
    }
  if (ret != 0)
    {
      fprintf (stderr, "Error while writing video frame\n");
      exit (1);
    }
  p->frame_count++;
}

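/* tfile() sets up the output: it guesses the container format and codecs,
 * creates the video and audio streams, opens their encoders, opens the
 * target file and writes the stream header. */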
static int
tfile (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;

  if (strcmp (o->container_format, "auto"))
    p->fmt = av_guess_format (o->container_format, o->path, NULL);
  else
    p->fmt = av_guess_format (NULL, o->path, NULL);

  if (!p->fmt)
    {
      fprintf (stderr,
               "ff_save couldn't deduce output format from file extension: using MPEG.\n%s",
               "");
      p->fmt = av_guess_format ("mpeg", NULL, NULL);
    }
  p->oc = avformat_alloc_context ();
  if (!p->oc)
    {
      fprintf (stderr, "memory error\n%s", "");
      return -1;
    }

  p->oc->oformat = p->fmt;

  snprintf (p->oc->filename, sizeof (p->oc->filename), "%s", o->path);

  p->video_st = NULL;
  p->audio_st = NULL;

  if (strcmp (o->video_codec, "auto"))
  {
    AVCodec *codec = avcodec_find_encoder_by_name (o->video_codec);
    p->fmt->video_codec = AV_CODEC_ID_NONE;
    if (codec)
      p->fmt->video_codec = codec->id;
    else
      {
        fprintf (stderr, "didn't find video encoder \"%s\"\navailable codecs: ", o->video_codec);
        while ((codec = av_codec_next (codec)))
          if (av_codec_is_encoder (codec) &&
              avcodec_get_type (codec->id) == AVMEDIA_TYPE_VIDEO)
            fprintf (stderr, "%s ", codec->name);
        fprintf (stderr, "\n");
      }
  }
  if (strcmp (o->audio_codec, "auto"))
  {
    AVCodec *codec = avcodec_find_encoder_by_name (o->audio_codec);
    p->fmt->audio_codec = AV_CODEC_ID_NONE;
    if (codec)
      p->fmt->audio_codec = codec->id;
    else
      {
        fprintf (stderr, "didn't find audio encoder \"%s\"\navailable codecs: ", o->audio_codec);
        while ((codec = av_codec_next (codec)))
          if (av_codec_is_encoder (codec) &&
              avcodec_get_type (codec->id) == AVMEDIA_TYPE_AUDIO)
            fprintf (stderr, "%s ", codec->name);
        fprintf (stderr, "\n");
      }
  }

  if (p->fmt->video_codec != AV_CODEC_ID_NONE)
    {
      p->video_st = add_video_stream (o, p->oc, p->fmt->video_codec);
    }
  if (p->fmt->audio_codec != AV_CODEC_ID_NONE)
    {
      p->audio_st = add_audio_stream (o, p->oc, p->fmt->audio_codec);
    }

  if (p->video_st)
    open_video (o, p->oc, p->video_st);

  if (p->audio_st)
    open_audio (o, p->oc, p->audio_st);

  av_dump_format (p->oc, 0, o->path, 1);

  if (avio_open (&p->oc->pb, o->path, AVIO_FLAG_WRITE) < 0)
    {
      fprintf (stderr, "couldn't open '%s'\n", o->path);
      return -1;
    }

  avformat_write_header (p->oc, NULL);
  return 0;
}

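/* process() is the sink's per-frame entry point: it lazily sets up the
 * muxer on the first frame and then writes one video frame (plus any queued
 * audio) per invocation. */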
static gboolean
process (GeglOperation       *operation,
         GeglBuffer          *input,
         const GeglRectangle *result,
         gint                 level)
{
  GeglProperties *o = GEGL_PROPERTIES (operation);
  Priv *p = (Priv*)o->user_data;
  static gint     inited = 0;

  g_assert (input);

  if (p == NULL)
    init (o);
  p = (Priv*)o->user_data;

  p->width = result->width;
  p->height = result->height;
  p->input = input;

  if (!inited)
    {
      tfile (o);
      inited = 1;
    }

  write_video_frame (o, p->oc, p->video_st);
  if (p->audio_st)
    write_audio_frame (o, p->oc, p->audio_st);

  return  TRUE;
}

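/* flush_audio() and flush_video() drain frames still buffered inside the
 * encoders (by feeding them NULL frames) so every pending packet is muxed
 * before the trailer is written in finalize(). */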
static void flush_audio (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;

  int got_packet;
  if (!p->audio_st)
    return;
  do
  {
    AVPacket  pkt = { 0 };
    int ret;
    got_packet = 0;
    av_init_packet (&pkt);
    ret = avcodec_encode_audio2 (p->audio_st->codec, &pkt, NULL, &got_packet);
    if (ret < 0)
      break;
    if (got_packet)
      {
        pkt.stream_index = p->audio_st->index;
        av_packet_rescale_ts (&pkt, p->audio_st->codec->time_base, p->audio_st->time_base);
        av_interleaved_write_frame (p->oc, &pkt);
        av_free_packet (&pkt);
      }
  } while (got_packet);
}

static void flush_video (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;
  int got_packet = 0;
  long ts = p->frame_count;
  do {
    AVPacket  pkt = { 0 };
    int ret;
    got_packet = 0;
    av_init_packet (&pkt);
    ret = avcodec_encode_video2 (p->video_st->codec, &pkt, NULL, &got_packet);
    if (ret < 0)
      return;

    if (got_packet)
      {
        pkt.stream_index = p->video_st->index;
        pkt.pts = ts;
        pkt.dts = ts++;
        av_packet_rescale_ts (&pkt, p->video_st->codec->time_base, p->video_st->time_base);
        av_interleaved_write_frame (p->oc, &pkt);
        av_free_packet (&pkt);
      }
  } while (got_packet);
}

static void
finalize (GObject *object)
{
  GeglProperties *o = GEGL_PROPERTIES (object);
  if (o->user_data)
    {
      Priv *p = (Priv*)o->user_data;
      flush_audio (o);
      flush_video (o);

      av_write_trailer (p->oc);

      if (p->video_st)
        close_video (p, p->oc, p->video_st);
      if (p->audio_st)
        close_audio (p, p->oc, p->audio_st);

      avio_closep (&p->oc->pb);
      avformat_free_context (p->oc);

      g_free (o->user_data);
      o->user_data = NULL;
    }

  G_OBJECT_CLASS (g_type_class_peek_parent (G_OBJECT_GET_CLASS (object)))->finalize (object);
}


static void
gegl_op_class_init (GeglOpClass *klass)
{
  GeglOperationClass     *operation_class;
  GeglOperationSinkClass *sink_class;

  G_OBJECT_CLASS (klass)->finalize = finalize;

  operation_class = GEGL_OPERATION_CLASS (klass);
  sink_class      = GEGL_OPERATION_SINK_CLASS (klass);

  sink_class->process = process;
  sink_class->needs_full = TRUE;

  gegl_operation_class_set_keys (operation_class,
    "name"        , "gegl:ff-save",
    "categories"  , "output:video",
    "description" , _("FFmpeg video output sink"),
    NULL);
}

#endif
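
/* Usage sketch (illustrative only, not part of the operation): the op
 * registers as "gegl:ff-save" and is driven as the sink of a GEGL graph.
 * The source operation, file names and frame rate below are assumptions.
 *
 *   GeglNode *graph = gegl_node_new ();
 *   GeglNode *src   = gegl_node_new_child (graph,
 *                                          "operation", "gegl:png-load",
 *                                          "path", "frame.png", NULL);
 *   GeglNode *sink  = gegl_node_new_child (graph,
 *                                          "operation", "gegl:ff-save",
 *                                          "path", "/tmp/out.ogv",
 *                                          "frame-rate", 25.0, NULL);
 *
 *   gegl_node_link_many (src, sink, NULL);
 *   gegl_node_process (sink);   // one video frame is written per process call
 *   g_object_unref (graph);
 */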