[OE-core] [PATCH] gst-ffmpeg: add PACKAGECONFIG for libav9 and patch from Gentoo
Martin Jansa
martin.jansa at gmail.com
Sat Aug 16 18:23:54 UTC 2014
On Fri, Aug 08, 2014 at 03:57:52PM +0200, Martin Jansa wrote:
> * apply the patch only when the libav9 PACKAGECONFIG is selected, because
>   the changes aren't backwards compatible
ping
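
For context, the recipe side of this (the two lines counted in the diffstat
below) just gates the extra patch behind a PACKAGECONFIG. A rough sketch of
the mechanism, assuming bb.utils.contains is used; the exact lines in the
actual hunk may differ:

    # sketch only: pull the non-backwards-compatible patch into SRC_URI
    # when the libav9 PACKAGECONFIG is enabled, otherwise build as before
    PACKAGECONFIG[libav9] = ",,"
    SRC_URI += "${@bb.utils.contains('PACKAGECONFIG', 'libav9', 'file://libav-9.patch', '', d)}"
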
> Signed-off-by: Martin Jansa <Martin.Jansa at gmail.com>
> ---
> .../gstreamer/gst-ffmpeg-0.10.13/libav-9.patch | 9304 ++++++++++++++++++++
> .../gstreamer/gst-ffmpeg_0.10.13.bb | 2 +
> 2 files changed, 9306 insertions(+)
> create mode 100644 meta/recipes-multimedia/gstreamer/gst-ffmpeg-0.10.13/libav-9.patch
>
> diff --git a/meta/recipes-multimedia/gstreamer/gst-ffmpeg-0.10.13/libav-9.patch b/meta/recipes-multimedia/gstreamer/gst-ffmpeg-0.10.13/libav-9.patch
> new file mode 100644
> index 0000000..9055b34
> --- /dev/null
> +++ b/meta/recipes-multimedia/gstreamer/gst-ffmpeg-0.10.13/libav-9.patch
> @@ -0,0 +1,9304 @@
> +Taken from the Gentoo patchset:
> +http://dev.gentoo.org/~tetromino/distfiles/gst-plugins-ffmpeg/gst-ffmpeg-0.10.13_p2012.11-libav-9-patches.tar.xz
> +
> +Upstream-Status: Pending
> +
> +Contains the following changes, rebased to apply on top of our changes:
> +0002-Fix-includes-for-systemwide-build.patch
> +0003-libav-Switch-to-non-deprecated-symbols.patch
> +0005-av-Update-for-some-constant-changes.patch
> +0006-av-Remove-palette-support-for-now.patch
> +0007-av-Port-remaining-simple-bits.patch
> +0008-av-Use-av_codec_is_-en-de-coder-API-instead-of-priva.patch
> +0009-avprotocol-Port-from-the-URL-protocol-handler-to-san.patch
> +0010-avdec-don-t-wait-for-keyframe.patch
> +
> +The following changes were skipped:
> +0001-Partially-revert-commit-0300801b.patch
> +0004-av-update-to-use-AVOption-variants.patch
> +0011-av_get_bits_per_sample_format-was-removed-in-libav-9.patch
> +
> +Signed-off-by: Martin Jansa <Martin.Jansa at gmail.com>
> +
> +diff -uNr gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpeg.c gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpeg.c
> +--- gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpeg.c 2011-10-31 11:14:03.000000000 +0100
> ++++ gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpeg.c 2014-08-08 15:26:07.872857555 +0200
> +@@ -151,9 +151,6 @@
> + #endif
> + gst_ffmpegaudioresample_register (plugin);
> +
> +- av_register_protocol2 (&gstreamer_protocol, sizeof (URLProtocol));
> +- av_register_protocol2 (&gstpipe_protocol, sizeof (URLProtocol));
> +-
> + /* Now we can return the pointer to the newly created Plugin object. */
> + return TRUE;
> + }
> +diff -uNr gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpeg.h gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpeg.h
> +--- gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpeg.h 2011-05-17 10:53:16.000000000 +0200
> ++++ gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpeg.h 2014-08-08 15:26:07.872857555 +0200
> +@@ -58,10 +58,13 @@
> + int gst_ffmpeg_avcodec_close (AVCodecContext *avctx);
> + int gst_ffmpeg_av_find_stream_info(AVFormatContext *ic);
> +
> +-G_END_DECLS
> ++int gst_ffmpegdata_open (GstPad * pad, int flags, AVIOContext ** context);
> ++int gst_ffmpegdata_close (AVIOContext * h);
> ++typedef struct _GstFFMpegPipe GstFFMpegPipe;
> ++int gst_ffmpeg_pipe_open (GstFFMpegPipe *ffpipe, int flags, AVIOContext ** context);
> ++int gst_ffmpeg_pipe_close (AVIOContext * h);
> +
> +-extern URLProtocol gstreamer_protocol;
> +-extern URLProtocol gstpipe_protocol;
> ++G_END_DECLS
> +
> + /* use GST_FFMPEG URL_STREAMHEADER with URL_WRONLY if the first
> + * buffer should be used as streamheader property on the pad's caps. */
> +diff -uNr gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegcfg.c gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegcfg.c
> +--- gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegcfg.c 2011-07-12 16:35:27.000000000 +0200
> ++++ gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegcfg.c 2014-08-08 15:24:17.899853612 +0200
> +@@ -147,7 +147,6 @@
> + {FF_DCT_FASTINT, "Fast Integer", "fastint"},
> + {FF_DCT_INT, "Accurate Integer", "int"},
> + {FF_DCT_MMX, "MMX", "mmx"},
> +- {FF_DCT_MLIB, "MLIB", "mlib"},
> + {FF_DCT_ALTIVEC, "ALTIVEC", "altivec"},
> + {FF_DCT_FAAN, "FAAN", "faan"},
> + {0, NULL, NULL},
> +@@ -173,8 +172,6 @@
> + {FF_IDCT_SIMPLE, "Simple", "simple"},
> + {FF_IDCT_SIMPLEMMX, "Simple MMX", "simplemmx"},
> + {FF_IDCT_LIBMPEG2MMX, "LIBMPEG2MMX", "libmpeg2mmx"},
> +- {FF_IDCT_PS2, "PS2", "ps2"},
> +- {FF_IDCT_MLIB, "MLIB", "mlib"},
> + {FF_IDCT_ARM, "ARM", "arm"},
> + {FF_IDCT_ALTIVEC, "ALTIVEC", "altivec"},
> + {FF_IDCT_SH4, "SH4", "sh4"},
> +@@ -263,16 +260,11 @@
> +
> + if (!ffmpeg_flags_type) {
> + static const GFlagsValue ffmpeg_flags[] = {
> +- {CODEC_FLAG_OBMC, "Use overlapped block motion compensation (h263+)",
> +- "obmc"},
> + {CODEC_FLAG_QSCALE, "Use fixed qscale", "qscale"},
> + {CODEC_FLAG_4MV, "Allow 4 MV per MB", "4mv"},
> +- {CODEC_FLAG_H263P_AIV, "H.263 alternative inter VLC", "aiv"},
> + {CODEC_FLAG_QPEL, "Quartel Pel Motion Compensation", "qpel"},
> + {CODEC_FLAG_GMC, "GMC", "gmc"},
> + {CODEC_FLAG_MV0, "Always try a MB with MV (0,0)", "mv0"},
> +- {CODEC_FLAG_PART,
> +- "Store MV, DC and AC coefficients in seperate partitions", "part"},
> + {CODEC_FLAG_LOOP_FILTER, "Loop filter", "loop-filter"},
> + {CODEC_FLAG_GRAY, "Only decode/encode grayscale", "gray"},
> + {CODEC_FLAG_NORMALIZE_AQP,
> +@@ -282,13 +274,9 @@
> + "global-headers"},
> + {CODEC_FLAG_AC_PRED, "H263 Advanced Intra Coding / MPEG4 AC prediction",
> + "aic"},
> +- {CODEC_FLAG_H263P_UMV, "Unlimited Motion Vector", "umv"},
> + {CODEC_FLAG_CBP_RD, "Rate Distoration Optimization for CBP", "cbp-rd"},
> + {CODEC_FLAG_QP_RD, "Rate Distoration Optimization for QP selection",
> + "qp-rd"},
> +- {CODEC_FLAG_H263P_SLICE_STRUCT, "H263 slice struct", "ss"},
> +- {CODEC_FLAG_SVCD_SCAN_OFFSET,
> +- "Reserve space for SVCD scan offset user data", "scanoffset"},
> + {CODEC_FLAG_CLOSED_GOP, "Closed GOP", "closedgop"},
> + {0, NULL, NULL},
> + };
> +diff -uNr gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegcodecmap.c gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegcodecmap.c
> +--- gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegcodecmap.c 2011-10-31 11:14:03.000000000 +0100
> ++++ gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegcodecmap.c 2014-08-08 15:31:30.968869139 +0200
> +@@ -25,8 +25,10 @@
> + #include <gst/gst.h>
> + #ifdef HAVE_FFMPEG_UNINSTALLED
> + #include <avcodec.h>
> ++#include <channel_layout.h>
> + #else
> + #include <libavcodec/avcodec.h>
> ++#include <libavutil/channel_layout.h>
> + #endif
> + #include <string.h>
> +
> +@@ -35,43 +37,6 @@
> +
> + #include <gst/pbutils/codec-utils.h>
> +
> +-/*
> +- * Read a palette from a caps.
> +- */
> +-
> +-static void
> +-gst_ffmpeg_get_palette (const GstCaps * caps, AVCodecContext * context)
> +-{
> +- GstStructure *str = gst_caps_get_structure (caps, 0);
> +- const GValue *palette_v;
> +- const GstBuffer *palette;
> +-
> +- /* do we have a palette? */
> +- if ((palette_v = gst_structure_get_value (str, "palette_data")) && context) {
> +- palette = gst_value_get_buffer (palette_v);
> +- if (GST_BUFFER_SIZE (palette) >= AVPALETTE_SIZE) {
> +- if (context->palctrl)
> +- av_free (context->palctrl);
> +- context->palctrl = av_malloc (sizeof (AVPaletteControl));
> +- context->palctrl->palette_changed = 1;
> +- memcpy (context->palctrl->palette, GST_BUFFER_DATA (palette),
> +- AVPALETTE_SIZE);
> +- }
> +- }
> +-}
> +-
> +-static void
> +-gst_ffmpeg_set_palette (GstCaps * caps, AVCodecContext * context)
> +-{
> +- if (context->palctrl) {
> +- GstBuffer *palette = gst_buffer_new_and_alloc (AVPALETTE_SIZE);
> +-
> +- memcpy (GST_BUFFER_DATA (palette), context->palctrl->palette,
> +- AVPALETTE_SIZE);
> +- gst_caps_set_simple (caps, "palette_data", GST_TYPE_BUFFER, palette, NULL);
> +- }
> +-}
> +-
> + /* IMPORTANT: Keep this sorted by the ffmpeg channel masks */
> + static const struct
> + {
> +@@ -79,26 +44,26 @@
> + GstAudioChannelPosition gst;
> + } _ff_to_gst_layout[] = {
> + {
> +- CH_FRONT_LEFT, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT}, {
> +- CH_FRONT_RIGHT, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT}, {
> +- CH_FRONT_CENTER, GST_AUDIO_CHANNEL_POSITION_FRONT_CENTER}, {
> +- CH_LOW_FREQUENCY, GST_AUDIO_CHANNEL_POSITION_LFE}, {
> +- CH_BACK_LEFT, GST_AUDIO_CHANNEL_POSITION_REAR_LEFT}, {
> +- CH_BACK_RIGHT, GST_AUDIO_CHANNEL_POSITION_REAR_RIGHT}, {
> +- CH_FRONT_LEFT_OF_CENTER, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER}, {
> +- CH_FRONT_RIGHT_OF_CENTER, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER}, {
> +- CH_BACK_CENTER, GST_AUDIO_CHANNEL_POSITION_REAR_CENTER}, {
> +- CH_SIDE_LEFT, GST_AUDIO_CHANNEL_POSITION_SIDE_LEFT}, {
> +- CH_SIDE_RIGHT, GST_AUDIO_CHANNEL_POSITION_SIDE_RIGHT}, {
> +- CH_TOP_CENTER, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> +- CH_TOP_FRONT_LEFT, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> +- CH_TOP_FRONT_CENTER, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> +- CH_TOP_FRONT_RIGHT, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> +- CH_TOP_BACK_LEFT, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> +- CH_TOP_BACK_CENTER, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> +- CH_TOP_BACK_RIGHT, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> +- CH_STEREO_LEFT, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT}, {
> +- CH_STEREO_RIGHT, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT}
> ++ AV_CH_FRONT_LEFT, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT}, {
> ++ AV_CH_FRONT_RIGHT, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT}, {
> ++ AV_CH_FRONT_CENTER, GST_AUDIO_CHANNEL_POSITION_FRONT_CENTER}, {
> ++ AV_CH_LOW_FREQUENCY, GST_AUDIO_CHANNEL_POSITION_LFE}, {
> ++ AV_CH_BACK_LEFT, GST_AUDIO_CHANNEL_POSITION_REAR_LEFT}, {
> ++ AV_CH_BACK_RIGHT, GST_AUDIO_CHANNEL_POSITION_REAR_RIGHT}, {
> ++ AV_CH_FRONT_LEFT_OF_CENTER, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER}, {
> ++ AV_CH_FRONT_RIGHT_OF_CENTER, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER}, {
> ++ AV_CH_BACK_CENTER, GST_AUDIO_CHANNEL_POSITION_REAR_CENTER}, {
> ++ AV_CH_SIDE_LEFT, GST_AUDIO_CHANNEL_POSITION_SIDE_LEFT}, {
> ++ AV_CH_SIDE_RIGHT, GST_AUDIO_CHANNEL_POSITION_SIDE_RIGHT}, {
> ++ AV_CH_TOP_CENTER, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> ++ AV_CH_TOP_FRONT_LEFT, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> ++ AV_CH_TOP_FRONT_CENTER, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> ++ AV_CH_TOP_FRONT_RIGHT, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> ++ AV_CH_TOP_BACK_LEFT, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> ++ AV_CH_TOP_BACK_CENTER, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> ++ AV_CH_TOP_BACK_RIGHT, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> ++ AV_CH_STEREO_LEFT, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT}, {
> ++ AV_CH_STEREO_RIGHT, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT}
> + };
> +
> + static GstAudioChannelPosition *
> +@@ -342,8 +307,8 @@
> +
> + if (channel_layout == 0) {
> + const guint64 default_channel_set[] = {
> +- 0, 0, CH_LAYOUT_SURROUND, CH_LAYOUT_QUAD, CH_LAYOUT_5POINT0,
> +- CH_LAYOUT_5POINT1, 0, CH_LAYOUT_7POINT1
> ++ 0, 0, AV_CH_LAYOUT_SURROUND, AV_CH_LAYOUT_QUAD, AV_CH_LAYOUT_5POINT0,
> ++ AV_CH_LAYOUT_5POINT1, 0, AV_CH_LAYOUT_7POINT1
> + };
> +
> + switch (codec_id) {
> +@@ -1267,8 +1232,6 @@
> + case CODEC_ID_FLIC:
> + case CODEC_ID_VMDVIDEO:
> + case CODEC_ID_VMDAUDIO:
> +- case CODEC_ID_SONIC:
> +- case CODEC_ID_SONIC_LS:
> + case CODEC_ID_SNOW:
> + case CODEC_ID_VIXL:
> + case CODEC_ID_QPEG:
> +@@ -1689,11 +1652,6 @@
> + gst_buffer_unref (data);
> + }
> +
> +- /* palette */
> +- if (context) {
> +- gst_ffmpeg_set_palette (caps, context);
> +- }
> +-
> + GST_LOG ("caps for codec_id=%d: %" GST_PTR_FORMAT, codec_id, caps);
> +
> + } else {
> +@@ -1830,9 +1788,6 @@
> + "bpp", G_TYPE_INT, bpp,
> + "depth", G_TYPE_INT, depth,
> + "endianness", G_TYPE_INT, endianness, NULL);
> +- if (caps && context) {
> +- gst_ffmpeg_set_palette (caps, context);
> +- }
> + }
> + } else if (fmt) {
> + caps = gst_ff_vid_caps_new (context, codec_id, "video/x-raw-yuv",
> +@@ -1857,7 +1812,7 @@
> + */
> +
> + static GstCaps *
> +-gst_ffmpeg_smpfmt_to_caps (enum SampleFormat sample_fmt,
> ++gst_ffmpeg_smpfmt_to_caps (enum AVSampleFormat sample_fmt,
> + AVCodecContext * context, enum CodecID codec_id)
> + {
> + GstCaps *caps = NULL;
> +@@ -1867,22 +1822,22 @@
> + gboolean signedness = FALSE;
> +
> + switch (sample_fmt) {
> +- case SAMPLE_FMT_S16:
> ++ case AV_SAMPLE_FMT_S16:
> + signedness = TRUE;
> + bpp = 16;
> + break;
> +
> +- case SAMPLE_FMT_S32:
> ++ case AV_SAMPLE_FMT_S32:
> + signedness = TRUE;
> + bpp = 32;
> + break;
> +
> +- case SAMPLE_FMT_FLT:
> ++ case AV_SAMPLE_FMT_FLT:
> + integer = FALSE;
> + bpp = 32;
> + break;
> +
> +- case SAMPLE_FMT_DBL:
> ++ case AV_SAMPLE_FMT_DBL:
> + integer = FALSE;
> + bpp = 64;
> + break;
> +@@ -1941,12 +1896,12 @@
> + }
> + } else {
> + GstCaps *temp;
> +- enum SampleFormat i;
> ++ enum AVSampleFormat i;
> + AVCodecContext ctx = { 0, };
> +
> + ctx.channels = -1;
> + caps = gst_caps_new_empty ();
> +- for (i = 0; i <= SAMPLE_FMT_DBL; i++) {
> ++ for (i = 0; i <= AV_SAMPLE_FMT_DBL; i++) {
> + temp = gst_ffmpeg_smpfmt_to_caps (i, encode ? &ctx : NULL, codec_id);
> + if (temp != NULL) {
> + gst_caps_append (caps, temp);
> +@@ -2049,9 +2004,9 @@
> + gst_structure_get_int (structure, "endianness", &endianness)) {
> + if (endianness == G_BYTE_ORDER) {
> + if (width == 32)
> +- context->sample_fmt = SAMPLE_FMT_FLT;
> ++ context->sample_fmt = AV_SAMPLE_FMT_FLT;
> + else if (width == 64)
> +- context->sample_fmt = SAMPLE_FMT_DBL;
> ++ context->sample_fmt = AV_SAMPLE_FMT_DBL;
> + }
> + }
> + } else {
> +@@ -2062,9 +2017,9 @@
> + gst_structure_get_int (structure, "endianness", &endianness)) {
> + if ((endianness == G_BYTE_ORDER) && (signedness == TRUE)) {
> + if ((width == 16) && (depth == 16))
> +- context->sample_fmt = SAMPLE_FMT_S16;
> ++ context->sample_fmt = AV_SAMPLE_FMT_S16;
> + else if ((width == 32) && (depth == 32))
> +- context->sample_fmt = SAMPLE_FMT_S32;
> ++ context->sample_fmt = AV_SAMPLE_FMT_S32;
> + }
> + }
> + }
> +@@ -2190,7 +2145,6 @@
> + } else {
> + if (bpp == 8) {
> + context->pix_fmt = PIX_FMT_PAL8;
> +- gst_ffmpeg_get_palette (caps, context);
> + }
> + }
> + }
> +@@ -2576,7 +2530,6 @@
> + switch (codec_type) {
> + case AVMEDIA_TYPE_VIDEO:
> + gst_ffmpeg_caps_to_pixfmt (caps, context, codec_id == CODEC_ID_RAWVIDEO);
> +- gst_ffmpeg_get_palette (caps, context);
> + break;
> + case AVMEDIA_TYPE_AUDIO:
> + gst_ffmpeg_caps_to_smpfmt (caps, context, FALSE);
> +diff -uNr gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegcodecmap.c.orig gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegcodecmap.c.orig
> +--- gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegcodecmap.c.orig 1970-01-01 01:00:00.000000000 +0100
> ++++ gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegcodecmap.c.orig 2014-08-08 15:30:34.006867097 +0200
> +@@ -0,0 +1,3447 @@
> ++/* GStreamer
> ++ * Copyright (C) <1999> Erik Walthinsen <omega at cse.ogi.edu>
> ++ * This file:
> ++ * Copyright (c) 2002-2004 Ronald Bultje <rbultje at ronald.bitfreak.net>
> ++ *
> ++ * This library is free software; you can redistribute it and/or
> ++ * modify it under the terms of the GNU Library General Public
> ++ * License as published by the Free Software Foundation; either
> ++ * version 2 of the License, or (at your option) any later version.
> ++ *
> ++ * This library is distributed in the hope that it will be useful,
> ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
> ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> ++ * Library General Public License for more details.
> ++ *
> ++ * You should have received a copy of the GNU Library General Public
> ++ * License along with this library; if not, write to the
> ++ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
> ++ * Boston, MA 02111-1307, USA.
> ++ */
> ++
> ++#ifdef HAVE_CONFIG_H
> ++#include "config.h"
> ++#endif
> ++#include <gst/gst.h>
> ++#ifdef HAVE_FFMPEG_UNINSTALLED
> ++#include <avcodec.h>
> ++#include <channel_layout.h>
> ++#else
> ++#include <libavcodec/avcodec.h>
> ++#include <libavutil/channel_layout.h>
> ++#endif
> ++#include <string.h>
> ++
> ++#include "gstffmpeg.h"
> ++#include "gstffmpegcodecmap.h"
> ++
> ++#include <gst/pbutils/codec-utils.h>
> ++
> ++/*
> ++ * Read a palette from a caps.
> ++ */
> ++
> ++static void
> ++gst_ffmpeg_get_palette (const GstCaps * caps, AVCodecContext * context)
> ++{
> ++ GstStructure *str = gst_caps_get_structure (caps, 0);
> ++ const GValue *palette_v;
> ++ const GstBuffer *palette;
> ++
> ++ /* do we have a palette? */
> ++ if ((palette_v = gst_structure_get_value (str, "palette_data")) && context) {
> ++ palette = gst_value_get_buffer (palette_v);
> ++ if (GST_BUFFER_SIZE (palette) >= AVPALETTE_SIZE) {
> ++ if (context->palctrl)
> ++ av_free (context->palctrl);
> ++ context->palctrl = av_malloc (sizeof (AVPaletteControl));
> ++ context->palctrl->palette_changed = 1;
> ++ memcpy (context->palctrl->palette, GST_BUFFER_DATA (palette),
> ++ AVPALETTE_SIZE);
> ++ }
> ++ }
> ++}
> ++
> ++static void
> ++gst_ffmpeg_set_palette (GstCaps * caps, AVCodecContext * context)
> ++{
> ++ if (context->palctrl) {
> ++ GstBuffer *palette = gst_buffer_new_and_alloc (AVPALETTE_SIZE);
> ++
> ++ memcpy (GST_BUFFER_DATA (palette), context->palctrl->palette,
> ++ AVPALETTE_SIZE);
> ++ gst_caps_set_simple (caps, "palette_data", GST_TYPE_BUFFER, palette, NULL);
> ++ }
> ++}
> ++
> ++/* IMPORTANT: Keep this sorted by the ffmpeg channel masks */
> ++static const struct
> ++{
> ++ guint64 ff;
> ++ GstAudioChannelPosition gst;
> ++} _ff_to_gst_layout[] = {
> ++ {
> ++ AV_CH_FRONT_LEFT, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT}, {
> ++ AV_CH_FRONT_RIGHT, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT}, {
> ++ AV_CH_FRONT_CENTER, GST_AUDIO_CHANNEL_POSITION_FRONT_CENTER}, {
> ++ AV_CH_LOW_FREQUENCY, GST_AUDIO_CHANNEL_POSITION_LFE}, {
> ++ AV_CH_BACK_LEFT, GST_AUDIO_CHANNEL_POSITION_REAR_LEFT}, {
> ++ AV_CH_BACK_RIGHT, GST_AUDIO_CHANNEL_POSITION_REAR_RIGHT}, {
> ++ AV_CH_FRONT_LEFT_OF_CENTER, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER}, {
> ++ AV_CH_FRONT_RIGHT_OF_CENTER, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER}, {
> ++ AV_CH_BACK_CENTER, GST_AUDIO_CHANNEL_POSITION_REAR_CENTER}, {
> ++ AV_CH_SIDE_LEFT, GST_AUDIO_CHANNEL_POSITION_SIDE_LEFT}, {
> ++ AV_CH_SIDE_RIGHT, GST_AUDIO_CHANNEL_POSITION_SIDE_RIGHT}, {
> ++ AV_CH_TOP_CENTER, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> ++ AV_CH_TOP_FRONT_LEFT, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> ++ AV_CH_TOP_FRONT_CENTER, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> ++ AV_CH_TOP_FRONT_RIGHT, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> ++ AV_CH_TOP_BACK_LEFT, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> ++ AV_CH_TOP_BACK_CENTER, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> ++ AV_CH_TOP_BACK_RIGHT, GST_AUDIO_CHANNEL_POSITION_NONE}, {
> ++ AV_CH_STEREO_LEFT, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT}, {
> ++ AV_CH_STEREO_RIGHT, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT}
> ++};
> ++
> ++static GstAudioChannelPosition *
> ++gst_ff_channel_layout_to_gst (guint64 channel_layout, guint channels)
> ++{
> ++ guint nchannels = 0, i, j;
> ++ GstAudioChannelPosition *pos = NULL;
> ++ gboolean none_layout = FALSE;
> ++
> ++ for (i = 0; i < 64; i++) {
> ++ if ((channel_layout & (G_GUINT64_CONSTANT (1) << i)) != 0) {
> ++ nchannels++;
> ++ }
> ++ }
> ++
> ++ if (channel_layout == 0) {
> ++ nchannels = channels;
> ++ none_layout = TRUE;
> ++ }
> ++
> ++ if (nchannels != channels) {
> ++ GST_ERROR ("Number of channels is different (%u != %u)", channels,
> ++ nchannels);
> ++ return NULL;
> ++ }
> ++
> ++ pos = g_new (GstAudioChannelPosition, nchannels);
> ++
> ++ for (i = 0, j = 0; i < G_N_ELEMENTS (_ff_to_gst_layout); i++) {
> ++ if ((channel_layout & _ff_to_gst_layout[i].ff) != 0) {
> ++ pos[j++] = _ff_to_gst_layout[i].gst;
> ++
> ++ if (_ff_to_gst_layout[i].gst == GST_AUDIO_CHANNEL_POSITION_NONE)
> ++ none_layout = TRUE;
> ++ }
> ++ }
> ++
> ++ if (j != nchannels) {
> ++ GST_WARNING ("Unknown channels in channel layout - assuming NONE layout");
> ++ none_layout = TRUE;
> ++ }
> ++
> ++ if (!none_layout && !gst_audio_check_channel_positions (pos, nchannels)) {
> ++ GST_ERROR ("Invalid channel layout %" G_GUINT64_FORMAT
> ++ " - assuming NONE layout", channel_layout);
> ++ none_layout = TRUE;
> ++ }
> ++
> ++ if (none_layout) {
> ++ if (nchannels == 1) {
> ++ pos[0] = GST_AUDIO_CHANNEL_POSITION_FRONT_MONO;
> ++ } else if (nchannels == 2) {
> ++ pos[0] = GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT;
> ++ pos[1] = GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT;
> ++ } else if (channel_layout == 0) {
> ++ g_free (pos);
> ++ pos = NULL;
> ++ } else {
> ++ for (i = 0; i < nchannels; i++)
> ++ pos[i] = GST_AUDIO_CHANNEL_POSITION_NONE;
> ++ }
> ++ }
> ++
> ++ if (nchannels == 1 && pos[0] == GST_AUDIO_CHANNEL_POSITION_FRONT_CENTER) {
> ++ GST_DEBUG ("mono common case; won't set channel positions");
> ++ g_free (pos);
> ++ pos = NULL;
> ++ } else if (nchannels == 2 && pos[0] == GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT
> ++ && pos[1] == GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT) {
> ++ GST_DEBUG ("stereo common case; won't set channel positions");
> ++ g_free (pos);
> ++ pos = NULL;
> ++ }
> ++
> ++ return pos;
> ++}
> ++
> ++/* this macro makes a caps width fixed or unfixed width/height
> ++ * properties depending on whether we've got a context.
> ++ *
> ++ * See below for why we use this.
> ++ *
> ++ * We should actually do this stuff at the end, like in riff-media.c,
> ++ * but I'm too lazy today. Maybe later.
> ++ */
> ++static GstCaps *
> ++gst_ff_vid_caps_new (AVCodecContext * context, enum CodecID codec_id,
> ++ const char *mimetype, const char *fieldname, ...)
> ++{
> ++ GstStructure *structure = NULL;
> ++ GstCaps *caps = NULL;
> ++ va_list var_args;
> ++ gint i;
> ++
> ++ GST_LOG ("context:%p, codec_id:%d, mimetype:%s", context, codec_id, mimetype);
> ++
> ++ /* fixed, non probing context */
> ++ if (context != NULL && context->width != -1) {
> ++ gint num, denom;
> ++
> ++ caps = gst_caps_new_simple (mimetype,
> ++ "width", G_TYPE_INT, context->width,
> ++ "height", G_TYPE_INT, context->height, NULL);
> ++
> ++ num = context->time_base.den / context->ticks_per_frame;
> ++ denom = context->time_base.num;
> ++
> ++ if (!denom) {
> ++ GST_LOG ("invalid framerate: %d/0, -> %d/1", num, num);
> ++ denom = 1;
> ++ }
> ++ if (gst_util_fraction_compare (num, denom, 1000, 1) > 0) {
> ++ GST_LOG ("excessive framerate: %d/%d, -> 0/1", num, denom);
> ++ num = 0;
> ++ denom = 1;
> ++ }
> ++ GST_LOG ("setting framerate: %d/%d", num, denom);
> ++ gst_caps_set_simple (caps,
> ++ "framerate", GST_TYPE_FRACTION, num, denom, NULL);
> ++ } else {
> ++ /* so we are after restricted caps in this case */
> ++ switch (codec_id) {
> ++ case CODEC_ID_H261:
> ++ {
> ++ caps = gst_caps_new_simple (mimetype,
> ++ "width", G_TYPE_INT, 352,
> ++ "height", G_TYPE_INT, 288,
> ++ "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL);
> ++ gst_caps_append (caps, gst_caps_new_simple (mimetype,
> ++ "width", G_TYPE_INT, 176,
> ++ "height", G_TYPE_INT, 144,
> ++ "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL));
> ++ break;
> ++ }
> ++ case CODEC_ID_H263:
> ++ {
> ++ /* 128x96, 176x144, 352x288, 704x576, and 1408x1152. slightly reordered
> ++ * because we want automatic negotiation to go as close to 320x240 as
> ++ * possible. */
> ++ const static gint widths[] = { 352, 704, 176, 1408, 128 };
> ++ const static gint heights[] = { 288, 576, 144, 1152, 96 };
> ++ GstCaps *temp;
> ++ gint n_sizes = G_N_ELEMENTS (widths);
> ++
> ++ caps = gst_caps_new_empty ();
> ++ for (i = 0; i < n_sizes; i++) {
> ++ temp = gst_caps_new_simple (mimetype,
> ++ "width", G_TYPE_INT, widths[i],
> ++ "height", G_TYPE_INT, heights[i],
> ++ "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL);
> ++
> ++ gst_caps_append (caps, temp);
> ++ }
> ++ break;
> ++ }
> ++ case CODEC_ID_DVVIDEO:
> ++ {
> ++ static struct
> ++ {
> ++ guint32 csp;
> ++ gint width, height;
> ++ gint par_n, par_d;
> ++ gint framerate_n, framerate_d;
> ++ } profiles[] = {
> ++ {
> ++ GST_MAKE_FOURCC ('Y', '4', '1', 'B'), 720, 480, 10, 11, 30000, 1001}, {
> ++ GST_MAKE_FOURCC ('Y', '4', '1', 'B'), 720, 480, 40, 33, 30000, 1001}, {
> ++ GST_MAKE_FOURCC ('I', '4', '2', '0'), 720, 576, 59, 54, 25, 1}, {
> ++ GST_MAKE_FOURCC ('I', '4', '2', '0'), 720, 576, 118, 81, 25, 1}, {
> ++ GST_MAKE_FOURCC ('Y', '4', '1', 'B'), 720, 576, 59, 54, 25, 1}, {
> ++ GST_MAKE_FOURCC ('Y', '4', '1', 'B'), 720, 576, 118, 81, 25, 1}
> ++ };
> ++ GstCaps *temp;
> ++ gint n_sizes = G_N_ELEMENTS (profiles);
> ++
> ++ caps = gst_caps_new_empty ();
> ++ for (i = 0; i < n_sizes; i++) {
> ++ temp = gst_caps_new_simple (mimetype,
> ++ "width", G_TYPE_INT, profiles[i].width,
> ++ "height", G_TYPE_INT, profiles[i].height,
> ++ "framerate", GST_TYPE_FRACTION, profiles[i].framerate_n,
> ++ profiles[i].framerate_d, "pixel-aspect-ratio", GST_TYPE_FRACTION,
> ++ profiles[i].par_n, profiles[i].par_d, NULL);
> ++
> ++ gst_caps_append (caps, temp);
> ++ }
> ++ break;
> ++ }
> ++ case CODEC_ID_DNXHD:
> ++ {
> ++ caps = gst_caps_new_simple (mimetype,
> ++ "width", G_TYPE_INT, 1920,
> ++ "height", G_TYPE_INT, 1080,
> ++ "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL);
> ++ gst_caps_append (caps, gst_caps_new_simple (mimetype,
> ++ "width", G_TYPE_INT, 1280,
> ++ "height", G_TYPE_INT, 720,
> ++ "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL));
> ++ break;
> ++ }
> ++ default:
> ++ break;
> ++ }
> ++ }
> ++
> ++ /* no fixed caps or special restrictions applied;
> ++ * default unfixed setting */
> ++ if (!caps) {
> ++ GST_DEBUG ("Creating default caps");
> ++ caps = gst_caps_new_simple (mimetype,
> ++ "width", GST_TYPE_INT_RANGE, 16, 4096,
> ++ "height", GST_TYPE_INT_RANGE, 16, 4096,
> ++ "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL);
> ++ }
> ++
> ++ for (i = 0; i < gst_caps_get_size (caps); i++) {
> ++ va_start (var_args, fieldname);
> ++ structure = gst_caps_get_structure (caps, i);
> ++ gst_structure_set_valist (structure, fieldname, var_args);
> ++ va_end (var_args);
> ++ }
> ++
> ++ return caps;
> ++}
> ++
> ++/* same for audio - now with channels/sample rate
> ++ */
> ++static GstCaps *
> ++gst_ff_aud_caps_new (AVCodecContext * context, enum CodecID codec_id,
> ++ const char *mimetype, const char *fieldname, ...)
> ++{
> ++ GstCaps *caps = NULL;
> ++ GstStructure *structure = NULL;
> ++ gint i;
> ++ va_list var_args;
> ++
> ++ /* fixed, non-probing context */
> ++ if (context != NULL && context->channels != -1) {
> ++ GstAudioChannelPosition *pos;
> ++ guint64 channel_layout = context->channel_layout;
> ++
> ++ if (channel_layout == 0) {
> ++ const guint64 default_channel_set[] = {
> ++ 0, 0, AV_CH_LAYOUT_SURROUND, AV_CH_LAYOUT_QUAD, AV_CH_LAYOUT_5POINT0,
> ++ AV_CH_LAYOUT_5POINT1, 0, AV_CH_LAYOUT_7POINT1
> ++ };
> ++
> ++ switch (codec_id) {
> ++ case CODEC_ID_EAC3:
> ++ case CODEC_ID_AC3:
> ++ case CODEC_ID_DTS:
> ++ if (context->channels > 0
> ++ && context->channels < G_N_ELEMENTS (default_channel_set))
> ++ channel_layout = default_channel_set[context->channels - 1];
> ++ break;
> ++ default:
> ++ break;
> ++ }
> ++ }
> ++
> ++ caps = gst_caps_new_simple (mimetype,
> ++ "rate", G_TYPE_INT, context->sample_rate,
> ++ "channels", G_TYPE_INT, context->channels, NULL);
> ++
> ++ pos = gst_ff_channel_layout_to_gst (channel_layout, context->channels);
> ++ if (pos != NULL) {
> ++ gst_audio_set_channel_positions (gst_caps_get_structure (caps, 0), pos);
> ++ g_free (pos);
> ++ }
> ++ } else {
> ++ gint maxchannels = 2;
> ++ const gint *rates = NULL;
> ++ gint n_rates = 0;
> ++
> ++ /* so we must be after restricted caps in this case */
> ++ switch (codec_id) {
> ++ case CODEC_ID_AAC:
> ++ case CODEC_ID_AAC_LATM:
> ++ case CODEC_ID_DTS:
> ++ maxchannels = 6;
> ++ break;
> ++ case CODEC_ID_MP2:
> ++ {
> ++ const static gint l_rates[] =
> ++ { 48000, 44100, 32000, 24000, 22050, 16000 };
> ++ n_rates = G_N_ELEMENTS (l_rates);
> ++ rates = l_rates;
> ++ break;
> ++ }
> ++ case CODEC_ID_EAC3:
> ++ case CODEC_ID_AC3:
> ++ {
> ++ const static gint l_rates[] = { 48000, 44100, 32000 };
> ++ maxchannels = 6;
> ++ n_rates = G_N_ELEMENTS (l_rates);
> ++ rates = l_rates;
> ++ break;
> ++ }
> ++ case CODEC_ID_ADPCM_G722:
> ++ {
> ++ const static gint l_rates[] = { 16000 };
> ++ n_rates = G_N_ELEMENTS (l_rates);
> ++ rates = l_rates;
> ++ maxchannels = 1;
> ++ break;
> ++ }
> ++ case CODEC_ID_ADPCM_G726:
> ++ {
> ++ const static gint l_rates[] = { 8000 };
> ++ n_rates = G_N_ELEMENTS (l_rates);
> ++ rates = l_rates;
> ++ maxchannels = 1;
> ++ break;
> ++ }
> ++ case CODEC_ID_ADPCM_SWF:
> ++ {
> ++ const static gint l_rates[] = { 11025, 22050, 44100 };
> ++ n_rates = G_N_ELEMENTS (l_rates);
> ++ rates = l_rates;
> ++ break;
> ++ }
> ++ case CODEC_ID_ROQ_DPCM:
> ++ {
> ++ const static gint l_rates[] = { 22050 };
> ++ n_rates = G_N_ELEMENTS (l_rates);
> ++ rates = l_rates;
> ++ break;
> ++ }
> ++ case CODEC_ID_AMR_NB:
> ++ {
> ++ const static gint l_rates[] = { 8000 };
> ++ maxchannels = 1;
> ++ n_rates = G_N_ELEMENTS (l_rates);
> ++ rates = l_rates;
> ++ break;
> ++ }
> ++ case CODEC_ID_AMR_WB:
> ++ {
> ++ const static gint l_rates[] = { 16000 };
> ++ maxchannels = 1;
> ++ n_rates = G_N_ELEMENTS (l_rates);
> ++ rates = l_rates;
> ++ break;
> ++ }
> ++ default:
> ++ break;
> ++ }
> ++
> ++ /* TODO: handle context->channel_layouts here to set
> ++ * the list of channel layouts supported by the encoder.
> ++ * Unfortunately no encoder uses this yet....
> ++ */
> ++ /* regardless of encode/decode, open up channels if applicable */
> ++ /* Until decoders/encoders expose the maximum number of channels
> ++ * they support, we whitelist them here. */
> ++ switch (codec_id) {
> ++ case CODEC_ID_WMAPRO:
> ++ case CODEC_ID_TRUEHD:
> ++ maxchannels = 8;
> ++ break;
> ++ default:
> ++ break;
> ++ }
> ++
> ++ if (maxchannels == 1)
> ++ caps = gst_caps_new_simple (mimetype,
> ++ "channels", G_TYPE_INT, maxchannels, NULL);
> ++ else
> ++ caps = gst_caps_new_simple (mimetype,
> ++ "channels", GST_TYPE_INT_RANGE, 1, maxchannels, NULL);
> ++ if (n_rates) {
> ++ GValue list = { 0, };
> ++ GstStructure *structure;
> ++
> ++ g_value_init (&list, GST_TYPE_LIST);
> ++ for (i = 0; i < n_rates; i++) {
> ++ GValue v = { 0, };
> ++
> ++ g_value_init (&v, G_TYPE_INT);
> ++ g_value_set_int (&v, rates[i]);
> ++ gst_value_list_append_value (&list, &v);
> ++ g_value_unset (&v);
> ++ }
> ++ structure = gst_caps_get_structure (caps, 0);
> ++ gst_structure_set_value (structure, "rate", &list);
> ++ g_value_unset (&list);
> ++ } else
> ++ gst_caps_set_simple (caps, "rate", GST_TYPE_INT_RANGE, 4000, 96000, NULL);
> ++ }
> ++
> ++ for (i = 0; i < gst_caps_get_size (caps); i++) {
> ++ va_start (var_args, fieldname);
> ++ structure = gst_caps_get_structure (caps, i);
> ++ gst_structure_set_valist (structure, fieldname, var_args);
> ++ va_end (var_args);
> ++ }
> ++
> ++ return caps;
> ++}
> ++
> ++/* Convert a FFMPEG codec ID and optional AVCodecContext
> ++ * to a GstCaps. If the context is ommitted, no fixed values
> ++ * for video/audio size will be included in the GstCaps
> ++ *
> ++ * CodecID is primarily meant for compressed data GstCaps!
> ++ *
> ++ * encode is a special parameter. gstffmpegdec will say
> ++ * FALSE, gstffmpegenc will say TRUE. The output caps
> ++ * depends on this, in such a way that it will be very
> ++ * specific, defined, fixed and correct caps for encoders,
> ++ * yet very wide, "forgiving" caps for decoders. Example
> ++ * for mp3: decode: audio/mpeg,mpegversion=1,layer=[1-3]
> ++ * but encode: audio/mpeg,mpegversion=1,layer=3,bitrate=x,
> ++ * rate=x,channels=x.
> ++ */
> ++
> ++GstCaps *
> ++gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
> ++ AVCodecContext * context, gboolean encode)
> ++{
> ++ GstCaps *caps = NULL;
> ++ gboolean buildcaps = FALSE;
> ++
> ++ GST_LOG ("codec_id:%d, context:%p, encode:%d", codec_id, context, encode);
> ++
> ++ switch (codec_id) {
> ++ case CODEC_ID_MPEG1VIDEO:
> ++ /* FIXME: bitrate */
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/mpeg",
> ++ "mpegversion", G_TYPE_INT, 1,
> ++ "systemstream", G_TYPE_BOOLEAN, FALSE, NULL);
> ++ break;
> ++
> ++ case CODEC_ID_MPEG2VIDEO:
> ++ if (encode) {
> ++ /* FIXME: bitrate */
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/mpeg",
> ++ "mpegversion", G_TYPE_INT, 2,
> ++ "systemstream", G_TYPE_BOOLEAN, FALSE, NULL);
> ++ } else {
> ++ /* decode both MPEG-1 and MPEG-2; width/height/fps are all in
> ++ * the MPEG video stream headers, so may be omitted from caps. */
> ++ caps = gst_caps_new_simple ("video/mpeg",
> ++ "mpegversion", GST_TYPE_INT_RANGE, 1, 2,
> ++ "systemstream", G_TYPE_BOOLEAN, FALSE, NULL);
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_MPEG2VIDEO_XVMC:
> ++ /* this is a special ID - don't need it in GStreamer, I think */
> ++ break;
> ++
> ++ case CODEC_ID_H263:
> ++ if (encode) {
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-h263",
> ++ "variant", G_TYPE_STRING, "itu",
> ++ "h263version", G_TYPE_STRING, "h263", NULL);
> ++ } else {
> ++ /* don't pass codec_id, we can decode other variants with the H263
> ++ * decoder that don't have specific size requirements
> ++ */
> ++ caps = gst_ff_vid_caps_new (context, CODEC_ID_NONE, "video/x-h263",
> ++ "variant", G_TYPE_STRING, "itu", NULL);
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_H263P:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-h263",
> ++ "variant", G_TYPE_STRING, "itu",
> ++ "h263version", G_TYPE_STRING, "h263p", NULL);
> ++ if (encode && context) {
> ++
> ++ gst_caps_set_simple (caps,
> ++ "annex-f", G_TYPE_BOOLEAN, context->flags & CODEC_FLAG_4MV,
> ++ "annex-j", G_TYPE_BOOLEAN, context->flags & CODEC_FLAG_LOOP_FILTER,
> ++ "annex-i", G_TYPE_BOOLEAN, context->flags & CODEC_FLAG_AC_PRED,
> ++ "annex-t", G_TYPE_BOOLEAN, context->flags & CODEC_FLAG_AC_PRED,
> ++ NULL);
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_H263I:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-intel-h263",
> ++ "variant", G_TYPE_STRING, "intel", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_H261:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-h261", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_RV10:
> ++ case CODEC_ID_RV20:
> ++ case CODEC_ID_RV30:
> ++ case CODEC_ID_RV40:
> ++ {
> ++ gint version;
> ++
> ++ switch (codec_id) {
> ++ case CODEC_ID_RV40:
> ++ version = 4;
> ++ break;
> ++ case CODEC_ID_RV30:
> ++ version = 3;
> ++ break;
> ++ case CODEC_ID_RV20:
> ++ version = 2;
> ++ break;
> ++ default:
> ++ version = 1;
> ++ break;
> ++ }
> ++
> ++ /* FIXME: context->sub_id must be filled in during decoding */
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-pn-realvideo",
> ++ "systemstream", G_TYPE_BOOLEAN, FALSE,
> ++ "rmversion", G_TYPE_INT, version, NULL);
> ++ if (context) {
> ++ gst_caps_set_simple (caps, "format", G_TYPE_INT, context->sub_id, NULL);
> ++ if (context->extradata_size >= 8) {
> ++ gst_caps_set_simple (caps,
> ++ "subformat", G_TYPE_INT, GST_READ_UINT32_BE (context->extradata),
> ++ NULL);
> ++ }
> ++ }
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_MP1:
> ++ /* FIXME: bitrate */
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/mpeg",
> ++ "mpegversion", G_TYPE_INT, 1, "layer", G_TYPE_INT, 1, NULL);
> ++ break;
> ++
> ++ case CODEC_ID_MP2:
> ++ /* FIXME: bitrate */
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/mpeg",
> ++ "mpegversion", G_TYPE_INT, 1, "layer", G_TYPE_INT, 2, NULL);
> ++ break;
> ++
> ++ case CODEC_ID_MP3:
> ++ if (encode) {
> ++ /* FIXME: bitrate */
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/mpeg",
> ++ "mpegversion", G_TYPE_INT, 1, "layer", G_TYPE_INT, 3, NULL);
> ++ } else {
> ++ /* Decodes MPEG-1 layer 1/2/3. Samplerate, channels et al are
> ++ * in the MPEG audio header, so may be omitted from caps. */
> ++ caps = gst_caps_new_simple ("audio/mpeg",
> ++ "mpegversion", G_TYPE_INT, 1,
> ++ "layer", GST_TYPE_INT_RANGE, 1, 3, NULL);
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_MUSEPACK7:
> ++ caps =
> ++ gst_ff_aud_caps_new (context, codec_id,
> ++ "audio/x-ffmpeg-parsed-musepack", "streamversion", G_TYPE_INT, 7,
> ++ NULL);
> ++ break;
> ++
> ++ case CODEC_ID_MUSEPACK8:
> ++ caps =
> ++ gst_ff_aud_caps_new (context, codec_id,
> ++ "audio/x-ffmpeg-parsed-musepack", "streamversion", G_TYPE_INT, 8,
> ++ NULL);
> ++ break;
> ++
> ++ case CODEC_ID_AC3:
> ++ /* FIXME: bitrate */
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-ac3", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_EAC3:
> ++ /* FIXME: bitrate */
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-eac3", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_TRUEHD:
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-true-hd", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_ATRAC1:
> ++ caps =
> ++ gst_ff_aud_caps_new (context, codec_id, "audio/x-vnd.sony.atrac1",
> ++ NULL);
> ++ break;
> ++
> ++ case CODEC_ID_ATRAC3:
> ++ caps =
> ++ gst_ff_aud_caps_new (context, codec_id, "audio/x-vnd.sony.atrac3",
> ++ NULL);
> ++ break;
> ++
> ++ case CODEC_ID_DTS:
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-dts", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_APE:
> ++ caps =
> ++ gst_ff_aud_caps_new (context, codec_id, "audio/x-ffmpeg-parsed-ape",
> ++ NULL);
> ++ if (context) {
> ++ gst_caps_set_simple (caps,
> ++ "depth", G_TYPE_INT, context->bits_per_coded_sample, NULL);
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_MLP:
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-mlp", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_IMC:
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-imc", NULL);
> ++ break;
> ++
> ++ /* MJPEG is normal JPEG, Motion-JPEG and Quicktime MJPEG-A. MJPEGB
> ++ * is Quicktime's MJPEG-B. LJPEG is lossless JPEG. I don't know what
> ++ * sp5x is, but it's apparently something JPEG... We don't separate
> ++ * between those in GStreamer. Should we (at least between MJPEG,
> ++ * MJPEG-B and sp5x decoding...)? */
> ++ case CODEC_ID_MJPEG:
> ++ case CODEC_ID_LJPEG:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "image/jpeg", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_SP5X:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/sp5x", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_MJPEGB:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-mjpeg-b", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_MPEG4:
> ++ if (encode && context != NULL) {
> ++ /* I'm not exactly sure what ffmpeg outputs... ffmpeg itself uses
> ++ * the AVI fourcc 'DIVX', but 'mp4v' for Quicktime... */
> ++ switch (context->codec_tag) {
> ++ case GST_MAKE_FOURCC ('D', 'I', 'V', 'X'):
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-divx",
> ++ "divxversion", G_TYPE_INT, 5, NULL);
> ++ break;
> ++ case GST_MAKE_FOURCC ('m', 'p', '4', 'v'):
> ++ default:
> ++ /* FIXME: bitrate */
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/mpeg",
> ++ "systemstream", G_TYPE_BOOLEAN, FALSE,
> ++ "mpegversion", G_TYPE_INT, 4, NULL);
> ++ break;
> ++ }
> ++ } else {
> ++ /* The trick here is to separate xvid, divx, mpeg4, 3ivx et al */
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/mpeg",
> ++ "mpegversion", G_TYPE_INT, 4,
> ++ "systemstream", G_TYPE_BOOLEAN, FALSE, NULL);
> ++ if (encode) {
> ++ gst_caps_append (caps, gst_ff_vid_caps_new (context, codec_id,
> ++ "video/x-divx", "divxversion", G_TYPE_INT, 5, NULL));
> ++ } else {
> ++ gst_caps_append (caps, gst_ff_vid_caps_new (context, codec_id,
> ++ "video/x-divx", "divxversion", GST_TYPE_INT_RANGE, 4, 5,
> ++ NULL));
> ++ gst_caps_append (caps, gst_ff_vid_caps_new (context, codec_id,
> ++ "video/x-xvid", NULL));
> ++ gst_caps_append (caps, gst_ff_vid_caps_new (context, codec_id,
> ++ "video/x-3ivx", NULL));
> ++ }
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_RAWVIDEO:
> ++ caps =
> ++ gst_ffmpeg_codectype_to_caps (AVMEDIA_TYPE_VIDEO, context, codec_id,
> ++ encode);
> ++ break;
> ++
> ++ case CODEC_ID_MSMPEG4V1:
> ++ case CODEC_ID_MSMPEG4V2:
> ++ case CODEC_ID_MSMPEG4V3:
> ++ {
> ++ gint version = 41 + codec_id - CODEC_ID_MSMPEG4V1;
> ++
> ++ /* encode-FIXME: bitrate */
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-msmpeg",
> ++ "msmpegversion", G_TYPE_INT, version, NULL);
> ++ if (!encode && codec_id == CODEC_ID_MSMPEG4V3) {
> ++ gst_caps_append (caps, gst_ff_vid_caps_new (context, codec_id,
> ++ "video/x-divx", "divxversion", G_TYPE_INT, 3, NULL));
> ++ }
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_WMV1:
> ++ case CODEC_ID_WMV2:
> ++ {
> ++ gint version = (codec_id == CODEC_ID_WMV1) ? 1 : 2;
> ++
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-wmv",
> ++ "wmvversion", G_TYPE_INT, version, NULL);
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_FLV1:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-flash-video",
> ++ "flvversion", G_TYPE_INT, 1, NULL);
> ++ break;
> ++
> ++ case CODEC_ID_SVQ1:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-svq",
> ++ "svqversion", G_TYPE_INT, 1, NULL);
> ++ break;
> ++
> ++ case CODEC_ID_SVQ3:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-svq",
> ++ "svqversion", G_TYPE_INT, 3, NULL);
> ++ break;
> ++
> ++ case CODEC_ID_DVAUDIO:
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-dv", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_DVVIDEO:
> ++ {
> ++ if (encode && context) {
> ++ guint32 fourcc;
> ++
> ++ switch (context->pix_fmt) {
> ++ case PIX_FMT_YUYV422:
> ++ fourcc = GST_MAKE_FOURCC ('Y', 'U', 'Y', '2');
> ++ break;
> ++ case PIX_FMT_YUV420P:
> ++ fourcc = GST_MAKE_FOURCC ('I', '4', '2', '0');
> ++ break;
> ++ case PIX_FMT_YUVA420P:
> ++ fourcc = GST_MAKE_FOURCC ('A', '4', '2', '0');
> ++ break;
> ++ case PIX_FMT_YUV411P:
> ++ fourcc = GST_MAKE_FOURCC ('Y', '4', '1', 'B');
> ++ break;
> ++ case PIX_FMT_YUV422P:
> ++ fourcc = GST_MAKE_FOURCC ('Y', '4', '2', 'B');
> ++ break;
> ++ case PIX_FMT_YUV410P:
> ++ fourcc = GST_MAKE_FOURCC ('Y', 'U', 'V', '9');
> ++ break;
> ++ default:
> ++ GST_WARNING
> ++ ("Couldnt' find fourcc for pixfmt %d, defaulting to I420",
> ++ context->pix_fmt);
> ++ fourcc = GST_MAKE_FOURCC ('I', '4', '2', '0');
> ++ break;
> ++ }
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-dv",
> ++ "systemstream", G_TYPE_BOOLEAN, FALSE,
> ++ "format", GST_TYPE_FOURCC, fourcc, NULL);
> ++ } else {
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-dv",
> ++ "systemstream", G_TYPE_BOOLEAN, FALSE, NULL);
> ++ }
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_WMAV1:
> ++ case CODEC_ID_WMAV2:
> ++ {
> ++ gint version = (codec_id == CODEC_ID_WMAV1) ? 1 : 2;
> ++
> ++ if (context) {
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-wma",
> ++ "wmaversion", G_TYPE_INT, version,
> ++ "block_align", G_TYPE_INT, context->block_align,
> ++ "bitrate", G_TYPE_INT, context->bit_rate, NULL);
> ++ } else {
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-wma",
> ++ "wmaversion", G_TYPE_INT, version,
> ++ "block_align", GST_TYPE_INT_RANGE, 0, G_MAXINT,
> ++ "bitrate", GST_TYPE_INT_RANGE, 0, G_MAXINT, NULL);
> ++ }
> ++ }
> ++ break;
> ++ case CODEC_ID_WMAPRO:
> ++ {
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-wma",
> ++ "wmaversion", G_TYPE_INT, 3, NULL);
> ++ break;
> ++ }
> ++
> ++ case CODEC_ID_WMAVOICE:
> ++ {
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-wms", NULL);
> ++ break;
> ++ }
> ++
> ++ case CODEC_ID_MACE3:
> ++ case CODEC_ID_MACE6:
> ++ {
> ++ gint version = (codec_id == CODEC_ID_MACE3) ? 3 : 6;
> ++
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-mace",
> ++ "maceversion", G_TYPE_INT, version, NULL);
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_HUFFYUV:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-huffyuv", NULL);
> ++ if (context) {
> ++ gst_caps_set_simple (caps,
> ++ "bpp", G_TYPE_INT, context->bits_per_coded_sample, NULL);
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_CYUV:
> ++ caps =
> ++ gst_ff_vid_caps_new (context, codec_id, "video/x-compressed-yuv",
> ++ NULL);
> ++ break;
> ++
> ++ case CODEC_ID_H264:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-h264", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_INDEO5:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-indeo",
> ++ "indeoversion", G_TYPE_INT, 5, NULL);
> ++ break;
> ++
> ++ case CODEC_ID_INDEO3:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-indeo",
> ++ "indeoversion", G_TYPE_INT, 3, NULL);
> ++ break;
> ++
> ++ case CODEC_ID_INDEO2:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-indeo",
> ++ "indeoversion", G_TYPE_INT, 2, NULL);
> ++ break;
> ++
> ++ case CODEC_ID_FLASHSV:
> ++ caps =
> ++ gst_ff_vid_caps_new (context, codec_id, "video/x-flash-screen", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_VP3:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-vp3", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_VP5:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-vp5", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_VP6:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-vp6", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_VP6F:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-vp6-flash", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_VP6A:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-vp6-alpha", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_VP8:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-vp8", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_THEORA:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-theora", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_AAC:
> ++ {
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/mpeg", NULL);
> ++
> ++ if (!encode) {
> ++ GValue arr = { 0, };
> ++ GValue item = { 0, };
> ++
> ++ g_value_init (&arr, GST_TYPE_LIST);
> ++ g_value_init (&item, G_TYPE_INT);
> ++ g_value_set_int (&item, 2);
> ++ gst_value_list_append_value (&arr, &item);
> ++ g_value_set_int (&item, 4);
> ++ gst_value_list_append_value (&arr, &item);
> ++ g_value_unset (&item);
> ++
> ++ gst_caps_set_value (caps, "mpegversion", &arr);
> ++ g_value_unset (&arr);
> ++
> ++ g_value_init (&arr, GST_TYPE_LIST);
> ++ g_value_init (&item, G_TYPE_STRING);
> ++ g_value_set_string (&item, "raw");
> ++ gst_value_list_append_value (&arr, &item);
> ++ g_value_set_string (&item, "adts");
> ++ gst_value_list_append_value (&arr, &item);
> ++ g_value_set_string (&item, "adif");
> ++ gst_value_list_append_value (&arr, &item);
> ++ g_value_unset (&item);
> ++
> ++ gst_caps_set_value (caps, "stream-format", &arr);
> ++ g_value_unset (&arr);
> ++ } else {
> ++ gst_caps_set_simple (caps, "mpegversion", G_TYPE_INT, 4,
> ++ "stream-format", G_TYPE_STRING, "raw",
> ++ "base-profile", G_TYPE_STRING, "lc", NULL);
> ++
> ++ if (context && context->extradata_size > 0)
> ++ gst_codec_utils_aac_caps_set_level_and_profile (caps,
> ++ context->extradata, context->extradata_size);
> ++ }
> ++
> ++ break;
> ++ }
> ++ case CODEC_ID_AAC_LATM: /* LATM/LOAS AAC syntax */
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/mpeg",
> ++ "mpegversion", G_TYPE_INT, 4, "stream-format", G_TYPE_STRING, "loas",
> ++ NULL);
> ++ break;
> ++
> ++ case CODEC_ID_ASV1:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-asus",
> ++ "asusversion", G_TYPE_INT, 1, NULL);
> ++ break;
> ++ case CODEC_ID_ASV2:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-asus",
> ++ "asusversion", G_TYPE_INT, 2, NULL);
> ++ break;
> ++
> ++ case CODEC_ID_FFV1:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-ffv",
> ++ "ffvversion", G_TYPE_INT, 1, NULL);
> ++ break;
> ++
> ++ case CODEC_ID_4XM:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-4xm", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_XAN_WC3:
> ++ case CODEC_ID_XAN_WC4:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-xan",
> ++ "wcversion", G_TYPE_INT, 3 - CODEC_ID_XAN_WC3 + codec_id, NULL);
> ++ break;
> ++
> ++ case CODEC_ID_CLJR:
> ++ caps =
> ++ gst_ff_vid_caps_new (context, codec_id,
> ++ "video/x-cirrus-logic-accupak", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_FRAPS:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-fraps", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_MDEC:
> ++ case CODEC_ID_ROQ:
> ++ case CODEC_ID_INTERPLAY_VIDEO:
> ++ buildcaps = TRUE;
> ++ break;
> ++
> ++ case CODEC_ID_VCR1:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-ati-vcr",
> ++ "vcrversion", G_TYPE_INT, 1, NULL);
> ++ break;
> ++
> ++ case CODEC_ID_RPZA:
> ++ caps =
> ++ gst_ff_vid_caps_new (context, codec_id, "video/x-apple-video", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_CINEPAK:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-cinepak", NULL);
> ++ break;
> ++
> ++ /* WS_VQA belogns here (order) */
> ++
> ++ case CODEC_ID_MSRLE:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-rle",
> ++ "layout", G_TYPE_STRING, "microsoft", NULL);
> ++ if (context) {
> ++ gst_caps_set_simple (caps,
> ++ "depth", G_TYPE_INT, (gint) context->bits_per_coded_sample, NULL);
> ++ } else {
> ++ gst_caps_set_simple (caps, "depth", GST_TYPE_INT_RANGE, 1, 64, NULL);
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_QTRLE:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-rle",
> ++ "layout", G_TYPE_STRING, "quicktime", NULL);
> ++ if (context) {
> ++ gst_caps_set_simple (caps,
> ++ "depth", G_TYPE_INT, (gint) context->bits_per_coded_sample, NULL);
> ++ } else {
> ++ gst_caps_set_simple (caps, "depth", GST_TYPE_INT_RANGE, 1, 64, NULL);
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_MSVIDEO1:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-msvideocodec",
> ++ "msvideoversion", G_TYPE_INT, 1, NULL);
> ++ break;
> ++
> ++ case CODEC_ID_WMV3:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-wmv",
> ++ "wmvversion", G_TYPE_INT, 3, NULL);
> ++ break;
> ++ case CODEC_ID_VC1:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-wmv",
> ++ "wmvversion", G_TYPE_INT, 3, "format", GST_TYPE_FOURCC,
> ++ GST_MAKE_FOURCC ('W', 'V', 'C', '1'), NULL);
> ++ break;
> ++ case CODEC_ID_QDM2:
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-qdm2", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_MSZH:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-mszh", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_ZLIB:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-zlib", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_TRUEMOTION1:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-truemotion",
> ++ "trueversion", G_TYPE_INT, 1, NULL);
> ++ break;
> ++ case CODEC_ID_TRUEMOTION2:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-truemotion",
> ++ "trueversion", G_TYPE_INT, 2, NULL);
> ++ break;
> ++
> ++ case CODEC_ID_ULTI:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-ultimotion",
> ++ NULL);
> ++ break;
> ++
> ++ case CODEC_ID_TSCC:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-camtasia", NULL);
> ++ if (context) {
> ++ gst_caps_set_simple (caps,
> ++ "depth", G_TYPE_INT, (gint) context->bits_per_coded_sample, NULL);
> ++ } else {
> ++ gst_caps_set_simple (caps, "depth", GST_TYPE_INT_RANGE, 8, 32, NULL);
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_KMVC:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-kmvc", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_NUV:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-nuv", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_GIF:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "image/gif", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_PNG:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "image/png", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_PPM:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "image/ppm", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_PBM:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "image/pbm", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_PAM:
> ++ caps =
> ++ gst_ff_vid_caps_new (context, codec_id, "image/x-portable-anymap",
> ++ NULL);
> ++ break;
> ++
> ++ case CODEC_ID_PGM:
> ++ caps =
> ++ gst_ff_vid_caps_new (context, codec_id, "image/x-portable-graymap",
> ++ NULL);
> ++ break;
> ++
> ++ case CODEC_ID_PCX:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "image/x-pcx", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_SGI:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "image/x-sgi", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_TARGA:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "image/x-tga", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_TIFF:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "image/tiff", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_SUNRAST:
> ++ caps =
> ++ gst_ff_vid_caps_new (context, codec_id, "image/x-sun-raster", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_SMC:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-smc", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_QDRAW:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-qdrw", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_DNXHD:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-dnxhd", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_MIMIC:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-mimic", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_VMNC:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-vmnc", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_TRUESPEECH:
> ++ caps =
> ++ gst_ff_aud_caps_new (context, codec_id, "audio/x-truespeech", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_QCELP:
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/qcelp", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_AMV:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-amv", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_AASC:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-aasc", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_LOCO:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-loco", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_ZMBV:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-zmbv", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_LAGARITH:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-lagarith", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_CSCD:
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-camstudio", NULL);
> ++ if (context) {
> ++ gst_caps_set_simple (caps,
> ++ "depth", G_TYPE_INT, (gint) context->bits_per_coded_sample, NULL);
> ++ } else {
> ++ gst_caps_set_simple (caps, "depth", GST_TYPE_INT_RANGE, 8, 32, NULL);
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_WS_VQA:
> ++ case CODEC_ID_IDCIN:
> ++ case CODEC_ID_8BPS:
> ++ case CODEC_ID_FLIC:
> ++ case CODEC_ID_VMDVIDEO:
> ++ case CODEC_ID_VMDAUDIO:
> ++ case CODEC_ID_SNOW:
> ++ case CODEC_ID_VIXL:
> ++ case CODEC_ID_QPEG:
> ++ case CODEC_ID_PGMYUV:
> ++ case CODEC_ID_FFVHUFF:
> ++ case CODEC_ID_WNV1:
> ++ case CODEC_ID_MP3ADU:
> ++ case CODEC_ID_MP3ON4:
> ++ case CODEC_ID_WESTWOOD_SND1:
> ++ case CODEC_ID_MMVIDEO:
> ++ case CODEC_ID_AVS:
> ++ case CODEC_ID_CAVS:
> ++ buildcaps = TRUE;
> ++ break;
> ++
> ++ /* weird quasi-codecs for the demuxers only */
> ++ case CODEC_ID_PCM_S16LE:
> ++ case CODEC_ID_PCM_S16BE:
> ++ case CODEC_ID_PCM_U16LE:
> ++ case CODEC_ID_PCM_U16BE:
> ++ case CODEC_ID_PCM_S8:
> ++ case CODEC_ID_PCM_U8:
> ++ {
> ++ gint width = 0, depth = 0, endianness = 0;
> ++ gboolean signedness = FALSE; /* blabla */
> ++
> ++ switch (codec_id) {
> ++ case CODEC_ID_PCM_S16LE:
> ++ width = 16;
> ++ depth = 16;
> ++ endianness = G_LITTLE_ENDIAN;
> ++ signedness = TRUE;
> ++ break;
> ++ case CODEC_ID_PCM_S16BE:
> ++ width = 16;
> ++ depth = 16;
> ++ endianness = G_BIG_ENDIAN;
> ++ signedness = TRUE;
> ++ break;
> ++ case CODEC_ID_PCM_U16LE:
> ++ width = 16;
> ++ depth = 16;
> ++ endianness = G_LITTLE_ENDIAN;
> ++ signedness = FALSE;
> ++ break;
> ++ case CODEC_ID_PCM_U16BE:
> ++ width = 16;
> ++ depth = 16;
> ++ endianness = G_BIG_ENDIAN;
> ++ signedness = FALSE;
> ++ break;
> ++ case CODEC_ID_PCM_S8:
> ++ width = 8;
> ++ depth = 8;
> ++ endianness = G_BYTE_ORDER;
> ++ signedness = TRUE;
> ++ break;
> ++ case CODEC_ID_PCM_U8:
> ++ width = 8;
> ++ depth = 8;
> ++ endianness = G_BYTE_ORDER;
> ++ signedness = FALSE;
> ++ break;
> ++ default:
> ++ g_assert (0); /* don't worry, we never get here */
> ++ break;
> ++ }
> ++
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-raw-int",
> ++ "width", G_TYPE_INT, width,
> ++ "depth", G_TYPE_INT, depth,
> ++ "endianness", G_TYPE_INT, endianness,
> ++ "signed", G_TYPE_BOOLEAN, signedness, NULL);
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_PCM_MULAW:
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-mulaw", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_PCM_ALAW:
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-alaw", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_ADPCM_G722:
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/G722", NULL);
> ++ if (context)
> ++ gst_caps_set_simple (caps,
> ++ "block_align", G_TYPE_INT, context->block_align,
> ++ "bitrate", G_TYPE_INT, context->bit_rate, NULL);
> ++ break;
> ++
> ++ case CODEC_ID_ADPCM_G726:
> ++ {
> ++ /* the G726 decoder can also handle G721 */
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-adpcm",
> ++ "layout", G_TYPE_STRING, "g726", NULL);
> ++ if (context)
> ++ gst_caps_set_simple (caps,
> ++ "block_align", G_TYPE_INT, context->block_align,
> ++ "bitrate", G_TYPE_INT, context->bit_rate, NULL);
> ++
> ++ if (!encode) {
> ++ gst_caps_append (caps, gst_caps_new_simple ("audio/x-adpcm",
> ++ "layout", G_TYPE_STRING, "g721",
> ++ "channels", G_TYPE_INT, 1, "rate", G_TYPE_INT, 8000, NULL));
> ++ }
> ++ break;
> ++ }
> ++ case CODEC_ID_ADPCM_IMA_QT:
> ++ case CODEC_ID_ADPCM_IMA_WAV:
> ++ case CODEC_ID_ADPCM_IMA_DK3:
> ++ case CODEC_ID_ADPCM_IMA_DK4:
> ++ case CODEC_ID_ADPCM_IMA_WS:
> ++ case CODEC_ID_ADPCM_IMA_SMJPEG:
> ++ case CODEC_ID_ADPCM_IMA_AMV:
> ++ case CODEC_ID_ADPCM_IMA_ISS:
> ++ case CODEC_ID_ADPCM_IMA_EA_EACS:
> ++ case CODEC_ID_ADPCM_IMA_EA_SEAD:
> ++ case CODEC_ID_ADPCM_MS:
> ++ case CODEC_ID_ADPCM_4XM:
> ++ case CODEC_ID_ADPCM_XA:
> ++ case CODEC_ID_ADPCM_ADX:
> ++ case CODEC_ID_ADPCM_EA:
> ++ case CODEC_ID_ADPCM_CT:
> ++ case CODEC_ID_ADPCM_SWF:
> ++ case CODEC_ID_ADPCM_YAMAHA:
> ++ case CODEC_ID_ADPCM_SBPRO_2:
> ++ case CODEC_ID_ADPCM_SBPRO_3:
> ++ case CODEC_ID_ADPCM_SBPRO_4:
> ++ case CODEC_ID_ADPCM_EA_R1:
> ++ case CODEC_ID_ADPCM_EA_R2:
> ++ case CODEC_ID_ADPCM_EA_R3:
> ++ case CODEC_ID_ADPCM_EA_MAXIS_XA:
> ++ case CODEC_ID_ADPCM_EA_XAS:
> ++ case CODEC_ID_ADPCM_THP:
> ++ {
> ++ const gchar *layout = NULL;
> ++
> ++ switch (codec_id) {
> ++ case CODEC_ID_ADPCM_IMA_QT:
> ++ layout = "quicktime";
> ++ break;
> ++ case CODEC_ID_ADPCM_IMA_WAV:
> ++ layout = "dvi";
> ++ break;
> ++ case CODEC_ID_ADPCM_IMA_DK3:
> ++ layout = "dk3";
> ++ break;
> ++ case CODEC_ID_ADPCM_IMA_DK4:
> ++ layout = "dk4";
> ++ break;
> ++ case CODEC_ID_ADPCM_IMA_WS:
> ++ layout = "westwood";
> ++ break;
> ++ case CODEC_ID_ADPCM_IMA_SMJPEG:
> ++ layout = "smjpeg";
> ++ break;
> ++ case CODEC_ID_ADPCM_IMA_AMV:
> ++ layout = "amv";
> ++ break;
> ++ case CODEC_ID_ADPCM_IMA_ISS:
> ++ layout = "iss";
> ++ break;
> ++ case CODEC_ID_ADPCM_IMA_EA_EACS:
> ++ layout = "ea-eacs";
> ++ break;
> ++ case CODEC_ID_ADPCM_IMA_EA_SEAD:
> ++ layout = "ea-sead";
> ++ break;
> ++ case CODEC_ID_ADPCM_MS:
> ++ layout = "microsoft";
> ++ break;
> ++ case CODEC_ID_ADPCM_4XM:
> ++ layout = "4xm";
> ++ break;
> ++ case CODEC_ID_ADPCM_XA:
> ++ layout = "xa";
> ++ break;
> ++ case CODEC_ID_ADPCM_ADX:
> ++ layout = "adx";
> ++ break;
> ++ case CODEC_ID_ADPCM_EA:
> ++ layout = "ea";
> ++ break;
> ++ case CODEC_ID_ADPCM_CT:
> ++ layout = "ct";
> ++ break;
> ++ case CODEC_ID_ADPCM_SWF:
> ++ layout = "swf";
> ++ break;
> ++ case CODEC_ID_ADPCM_YAMAHA:
> ++ layout = "yamaha";
> ++ break;
> ++ case CODEC_ID_ADPCM_SBPRO_2:
> ++ layout = "sbpro2";
> ++ break;
> ++ case CODEC_ID_ADPCM_SBPRO_3:
> ++ layout = "sbpro3";
> ++ break;
> ++ case CODEC_ID_ADPCM_SBPRO_4:
> ++ layout = "sbpro4";
> ++ break;
> ++ case CODEC_ID_ADPCM_EA_R1:
> ++ layout = "ea-r1";
> ++ break;
> ++ case CODEC_ID_ADPCM_EA_R2:
> ++ layout = "ea-r3";
> ++ break;
> ++ case CODEC_ID_ADPCM_EA_R3:
> ++ layout = "ea-r3";
> ++ break;
> ++ case CODEC_ID_ADPCM_EA_MAXIS_XA:
> ++ layout = "ea-maxis-xa";
> ++ break;
> ++ case CODEC_ID_ADPCM_EA_XAS:
> ++ layout = "ea-xas";
> ++ break;
> ++ case CODEC_ID_ADPCM_THP:
> ++ layout = "thp";
> ++ break;
> ++ default:
> ++ g_assert (0); /* don't worry, we never get here */
> ++ break;
> ++ }
> ++
> ++ /* FIXME: someone please check whether we need additional properties
> ++ * in this caps definition. */
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-adpcm",
> ++ "layout", G_TYPE_STRING, layout, NULL);
> ++ if (context)
> ++ gst_caps_set_simple (caps,
> ++ "block_align", G_TYPE_INT, context->block_align,
> ++ "bitrate", G_TYPE_INT, context->bit_rate, NULL);
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_AMR_NB:
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/AMR", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_AMR_WB:
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/AMR-WB", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_GSM:
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-gsm", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_GSM_MS:
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/ms-gsm", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_NELLYMOSER:
> ++ caps =
> ++ gst_ff_aud_caps_new (context, codec_id, "audio/x-nellymoser", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_SIPR:
> ++ {
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-sipro", NULL);
> ++ if (context) {
> ++ gst_caps_set_simple (caps,
> ++ "leaf_size", G_TYPE_INT, context->block_align,
> ++ "bitrate", G_TYPE_INT, context->bit_rate, NULL);
> ++ }
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_RA_144:
> ++ case CODEC_ID_RA_288:
> ++ case CODEC_ID_COOK:
> ++ {
> ++ gint version = 0;
> ++
> ++ switch (codec_id) {
> ++ case CODEC_ID_RA_144:
> ++ version = 1;
> ++ break;
> ++ case CODEC_ID_RA_288:
> ++ version = 2;
> ++ break;
> ++ case CODEC_ID_COOK:
> ++ version = 8;
> ++ break;
> ++ default:
> ++ break;
> ++ }
> ++
> ++ /* FIXME: properties? */
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-pn-realaudio",
> ++ "raversion", G_TYPE_INT, version, NULL);
> ++ if (context) {
> ++ gst_caps_set_simple (caps,
> ++ "leaf_size", G_TYPE_INT, context->block_align,
> ++ "bitrate", G_TYPE_INT, context->bit_rate, NULL);
> ++ }
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_ROQ_DPCM:
> ++ case CODEC_ID_INTERPLAY_DPCM:
> ++ case CODEC_ID_XAN_DPCM:
> ++ case CODEC_ID_SOL_DPCM:
> ++ {
> ++ const gchar *layout = NULL;
> ++
> ++ switch (codec_id) {
> ++ case CODEC_ID_ROQ_DPCM:
> ++ layout = "roq";
> ++ break;
> ++ case CODEC_ID_INTERPLAY_DPCM:
> ++ layout = "interplay";
> ++ break;
> ++ case CODEC_ID_XAN_DPCM:
> ++ layout = "xan";
> ++ break;
> ++ case CODEC_ID_SOL_DPCM:
> ++ layout = "sol";
> ++ break;
> ++ default:
> ++ g_assert (0); /* don't worry, we never get here */
> ++ break;
> ++ }
> ++
> ++ /* FIXME: someone please check whether we need additional properties
> ++ * in this caps definition. */
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-dpcm",
> ++ "layout", G_TYPE_STRING, layout, NULL);
> ++ if (context)
> ++ gst_caps_set_simple (caps,
> ++ "block_align", G_TYPE_INT, context->block_align,
> ++ "bitrate", G_TYPE_INT, context->bit_rate, NULL);
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_SHORTEN:
> ++ caps = gst_caps_new_simple ("audio/x-shorten", NULL);
> ++ break;
> ++
> ++ case CODEC_ID_ALAC:
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-alac", NULL);
> ++ if (context) {
> ++ gst_caps_set_simple (caps,
> ++ "samplesize", G_TYPE_INT, context->bits_per_coded_sample, NULL);
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_FLAC:
> ++ /* Note that ffmpeg has no encoder yet, but just for safety. In the
> ++ * encoder case, we want to add things like samplerate, channels... */
> ++ if (!encode) {
> ++ caps = gst_caps_new_simple ("audio/x-flac", NULL);
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_DVD_SUBTITLE:
> ++ case CODEC_ID_DVB_SUBTITLE:
> ++ caps = NULL;
> ++ break;
> ++ case CODEC_ID_BMP:
> ++ caps = gst_caps_new_simple ("image/bmp", NULL);
> ++ break;
> ++ case CODEC_ID_TTA:
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-tta", NULL);
> ++ if (context) {
> ++ gst_caps_set_simple (caps,
> ++ "samplesize", G_TYPE_INT, context->bits_per_coded_sample, NULL);
> ++ }
> ++ break;
> ++ case CODEC_ID_TWINVQ:
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-twin-vq", NULL);
> ++ break;
> ++ default:
> ++ GST_DEBUG ("Unknown codec ID %d, please add mapping here", codec_id);
> ++ break;
> ++ }
> ++
> ++ if (buildcaps) {
> ++ AVCodec *codec;
> ++
> ++ if ((codec = avcodec_find_decoder (codec_id)) ||
> ++ (codec = avcodec_find_encoder (codec_id))) {
> ++ gchar *mime = NULL;
> ++
> ++ GST_LOG ("Could not create stream format caps for %s", codec->name);
> ++
> ++ switch (codec->type) {
> ++ case AVMEDIA_TYPE_VIDEO:
> ++ mime = g_strdup_printf ("video/x-gst_ff-%s", codec->name);
> ++ caps = gst_ff_vid_caps_new (context, codec_id, mime, NULL);
> ++ g_free (mime);
> ++ break;
> ++ case AVMEDIA_TYPE_AUDIO:
> ++ mime = g_strdup_printf ("audio/x-gst_ff-%s", codec->name);
> ++ caps = gst_ff_aud_caps_new (context, codec_id, mime, NULL);
> ++ if (context)
> ++ gst_caps_set_simple (caps,
> ++ "block_align", G_TYPE_INT, context->block_align,
> ++ "bitrate", G_TYPE_INT, context->bit_rate, NULL);
> ++ g_free (mime);
> ++ break;
> ++ default:
> ++ break;
> ++ }
> ++ }
> ++ }
> ++
> ++ if (caps != NULL) {
> ++
> ++ /* set private data */
> ++ if (context && context->extradata_size > 0) {
> ++ GstBuffer *data = gst_buffer_new_and_alloc (context->extradata_size);
> ++
> ++ memcpy (GST_BUFFER_DATA (data), context->extradata,
> ++ context->extradata_size);
> ++ gst_caps_set_simple (caps, "codec_data", GST_TYPE_BUFFER, data, NULL);
> ++ gst_buffer_unref (data);
> ++ }
> ++
> ++ /* palette */
> ++ if (context) {
> ++ gst_ffmpeg_set_palette (caps, context);
> ++ }
> ++
> ++ GST_LOG ("caps for codec_id=%d: %" GST_PTR_FORMAT, codec_id, caps);
> ++
> ++ } else {
> ++ GST_LOG ("No caps found for codec_id=%d", codec_id);
> ++ }
> ++
> ++ return caps;
> ++}
> ++
> ++/* Convert a FFMPEG Pixel Format and optional AVCodecContext
> ++ * to a GstCaps. If the context is omitted, no fixed values
> ++ * for video/audio size will be included in the GstCaps
> ++ *
> ++ * See below for usefulness
> ++ */
> ++
> ++GstCaps *
> ++gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt, AVCodecContext * context,
> ++ enum CodecID codec_id)
> ++{
> ++ GstCaps *caps = NULL;
> ++
> ++ int bpp = 0, depth = 0, endianness = 0;
> ++ gulong g_mask = 0, r_mask = 0, b_mask = 0, a_mask = 0;
> ++ guint32 fmt = 0;
> ++
> ++ switch (pix_fmt) {
> ++ case PIX_FMT_YUVJ420P:
> ++ case PIX_FMT_YUV420P:
> ++ fmt = GST_MAKE_FOURCC ('I', '4', '2', '0');
> ++ break;
> ++ case PIX_FMT_YUVA420P:
> ++ fmt = GST_MAKE_FOURCC ('A', '4', '2', '0');
> ++ break;
> ++ case PIX_FMT_YUYV422:
> ++ fmt = GST_MAKE_FOURCC ('Y', 'U', 'Y', '2');
> ++ break;
> ++ case PIX_FMT_RGB24:
> ++ bpp = depth = 24;
> ++ endianness = G_BIG_ENDIAN;
> ++ r_mask = 0xff0000;
> ++ g_mask = 0x00ff00;
> ++ b_mask = 0x0000ff;
> ++ break;
> ++ case PIX_FMT_BGR24:
> ++ bpp = depth = 24;
> ++ endianness = G_BIG_ENDIAN;
> ++ r_mask = 0x0000ff;
> ++ g_mask = 0x00ff00;
> ++ b_mask = 0xff0000;
> ++ break;
> ++ case PIX_FMT_YUVJ422P:
> ++ case PIX_FMT_YUV422P:
> ++ fmt = GST_MAKE_FOURCC ('Y', '4', '2', 'B');
> ++ break;
> ++ case PIX_FMT_YUVJ444P:
> ++ case PIX_FMT_YUV444P:
> ++ fmt = GST_MAKE_FOURCC ('Y', '4', '4', '4');
> ++ break;
> ++ case PIX_FMT_RGB32:
> ++ bpp = 32;
> ++ depth = 32;
> ++ endianness = G_BIG_ENDIAN;
> ++#if (G_BYTE_ORDER == G_BIG_ENDIAN)
> ++ r_mask = 0x00ff0000;
> ++ g_mask = 0x0000ff00;
> ++ b_mask = 0x000000ff;
> ++ a_mask = 0xff000000;
> ++#else
> ++ r_mask = 0x0000ff00;
> ++ g_mask = 0x00ff0000;
> ++ b_mask = 0xff000000;
> ++ a_mask = 0x000000ff;
> ++#endif
> ++ break;
> ++ case PIX_FMT_YUV410P:
> ++ fmt = GST_MAKE_FOURCC ('Y', 'U', 'V', '9');
> ++ break;
> ++ case PIX_FMT_YUV411P:
> ++ fmt = GST_MAKE_FOURCC ('Y', '4', '1', 'B');
> ++ break;
> ++ case PIX_FMT_RGB565:
> ++ bpp = depth = 16;
> ++ endianness = G_BYTE_ORDER;
> ++ r_mask = 0xf800;
> ++ g_mask = 0x07e0;
> ++ b_mask = 0x001f;
> ++ break;
> ++ case PIX_FMT_RGB555:
> ++ bpp = 16;
> ++ depth = 15;
> ++ endianness = G_BYTE_ORDER;
> ++ r_mask = 0x7c00;
> ++ g_mask = 0x03e0;
> ++ b_mask = 0x001f;
> ++ break;
> ++ case PIX_FMT_PAL8:
> ++ bpp = depth = 8;
> ++ endianness = G_BYTE_ORDER;
> ++ break;
> ++ case PIX_FMT_GRAY8:
> ++ bpp = depth = 8;
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-raw-gray",
> ++ "bpp", G_TYPE_INT, bpp, "depth", G_TYPE_INT, depth, NULL);
> ++ break;
> ++ default:
> ++ /* give up ... */
> ++ break;
> ++ }
> ++
> ++ if (caps == NULL) {
> ++ if (bpp != 0) {
> ++ if (r_mask != 0) {
> ++ if (a_mask) {
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-raw-rgb",
> ++ "bpp", G_TYPE_INT, bpp,
> ++ "depth", G_TYPE_INT, depth,
> ++ "red_mask", G_TYPE_INT, r_mask,
> ++ "green_mask", G_TYPE_INT, g_mask,
> ++ "blue_mask", G_TYPE_INT, b_mask,
> ++ "alpha_mask", G_TYPE_INT, a_mask,
> ++ "endianness", G_TYPE_INT, endianness, NULL);
> ++ } else {
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-raw-rgb",
> ++ "bpp", G_TYPE_INT, bpp,
> ++ "depth", G_TYPE_INT, depth,
> ++ "red_mask", G_TYPE_INT, r_mask,
> ++ "green_mask", G_TYPE_INT, g_mask,
> ++ "blue_mask", G_TYPE_INT, b_mask,
> ++ "endianness", G_TYPE_INT, endianness, NULL);
> ++ }
> ++ } else {
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-raw-rgb",
> ++ "bpp", G_TYPE_INT, bpp,
> ++ "depth", G_TYPE_INT, depth,
> ++ "endianness", G_TYPE_INT, endianness, NULL);
> ++ if (caps && context) {
> ++ gst_ffmpeg_set_palette (caps, context);
> ++ }
> ++ }
> ++ } else if (fmt) {
> ++ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-raw-yuv",
> ++ "format", GST_TYPE_FOURCC, fmt, NULL);
> ++ }
> ++ }
> ++
> ++ if (caps != NULL) {
> ++ GST_DEBUG ("caps for pix_fmt=%d: %" GST_PTR_FORMAT, pix_fmt, caps);
> ++ } else {
> ++ GST_LOG ("No caps found for pix_fmt=%d", pix_fmt);
> ++ }
> ++
> ++ return caps;
> ++}
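
Not part of the patch -- a minimal usage sketch of the converter above, called
with a NULL context so only the pixel format ends up fixed. The helper name is
made up; gst_ffmpeg_pixfmt_to_caps() is the function from the hunk above, the
rest is stock GStreamer 0.10 / libav API:

  #include <gst/gst.h>
  #include <libavcodec/avcodec.h>
  #include "gstffmpegcodecmap.h"

  static void
  print_i420_template (void)
  {
    /* NULL context: width/height/framerate stay as ranges */
    GstCaps *caps =
        gst_ffmpeg_pixfmt_to_caps (PIX_FMT_YUV420P, NULL, CODEC_ID_RAWVIDEO);
    gchar *s = gst_caps_to_string (caps);

    g_print ("%s\n", s);    /* e.g. video/x-raw-yuv, format=(fourcc)I420, ... */
    g_free (s);
    gst_caps_unref (caps);
  }
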
> ++
> ++/* Convert a FFMPEG Sample Format and optional AVCodecContext
> ++ * to a GstCaps. If the context is omitted, no fixed values
> ++ * for video/audio size will be included in the GstCaps
> ++ *
> ++ * See below for usefulness
> ++ */
> ++
> ++static GstCaps *
> ++gst_ffmpeg_smpfmt_to_caps (enum AVSampleFormat sample_fmt,
> ++ AVCodecContext * context, enum CodecID codec_id)
> ++{
> ++ GstCaps *caps = NULL;
> ++
> ++ int bpp = 0;
> ++ gboolean integer = TRUE;
> ++ gboolean signedness = FALSE;
> ++
> ++ switch (sample_fmt) {
> ++ case AV_SAMPLE_FMT_S16:
> ++ signedness = TRUE;
> ++ bpp = 16;
> ++ break;
> ++
> ++ case AV_SAMPLE_FMT_S32:
> ++ signedness = TRUE;
> ++ bpp = 32;
> ++ break;
> ++
> ++ case AV_SAMPLE_FMT_FLT:
> ++ integer = FALSE;
> ++ bpp = 32;
> ++ break;
> ++
> ++ case AV_SAMPLE_FMT_DBL:
> ++ integer = FALSE;
> ++ bpp = 64;
> ++ break;
> ++ default:
> ++ /* .. */
> ++ break;
> ++ }
> ++
> ++ if (bpp) {
> ++ if (integer) {
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-raw-int",
> ++ "signed", G_TYPE_BOOLEAN, signedness,
> ++ "endianness", G_TYPE_INT, G_BYTE_ORDER,
> ++ "width", G_TYPE_INT, bpp, "depth", G_TYPE_INT, bpp, NULL);
> ++ } else {
> ++ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-raw-float",
> ++ "endianness", G_TYPE_INT, G_BYTE_ORDER,
> ++ "width", G_TYPE_INT, bpp, NULL);
> ++ }
> ++ }
> ++
> ++ if (caps != NULL) {
> ++ GST_LOG ("caps for sample_fmt=%d: %" GST_PTR_FORMAT, sample_fmt, caps);
> ++ } else {
> ++ GST_LOG ("No caps found for sample_fmt=%d", sample_fmt);
> ++ }
> ++
> ++ return caps;
> ++}
> ++
> ++GstCaps *
> ++gst_ffmpeg_codectype_to_audio_caps (AVCodecContext * context,
> ++ enum CodecID codec_id, gboolean encode, AVCodec * codec)
> ++{
> ++ GstCaps *caps = NULL;
> ++
> ++ GST_DEBUG ("context:%p, codec_id:%d, encode:%d, codec:%p",
> ++ context, codec_id, encode, codec);
> ++ if (codec)
> ++ GST_DEBUG ("sample_fmts:%p, samplerates:%p",
> ++ codec->sample_fmts, codec->supported_samplerates);
> ++
> ++ if (context) {
> ++ /* Specific codec context */
> ++ caps = gst_ffmpeg_smpfmt_to_caps (context->sample_fmt, context, codec_id);
> ++ } else if (codec && codec->sample_fmts) {
> ++ GstCaps *temp;
> ++ int i;
> ++
> ++ caps = gst_caps_new_empty ();
> ++ for (i = 0; codec->sample_fmts[i] != -1; i++) {
> ++ temp =
> ++ gst_ffmpeg_smpfmt_to_caps (codec->sample_fmts[i], context, codec_id);
> ++ if (temp != NULL)
> ++ gst_caps_append (caps, temp);
> ++ }
> ++ } else {
> ++ GstCaps *temp;
> ++ enum AVSampleFormat i;
> ++ AVCodecContext ctx = { 0, };
> ++
> ++ ctx.channels = -1;
> ++ caps = gst_caps_new_empty ();
> ++ for (i = 0; i <= AV_SAMPLE_FMT_DBL; i++) {
> ++ temp = gst_ffmpeg_smpfmt_to_caps (i, encode ? &ctx : NULL, codec_id);
> ++ if (temp != NULL) {
> ++ gst_caps_append (caps, temp);
> ++ }
> ++ }
> ++ }
> ++ return caps;
> ++}
> ++
> ++GstCaps *
> ++gst_ffmpeg_codectype_to_video_caps (AVCodecContext * context,
> ++ enum CodecID codec_id, gboolean encode, AVCodec * codec)
> ++{
> ++ GstCaps *caps;
> ++
> ++ GST_LOG ("context:%p, codec_id:%d, encode:%d, codec:%p",
> ++ context, codec_id, encode, codec);
> ++
> ++ if (context) {
> ++ caps = gst_ffmpeg_pixfmt_to_caps (context->pix_fmt, context, codec_id);
> ++ } else {
> ++ GstCaps *temp;
> ++ enum PixelFormat i;
> ++ AVCodecContext ctx = { 0, };
> ++
> ++ caps = gst_caps_new_empty ();
> ++ for (i = 0; i < PIX_FMT_NB; i++) {
> ++ ctx.width = -1;
> ++ ctx.pix_fmt = i;
> ++ temp = gst_ffmpeg_pixfmt_to_caps (i, encode ? &ctx : NULL, codec_id);
> ++ if (temp != NULL) {
> ++ gst_caps_append (caps, temp);
> ++ }
> ++ }
> ++ }
> ++ return caps;
> ++}
> ++
> ++/* Convert a FFMPEG codec Type and optional AVCodecContext
> ++ * to a GstCaps. If the context is omitted, no fixed values
> ++ * for video/audio size will be included in the GstCaps
> ++ *
> ++ * AVMediaType is primarily meant for uncompressed data GstCaps!
> ++ */
> ++
> ++GstCaps *
> ++gst_ffmpeg_codectype_to_caps (enum AVMediaType codec_type,
> ++ AVCodecContext * context, enum CodecID codec_id, gboolean encode)
> ++{
> ++ GstCaps *caps;
> ++
> ++ switch (codec_type) {
> ++ case AVMEDIA_TYPE_VIDEO:
> ++ caps =
> ++ gst_ffmpeg_codectype_to_video_caps (context, codec_id, encode, NULL);
> ++ break;
> ++ case AVMEDIA_TYPE_AUDIO:
> ++ caps =
> ++ gst_ffmpeg_codectype_to_audio_caps (context, codec_id, encode, NULL);
> ++ break;
> ++ default:
> ++ caps = NULL;
> ++ break;
> ++ }
> ++
> ++ return caps;
> ++}
> ++
> ++/* Convert a GstCaps (audio/raw) to a FFMPEG SampleFmt
> ++ * and other audio properties in a AVCodecContext.
> ++ *
> ++ * For usefulness, see below
> ++ */
> ++
> ++static void
> ++gst_ffmpeg_caps_to_smpfmt (const GstCaps * caps,
> ++ AVCodecContext * context, gboolean raw)
> ++{
> ++ GstStructure *structure;
> ++ gint depth = 0, width = 0, endianness = 0;
> ++ gboolean signedness = FALSE;
> ++ const gchar *name;
> ++
> ++ g_return_if_fail (gst_caps_get_size (caps) == 1);
> ++ structure = gst_caps_get_structure (caps, 0);
> ++
> ++ gst_structure_get_int (structure, "channels", &context->channels);
> ++ gst_structure_get_int (structure, "rate", &context->sample_rate);
> ++ gst_structure_get_int (structure, "block_align", &context->block_align);
> ++ gst_structure_get_int (structure, "bitrate", &context->bit_rate);
> ++
> ++ if (!raw)
> ++ return;
> ++
> ++ name = gst_structure_get_name (structure);
> ++
> ++ if (!strcmp (name, "audio/x-raw-float")) {
> ++ /* FLOAT */
> ++ if (gst_structure_get_int (structure, "width", &width) &&
> ++ gst_structure_get_int (structure, "endianness", &endianness)) {
> ++ if (endianness == G_BYTE_ORDER) {
> ++ if (width == 32)
> ++ context->sample_fmt = AV_SAMPLE_FMT_FLT;
> ++ else if (width == 64)
> ++ context->sample_fmt = AV_SAMPLE_FMT_DBL;
> ++ }
> ++ }
> ++ } else {
> ++ /* INT */
> ++ if (gst_structure_get_int (structure, "width", &width) &&
> ++ gst_structure_get_int (structure, "depth", &depth) &&
> ++ gst_structure_get_boolean (structure, "signed", &signedness) &&
> ++ gst_structure_get_int (structure, "endianness", &endianness)) {
> ++ if ((endianness == G_BYTE_ORDER) && (signedness == TRUE)) {
> ++ if ((width == 16) && (depth == 16))
> ++ context->sample_fmt = AV_SAMPLE_FMT_S16;
> ++ else if ((width == 32) && (depth == 32))
> ++ context->sample_fmt = AV_SAMPLE_FMT_S32;
> ++ }
> ++ }
> ++ }
> ++}
> ++
> ++
> ++/* Convert a GstCaps (video/raw) to a FFMPEG PixFmt
> ++ * and other video properties in a AVCodecContext.
> ++ *
> ++ * For usefulness, see below
> ++ */
> ++
> ++static void
> ++gst_ffmpeg_caps_to_pixfmt (const GstCaps * caps,
> ++ AVCodecContext * context, gboolean raw)
> ++{
> ++ GstStructure *structure;
> ++ const GValue *fps;
> ++ const GValue *par = NULL;
> ++
> ++ GST_DEBUG ("converting caps %" GST_PTR_FORMAT, caps);
> ++ g_return_if_fail (gst_caps_get_size (caps) == 1);
> ++ structure = gst_caps_get_structure (caps, 0);
> ++
> ++ gst_structure_get_int (structure, "width", &context->width);
> ++ gst_structure_get_int (structure, "height", &context->height);
> ++ gst_structure_get_int (structure, "bpp", &context->bits_per_coded_sample);
> ++
> ++ fps = gst_structure_get_value (structure, "framerate");
> ++ if (fps != NULL && GST_VALUE_HOLDS_FRACTION (fps)) {
> ++
> ++ /* somehow these seem mixed up.. */
> ++ context->time_base.den = gst_value_get_fraction_numerator (fps);
> ++ context->time_base.num = gst_value_get_fraction_denominator (fps);
> ++ context->ticks_per_frame = 1;
> ++
> ++ GST_DEBUG ("setting framerate %d/%d = %lf",
> ++ context->time_base.den, context->time_base.num,
> ++ 1. * context->time_base.den / context->time_base.num);
> ++ }
> ++
> ++ par = gst_structure_get_value (structure, "pixel-aspect-ratio");
> ++ if (par && GST_VALUE_HOLDS_FRACTION (par)) {
> ++
> ++ context->sample_aspect_ratio.num = gst_value_get_fraction_numerator (par);
> ++ context->sample_aspect_ratio.den = gst_value_get_fraction_denominator (par);
> ++
> ++ GST_DEBUG ("setting pixel-aspect-ratio %d/%d = %lf",
> ++ context->sample_aspect_ratio.den, context->sample_aspect_ratio.num,
> ++ 1. * context->sample_aspect_ratio.den /
> ++ context->sample_aspect_ratio.num);
> ++ }
> ++
> ++ if (!raw)
> ++ return;
> ++
> ++ g_return_if_fail (fps != NULL && GST_VALUE_HOLDS_FRACTION (fps));
> ++
> ++ if (strcmp (gst_structure_get_name (structure), "video/x-raw-yuv") == 0) {
> ++ guint32 fourcc;
> ++
> ++ if (gst_structure_get_fourcc (structure, "format", &fourcc)) {
> ++ switch (fourcc) {
> ++ case GST_MAKE_FOURCC ('Y', 'U', 'Y', '2'):
> ++ context->pix_fmt = PIX_FMT_YUYV422;
> ++ break;
> ++ case GST_MAKE_FOURCC ('I', '4', '2', '0'):
> ++ context->pix_fmt = PIX_FMT_YUV420P;
> ++ break;
> ++ case GST_MAKE_FOURCC ('A', '4', '2', '0'):
> ++ context->pix_fmt = PIX_FMT_YUVA420P;
> ++ break;
> ++ case GST_MAKE_FOURCC ('Y', '4', '1', 'B'):
> ++ context->pix_fmt = PIX_FMT_YUV411P;
> ++ break;
> ++ case GST_MAKE_FOURCC ('Y', '4', '2', 'B'):
> ++ context->pix_fmt = PIX_FMT_YUV422P;
> ++ break;
> ++ case GST_MAKE_FOURCC ('Y', 'U', 'V', '9'):
> ++ context->pix_fmt = PIX_FMT_YUV410P;
> ++ break;
> ++#if 0
> ++ case FIXME:
> ++ context->pix_fmt = PIX_FMT_YUV444P;
> ++ break;
> ++#endif
> ++ }
> ++ }
> ++ } else if (strcmp (gst_structure_get_name (structure),
> ++ "video/x-raw-rgb") == 0) {
> ++ gint bpp = 0, rmask = 0, endianness = 0;
> ++
> ++ if (gst_structure_get_int (structure, "bpp", &bpp) &&
> ++ gst_structure_get_int (structure, "endianness", &endianness)) {
> ++ if (gst_structure_get_int (structure, "red_mask", &rmask)) {
> ++ switch (bpp) {
> ++ case 32:
> ++#if (G_BYTE_ORDER == G_BIG_ENDIAN)
> ++ if (rmask == 0x00ff0000)
> ++#else
> ++ if (rmask == 0x0000ff00)
> ++#endif
> ++ context->pix_fmt = PIX_FMT_RGB32;
> ++ break;
> ++ case 24:
> ++ if (rmask == 0x0000FF)
> ++ context->pix_fmt = PIX_FMT_BGR24;
> ++ else
> ++ context->pix_fmt = PIX_FMT_RGB24;
> ++ break;
> ++ case 16:
> ++ if (endianness == G_BYTE_ORDER)
> ++ context->pix_fmt = PIX_FMT_RGB565;
> ++ break;
> ++ case 15:
> ++ if (endianness == G_BYTE_ORDER)
> ++ context->pix_fmt = PIX_FMT_RGB555;
> ++ break;
> ++ default:
> ++ /* nothing */
> ++ break;
> ++ }
> ++ } else {
> ++ if (bpp == 8) {
> ++ context->pix_fmt = PIX_FMT_PAL8;
> ++ gst_ffmpeg_get_palette (caps, context);
> ++ }
> ++ }
> ++ }
> ++ } else if (strcmp (gst_structure_get_name (structure),
> ++ "video/x-raw-gray") == 0) {
> ++ gint bpp = 0;
> ++
> ++ if (gst_structure_get_int (structure, "bpp", &bpp)) {
> ++ switch (bpp) {
> ++ case 8:
> ++ context->pix_fmt = PIX_FMT_GRAY8;
> ++ break;
> ++ }
> ++ }
> ++ }
> ++}
> ++
> ++/* Convert a GstCaps and a FFMPEG codec Type to a
> ++ * AVCodecContext. If the context is omitted, no fixed values
> ++ * for video/audio size will be included in the context
> ++ *
> ++ * AVMediaType is primarily meant for uncompressed data GstCaps!
> ++ */
> ++
> ++void
> ++gst_ffmpeg_caps_with_codectype (enum AVMediaType type,
> ++ const GstCaps * caps, AVCodecContext * context)
> ++{
> ++ if (context == NULL)
> ++ return;
> ++
> ++ switch (type) {
> ++ case AVMEDIA_TYPE_VIDEO:
> ++ gst_ffmpeg_caps_to_pixfmt (caps, context, TRUE);
> ++ break;
> ++
> ++ case AVMEDIA_TYPE_AUDIO:
> ++ gst_ffmpeg_caps_to_smpfmt (caps, context, TRUE);
> ++ break;
> ++
> ++ default:
> ++ /* unknown */
> ++ break;
> ++ }
> ++}
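
Not part of the patch -- the reverse direction in a nutshell: fixed raw caps
pushed back into an AVCodecContext via gst_ffmpeg_caps_with_codectype() from
the hunk above. fill_context_from_caps() is a made-up helper; the caps fields
match what gst_ffmpeg_caps_to_pixfmt() reads:

  static void
  fill_context_from_caps (AVCodecContext * ctx)
  {
    GstCaps *caps = gst_caps_new_simple ("video/x-raw-yuv",
        "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('I', '4', '2', '0'),
        "width", G_TYPE_INT, 320, "height", G_TYPE_INT, 240,
        "framerate", GST_TYPE_FRACTION, 25, 1, NULL);

    /* sets ctx->width/height/time_base and ctx->pix_fmt (PIX_FMT_YUV420P) */
    gst_ffmpeg_caps_with_codectype (AVMEDIA_TYPE_VIDEO, caps, ctx);
    gst_caps_unref (caps);
  }
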
> ++
> ++#if 0
> ++static void
> ++nal_escape (guint8 * dst, guint8 * src, guint size, guint * destsize)
> ++{
> ++ guint8 *dstp = dst;
> ++ guint8 *srcp = src;
> ++ guint8 *end = src + size;
> ++ gint count = 0;
> ++
> ++ while (srcp < end) {
> ++ if (count == 2 && *srcp <= 0x03) {
> ++ GST_DEBUG ("added escape code");
> ++ *dstp++ = 0x03;
> ++ count = 0;
> ++ }
> ++ if (*srcp == 0)
> ++ count++;
> ++ else
> ++ count = 0;
> ++
> ++ GST_DEBUG ("copy %02x, count %d", *srcp, count);
> ++ *dstp++ = *srcp++;
> ++ }
> ++ *destsize = dstp - dst;
> ++}
> ++
> ++/* copy the config, escaping NAL units as we iterate them, if something fails we
> ++ * copy everything and hope for the best. */
> ++static void
> ++copy_config (guint8 * dst, guint8 * src, guint size, guint * destsize)
> ++{
> ++ guint8 *dstp = dst;
> ++ guint8 *srcp = src;
> ++ gint cnt, i;
> ++ guint nalsize, esize;
> ++
> ++ /* check size */
> ++ if (size < 7)
> ++ goto full_copy;
> ++
> ++ /* check version */
> ++ if (*srcp != 1)
> ++ goto full_copy;
> ++
> ++ cnt = *(srcp + 5) & 0x1f; /* Number of sps */
> ++
> ++ GST_DEBUG ("num SPS %d", cnt);
> ++
> ++ memcpy (dstp, srcp, 6);
> ++ srcp += 6;
> ++ dstp += 6;
> ++
> ++ for (i = 0; i < cnt; i++) {
> ++ GST_DEBUG ("copy SPS %d", i);
> ++ nalsize = (srcp[0] << 8) | srcp[1];
> ++ nal_escape (dstp + 2, srcp + 2, nalsize, &esize);
> ++ dstp[0] = esize >> 8;
> ++ dstp[1] = esize & 0xff;
> ++ dstp += esize + 2;
> ++ srcp += nalsize + 2;
> ++ }
> ++
> ++ cnt = *(dstp++) = *(srcp++); /* Number of pps */
> ++
> ++ GST_DEBUG ("num PPS %d", cnt);
> ++
> ++ for (i = 0; i < cnt; i++) {
> ++ GST_DEBUG ("copy PPS %d", i);
> ++ nalsize = (srcp[0] << 8) | srcp[1];
> ++ nal_escape (dstp + 2, srcp + 2, nalsize, &esize);
> ++ dstp[0] = esize >> 8;
> ++ dstp[1] = esize & 0xff;
> ++ dstp += esize + 2;
> ++ srcp += nalsize + 2;
> ++ }
> ++ *destsize = dstp - dst;
> ++
> ++ return;
> ++
> ++full_copy:
> ++ {
> ++ GST_DEBUG ("something unexpected, doing full copy");
> ++ memcpy (dst, src, size);
> ++ *destsize = size;
> ++ return;
> ++ }
> ++}
> ++#endif
> ++
> ++/*
> ++ * caps_with_codecid () transforms a GstCaps for a known codec
> ++ * ID into a filled-in context.
> ++ * codec_data from caps will override possible extradata already in the context
> ++ */
> ++
> ++void
> ++gst_ffmpeg_caps_with_codecid (enum CodecID codec_id,
> ++ enum AVMediaType codec_type, const GstCaps * caps, AVCodecContext * context)
> ++{
> ++ GstStructure *str;
> ++ const GValue *value;
> ++ const GstBuffer *buf;
> ++
> ++ GST_LOG ("codec_id:%d, codec_type:%d, caps:%" GST_PTR_FORMAT " context:%p",
> ++ codec_id, codec_type, caps, context);
> ++
> ++ if (!context || !gst_caps_get_size (caps))
> ++ return;
> ++
> ++ str = gst_caps_get_structure (caps, 0);
> ++
> ++ /* extradata parsing (esds [mpeg4], wma/wmv, msmpeg4v1/2/3, etc.) */
> ++ if ((value = gst_structure_get_value (str, "codec_data"))) {
> ++ guint size;
> ++ guint8 *data;
> ++
> ++ buf = GST_BUFFER_CAST (gst_value_get_mini_object (value));
> ++ size = GST_BUFFER_SIZE (buf);
> ++ data = GST_BUFFER_DATA (buf);
> ++
> ++ /* free the old one if it is there */
> ++ if (context->extradata)
> ++ av_free (context->extradata);
> ++
> ++#if 0
> ++ if (codec_id == CODEC_ID_H264) {
> ++ guint extrasize;
> ++
> ++ GST_DEBUG ("copy, escaping codec_data %d", size);
> ++ /* ffmpeg h264 expects the codec_data to be escaped, there is no real
> ++ * reason for this but let's just escape it for now. Start by allocating
> ++ * enough space, x2 is more than enough.
> ++ *
> ++ * FIXME, we disabled escaping because some file already contain escaped
> ++ * codec_data and then we escape twice and fail. It's better to leave it
> ++ * as is, as that is what most players do. */
> ++ context->extradata =
> ++ av_mallocz (GST_ROUND_UP_16 (size * 2 +
> ++ FF_INPUT_BUFFER_PADDING_SIZE));
> ++ copy_config (context->extradata, data, size, &extrasize);
> ++ GST_DEBUG ("escaped size: %d", extrasize);
> ++ context->extradata_size = extrasize;
> ++ } else
> ++#endif
> ++ {
> ++ /* allocate with enough padding */
> ++ GST_DEBUG ("copy codec_data");
> ++ context->extradata =
> ++ av_mallocz (GST_ROUND_UP_16 (size + FF_INPUT_BUFFER_PADDING_SIZE));
> ++ memcpy (context->extradata, data, size);
> ++ context->extradata_size = size;
> ++ }
> ++
> ++ /* Hack for VC1. Sometimes the first (length) byte is 0 for some files */
> ++ if (codec_id == CODEC_ID_VC1 && size > 0 && data[0] == 0) {
> ++ context->extradata[0] = (guint8) size;
> ++ }
> ++
> ++ GST_DEBUG ("have codec data of size %d", size);
> ++ } else if (context->extradata == NULL && codec_id != CODEC_ID_AAC_LATM &&
> ++ codec_id != CODEC_ID_FLAC) {
> ++ /* no extradata, alloc a zero-sized dummy, some codecs insist on reading
> ++ * extradata anyway which makes them segfault. */
> ++ context->extradata =
> ++ av_mallocz (GST_ROUND_UP_16 (FF_INPUT_BUFFER_PADDING_SIZE));
> ++ context->extradata_size = 0;
> ++ GST_DEBUG ("no codec data");
> ++ }
> ++
> ++ switch (codec_id) {
> ++ case CODEC_ID_MPEG4:
> ++ {
> ++ const gchar *mime = gst_structure_get_name (str);
> ++
> ++ if (!strcmp (mime, "video/x-divx"))
> ++ context->codec_tag = GST_MAKE_FOURCC ('D', 'I', 'V', 'X');
> ++ else if (!strcmp (mime, "video/x-xvid"))
> ++ context->codec_tag = GST_MAKE_FOURCC ('X', 'V', 'I', 'D');
> ++ else if (!strcmp (mime, "video/x-3ivx"))
> ++ context->codec_tag = GST_MAKE_FOURCC ('3', 'I', 'V', '1');
> ++ else if (!strcmp (mime, "video/mpeg"))
> ++ context->codec_tag = GST_MAKE_FOURCC ('m', 'p', '4', 'v');
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_SVQ3:
> ++ /* FIXME: this is a workaround for older gst-plugins releases
> ++ * (<= 0.8.9). This should be removed at some point, because
> ++ * it causes wrong decoded frame order. */
> ++ if (!context->extradata) {
> ++ gint halfpel_flag, thirdpel_flag, low_delay, unknown_svq3_flag;
> ++ guint16 flags;
> ++
> ++ if (gst_structure_get_int (str, "halfpel_flag", &halfpel_flag) ||
> ++ gst_structure_get_int (str, "thirdpel_flag", &thirdpel_flag) ||
> ++ gst_structure_get_int (str, "low_delay", &low_delay) ||
> ++ gst_structure_get_int (str, "unknown_svq3_flag",
> ++ &unknown_svq3_flag)) {
> ++ context->extradata = (guint8 *) av_mallocz (0x64);
> ++ g_stpcpy ((gchar *) context->extradata, "SVQ3");
> ++ flags = 1 << 3;
> ++ flags |= low_delay;
> ++ flags = flags << 2;
> ++ flags |= unknown_svq3_flag;
> ++ flags = flags << 6;
> ++ flags |= halfpel_flag;
> ++ flags = flags << 1;
> ++ flags |= thirdpel_flag;
> ++ flags = flags << 3;
> ++
> ++ flags = GUINT16_FROM_LE (flags);
> ++
> ++ memcpy ((gchar *) context->extradata + 0x62, &flags, 2);
> ++ context->extradata_size = 0x64;
> ++ }
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_MSRLE:
> ++ case CODEC_ID_QTRLE:
> ++ case CODEC_ID_TSCC:
> ++ case CODEC_ID_CSCD:
> ++ case CODEC_ID_APE:
> ++ {
> ++ gint depth;
> ++
> ++ if (gst_structure_get_int (str, "depth", &depth)) {
> ++ context->bits_per_coded_sample = depth;
> ++ } else {
> ++ GST_WARNING ("No depth field in caps %" GST_PTR_FORMAT, caps);
> ++ }
> ++
> ++ }
> ++ break;
> ++
> ++ case CODEC_ID_RV10:
> ++ case CODEC_ID_RV20:
> ++ case CODEC_ID_RV30:
> ++ case CODEC_ID_RV40:
> ++ {
> ++ gint format;
> ++
> ++ if (gst_structure_get_int (str, "format", &format))
> ++ context->sub_id = format;
> ++
> ++ break;
> ++ }
> ++ case CODEC_ID_COOK:
> ++ case CODEC_ID_RA_288:
> ++ case CODEC_ID_RA_144:
> ++ case CODEC_ID_SIPR:
> ++ {
> ++ gint leaf_size;
> ++ gint bitrate;
> ++
> ++ if (gst_structure_get_int (str, "leaf_size", &leaf_size))
> ++ context->block_align = leaf_size;
> ++ if (gst_structure_get_int (str, "bitrate", &bitrate))
> ++ context->bit_rate = bitrate;
> ++ }
> ++ case CODEC_ID_ALAC:
> ++ gst_structure_get_int (str, "samplesize",
> ++ &context->bits_per_coded_sample);
> ++ break;
> ++
> ++ case CODEC_ID_DVVIDEO:
> ++ {
> ++ guint32 fourcc;
> ++
> ++ if (gst_structure_get_fourcc (str, "format", &fourcc))
> ++ switch (fourcc) {
> ++ case GST_MAKE_FOURCC ('Y', 'U', 'Y', '2'):
> ++ context->pix_fmt = PIX_FMT_YUYV422;
> ++ break;
> ++ case GST_MAKE_FOURCC ('I', '4', '2', '0'):
> ++ context->pix_fmt = PIX_FMT_YUV420P;
> ++ break;
> ++ case GST_MAKE_FOURCC ('A', '4', '2', '0'):
> ++ context->pix_fmt = PIX_FMT_YUVA420P;
> ++ break;
> ++ case GST_MAKE_FOURCC ('Y', '4', '1', 'B'):
> ++ context->pix_fmt = PIX_FMT_YUV411P;
> ++ break;
> ++ case GST_MAKE_FOURCC ('Y', '4', '2', 'B'):
> ++ context->pix_fmt = PIX_FMT_YUV422P;
> ++ break;
> ++ case GST_MAKE_FOURCC ('Y', 'U', 'V', '9'):
> ++ context->pix_fmt = PIX_FMT_YUV410P;
> ++ break;
> ++ default:
> ++ GST_WARNING ("couldn't convert fourcc %" GST_FOURCC_FORMAT
> ++ " to a pixel format", GST_FOURCC_ARGS (fourcc));
> ++ break;
> ++ }
> ++ break;
> ++ }
> ++ case CODEC_ID_H263P:
> ++ {
> ++ gboolean val;
> ++
> ++ if (!gst_structure_get_boolean (str, "annex-f", &val) || val)
> ++ context->flags |= CODEC_FLAG_4MV;
> ++ else
> ++ context->flags &= ~CODEC_FLAG_4MV;
> ++ if ((!gst_structure_get_boolean (str, "annex-i", &val) || val) &&
> ++ (!gst_structure_get_boolean (str, "annex-t", &val) || val))
> ++ context->flags |= CODEC_FLAG_AC_PRED;
> ++ else
> ++ context->flags &= ~CODEC_FLAG_AC_PRED;
> ++ if (!gst_structure_get_boolean (str, "annex-j", &val) || val)
> ++ context->flags |= CODEC_FLAG_LOOP_FILTER;
> ++ else
> ++ context->flags &= ~CODEC_FLAG_LOOP_FILTER;
> ++ break;
> ++ }
> ++ case CODEC_ID_ADPCM_G726:
> ++ {
> ++ const gchar *layout;
> ++
> ++ if ((layout = gst_structure_get_string (str, "layout"))) {
> ++ if (!strcmp (layout, "g721")) {
> ++ context->sample_rate = 8000;
> ++ context->channels = 1;
> ++ context->bit_rate = 32000;
> ++ }
> ++ }
> ++ break;
> ++ }
> ++ default:
> ++ break;
> ++ }
> ++
> ++ if (!gst_caps_is_fixed (caps))
> ++ return;
> ++
> ++ /* common properties (width, height, fps) */
> ++ switch (codec_type) {
> ++ case AVMEDIA_TYPE_VIDEO:
> ++ gst_ffmpeg_caps_to_pixfmt (caps, context, codec_id == CODEC_ID_RAWVIDEO);
> ++ gst_ffmpeg_get_palette (caps, context);
> ++ break;
> ++ case AVMEDIA_TYPE_AUDIO:
> ++ gst_ffmpeg_caps_to_smpfmt (caps, context, FALSE);
> ++ break;
> ++ default:
> ++ break;
> ++ }
> ++
> ++ /* fixup of default settings */
> ++ switch (codec_id) {
> ++ case CODEC_ID_QCELP:
> ++ /* QCELP is always mono, no matter what the caps say */
> ++ context->channels = 1;
> ++ break;
> ++ default:
> ++ break;
> ++ }
> ++}
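
Not part of the patch -- a sketch of the codec_data path handled above: a fixed
compressed-caps structure carrying a codec_data buffer ends up as padded
extradata in the context (plus the DivX/XviD/mp4v codec_tag fixups).
set_stream_headers() is a made-up helper:

  static void
  set_stream_headers (AVCodecContext * ctx, GstBuffer * codec_data)
  {
    GstCaps *caps = gst_caps_new_simple ("video/mpeg",
        "mpegversion", G_TYPE_INT, 4,
        "systemstream", G_TYPE_BOOLEAN, FALSE,
        "codec_data", GST_TYPE_BUFFER, codec_data, NULL);

    /* copies the buffer into ctx->extradata (with padding) and sets
     * the mp4v codec_tag, as in the CODEC_ID_MPEG4 case above */
    gst_ffmpeg_caps_with_codecid (CODEC_ID_MPEG4, AVMEDIA_TYPE_VIDEO,
        caps, ctx);
    gst_caps_unref (caps);
  }
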
> ++
> ++/* _formatid_to_caps () is meant for muxers/demuxers, it
> ++ * transforms a name (ffmpeg way of ID'ing these, why don't
> ++ * they have unique numerical IDs?) to the corresponding
> ++ * caps belonging to that mux-format
> ++ *
> ++ * Note: we don't need any additional info because the caps
> ++ * isn't supposed to contain any useful info besides the
> ++ * media type anyway
> ++ */
> ++
> ++GstCaps *
> ++gst_ffmpeg_formatid_to_caps (const gchar * format_name)
> ++{
> ++ GstCaps *caps = NULL;
> ++
> ++ if (!strcmp (format_name, "mpeg")) {
> ++ caps = gst_caps_new_simple ("video/mpeg",
> ++ "systemstream", G_TYPE_BOOLEAN, TRUE, NULL);
> ++ } else if (!strcmp (format_name, "mpegts")) {
> ++ caps = gst_caps_new_simple ("video/mpegts",
> ++ "systemstream", G_TYPE_BOOLEAN, TRUE, NULL);
> ++ } else if (!strcmp (format_name, "rm")) {
> ++ caps = gst_caps_new_simple ("application/x-pn-realmedia",
> ++ "systemstream", G_TYPE_BOOLEAN, TRUE, NULL);
> ++ } else if (!strcmp (format_name, "asf")) {
> ++ caps = gst_caps_new_simple ("video/x-ms-asf", NULL);
> ++ } else if (!strcmp (format_name, "avi")) {
> ++ caps = gst_caps_new_simple ("video/x-msvideo", NULL);
> ++ } else if (!strcmp (format_name, "wav")) {
> ++ caps = gst_caps_new_simple ("audio/x-wav", NULL);
> ++ } else if (!strcmp (format_name, "ape")) {
> ++ caps = gst_caps_new_simple ("application/x-ape", NULL);
> ++ } else if (!strcmp (format_name, "swf")) {
> ++ caps = gst_caps_new_simple ("application/x-shockwave-flash", NULL);
> ++ } else if (!strcmp (format_name, "au")) {
> ++ caps = gst_caps_new_simple ("audio/x-au", NULL);
> ++ } else if (!strcmp (format_name, "dv")) {
> ++ caps = gst_caps_new_simple ("video/x-dv",
> ++ "systemstream", G_TYPE_BOOLEAN, TRUE, NULL);
> ++ } else if (!strcmp (format_name, "4xm")) {
> ++ caps = gst_caps_new_simple ("video/x-4xm", NULL);
> ++ } else if (!strcmp (format_name, "matroska")) {
> ++ caps = gst_caps_new_simple ("video/x-matroska", NULL);
> ++ } else if (!strcmp (format_name, "mp3")) {
> ++ caps = gst_caps_new_simple ("application/x-id3", NULL);
> ++ } else if (!strcmp (format_name, "flic")) {
> ++ caps = gst_caps_new_simple ("video/x-fli", NULL);
> ++ } else if (!strcmp (format_name, "flv")) {
> ++ caps = gst_caps_new_simple ("video/x-flv", NULL);
> ++ } else if (!strcmp (format_name, "tta")) {
> ++ caps = gst_caps_new_simple ("audio/x-ttafile", NULL);
> ++ } else if (!strcmp (format_name, "aiff")) {
> ++ caps = gst_caps_new_simple ("audio/x-aiff", NULL);
> ++ } else if (!strcmp (format_name, "mov_mp4_m4a_3gp_3g2")) {
> ++ caps =
> ++ gst_caps_from_string
> ++ ("application/x-3gp; video/quicktime; audio/x-m4a");
> ++ } else if (!strcmp (format_name, "mov")) {
> ++ caps = gst_caps_from_string ("video/quicktime,variant=(string)apple");
> ++ } else if (!strcmp (format_name, "mp4")) {
> ++ caps = gst_caps_from_string ("video/quicktime,variant=(string)iso");
> ++ } else if (!strcmp (format_name, "3gp")) {
> ++ caps = gst_caps_from_string ("video/quicktime,variant=(string)3gpp");
> ++ } else if (!strcmp (format_name, "3g2")) {
> ++ caps = gst_caps_from_string ("video/quicktime,variant=(string)3g2");
> ++ } else if (!strcmp (format_name, "psp")) {
> ++ caps = gst_caps_from_string ("video/quicktime,variant=(string)psp");
> ++ } else if (!strcmp (format_name, "ipod")) {
> ++ caps = gst_caps_from_string ("video/quicktime,variant=(string)ipod");
> ++ } else if (!strcmp (format_name, "aac")) {
> ++ caps = gst_caps_new_simple ("audio/mpeg",
> ++ "mpegversion", G_TYPE_INT, 4, NULL);
> ++ } else if (!strcmp (format_name, "gif")) {
> ++ caps = gst_caps_from_string ("image/gif");
> ++ } else if (!strcmp (format_name, "ogg")) {
> ++ caps = gst_caps_from_string ("application/ogg");
> ++ } else if (!strcmp (format_name, "mxf") || !strcmp (format_name, "mxf_d10")) {
> ++ caps = gst_caps_from_string ("application/mxf");
> ++ } else if (!strcmp (format_name, "gxf")) {
> ++ caps = gst_caps_from_string ("application/gxf");
> ++ } else if (!strcmp (format_name, "yuv4mpegpipe")) {
> ++ caps = gst_caps_new_simple ("application/x-yuv4mpeg",
> ++ "y4mversion", G_TYPE_INT, 2, NULL);
> ++ } else if (!strcmp (format_name, "mpc")) {
> ++ caps = gst_caps_from_string ("audio/x-musepack, streamversion = (int) 7");
> ++ } else if (!strcmp (format_name, "vqf")) {
> ++ caps = gst_caps_from_string ("audio/x-vqf");
> ++ } else if (!strcmp (format_name, "nsv")) {
> ++ caps = gst_caps_from_string ("video/x-nsv");
> ++ } else if (!strcmp (format_name, "amr")) {
> ++ caps = gst_caps_from_string ("audio/x-amr-nb-sh");
> ++ } else if (!strcmp (format_name, "webm")) {
> ++ caps = gst_caps_from_string ("video/webm");
> ++ } else {
> ++ gchar *name;
> ++
> ++ GST_LOG ("Could not create stream format caps for %s", format_name);
> ++ name = g_strdup_printf ("application/x-gst_ff-%s", format_name);
> ++ caps = gst_caps_new_simple (name, NULL);
> ++ g_free (name);
> ++ }
> ++
> ++ return caps;
> ++}
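
Not part of the patch -- how the demuxer wrapper typically uses this: libav's
short format name from the AVInputFormat becomes sink caps, with the
application/x-gst_ff-<name> fallback for formats that have no mapping.
demuxer_sink_caps() is a made-up name:

  #include <libavformat/avformat.h>

  static GstCaps *
  demuxer_sink_caps (AVInputFormat * in_plugin)
  {
    /* e.g. "matroska" -> video/x-matroska */
    return gst_ffmpeg_formatid_to_caps (in_plugin->name);
  }
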
> ++
> ++gboolean
> ++gst_ffmpeg_formatid_get_codecids (const gchar * format_name,
> ++ enum CodecID ** video_codec_list, enum CodecID ** audio_codec_list,
> ++ AVOutputFormat * plugin)
> ++{
> ++ static enum CodecID tmp_vlist[] = {
> ++ CODEC_ID_NONE,
> ++ CODEC_ID_NONE
> ++ };
> ++ static enum CodecID tmp_alist[] = {
> ++ CODEC_ID_NONE,
> ++ CODEC_ID_NONE
> ++ };
> ++
> ++ GST_LOG ("format_name : %s", format_name);
> ++
> ++ if (!strcmp (format_name, "mp4")) {
> ++ static enum CodecID mp4_video_list[] = {
> ++ CODEC_ID_MPEG4, CODEC_ID_H264,
> ++ CODEC_ID_MJPEG,
> ++ CODEC_ID_NONE
> ++ };
> ++ static enum CodecID mp4_audio_list[] = {
> ++ CODEC_ID_AAC, CODEC_ID_MP3,
> ++ CODEC_ID_NONE
> ++ };
> ++
> ++ *video_codec_list = mp4_video_list;
> ++ *audio_codec_list = mp4_audio_list;
> ++ } else if (!strcmp (format_name, "mpeg")) {
> ++ static enum CodecID mpeg_video_list[] = { CODEC_ID_MPEG1VIDEO,
> ++ CODEC_ID_MPEG2VIDEO,
> ++ CODEC_ID_H264,
> ++ CODEC_ID_NONE
> ++ };
> ++ static enum CodecID mpeg_audio_list[] = { CODEC_ID_MP1,
> ++ CODEC_ID_MP2,
> ++ CODEC_ID_MP3,
> ++ CODEC_ID_NONE
> ++ };
> ++
> ++ *video_codec_list = mpeg_video_list;
> ++ *audio_codec_list = mpeg_audio_list;
> ++ } else if (!strcmp (format_name, "dvd")) {
> ++ static enum CodecID mpeg_video_list[] = { CODEC_ID_MPEG2VIDEO,
> ++ CODEC_ID_NONE
> ++ };
> ++ static enum CodecID mpeg_audio_list[] = { CODEC_ID_MP2,
> ++ CODEC_ID_AC3,
> ++ CODEC_ID_DTS,
> ++ CODEC_ID_PCM_S16BE,
> ++ CODEC_ID_NONE
> ++ };
> ++
> ++ *video_codec_list = mpeg_video_list;
> ++ *audio_codec_list = mpeg_audio_list;
> ++ } else if (!strcmp (format_name, "mpegts")) {
> ++ static enum CodecID mpegts_video_list[] = { CODEC_ID_MPEG1VIDEO,
> ++ CODEC_ID_MPEG2VIDEO,
> ++ CODEC_ID_H264,
> ++ CODEC_ID_NONE
> ++ };
> ++ static enum CodecID mpegts_audio_list[] = { CODEC_ID_MP2,
> ++ CODEC_ID_MP3,
> ++ CODEC_ID_AC3,
> ++ CODEC_ID_DTS,
> ++ CODEC_ID_AAC,
> ++ CODEC_ID_NONE
> ++ };
> ++
> ++ *video_codec_list = mpegts_video_list;
> ++ *audio_codec_list = mpegts_audio_list;
> ++ } else if (!strcmp (format_name, "vob")) {
> ++ static enum CodecID vob_video_list[] =
> ++ { CODEC_ID_MPEG2VIDEO, CODEC_ID_NONE };
> ++ static enum CodecID vob_audio_list[] = { CODEC_ID_MP2, CODEC_ID_AC3,
> ++ CODEC_ID_DTS, CODEC_ID_NONE
> ++ };
> ++
> ++ *video_codec_list = vob_video_list;
> ++ *audio_codec_list = vob_audio_list;
> ++ } else if (!strcmp (format_name, "flv")) {
> ++ static enum CodecID flv_video_list[] = { CODEC_ID_FLV1, CODEC_ID_NONE };
> ++ static enum CodecID flv_audio_list[] = { CODEC_ID_MP3, CODEC_ID_NONE };
> ++
> ++ *video_codec_list = flv_video_list;
> ++ *audio_codec_list = flv_audio_list;
> ++ } else if (!strcmp (format_name, "asf")) {
> ++ static enum CodecID asf_video_list[] =
> ++ { CODEC_ID_WMV1, CODEC_ID_WMV2, CODEC_ID_MSMPEG4V3, CODEC_ID_NONE };
> ++ static enum CodecID asf_audio_list[] =
> ++ { CODEC_ID_WMAV1, CODEC_ID_WMAV2, CODEC_ID_MP3, CODEC_ID_NONE };
> ++
> ++ *video_codec_list = asf_video_list;
> ++ *audio_codec_list = asf_audio_list;
> ++ } else if (!strcmp (format_name, "dv")) {
> ++ static enum CodecID dv_video_list[] = { CODEC_ID_DVVIDEO, CODEC_ID_NONE };
> ++ static enum CodecID dv_audio_list[] = { CODEC_ID_PCM_S16LE, CODEC_ID_NONE };
> ++
> ++ *video_codec_list = dv_video_list;
> ++ *audio_codec_list = dv_audio_list;
> ++ } else if (!strcmp (format_name, "mov")) {
> ++ static enum CodecID mov_video_list[] = {
> ++ CODEC_ID_SVQ1, CODEC_ID_SVQ3, CODEC_ID_MPEG4,
> ++ CODEC_ID_H263, CODEC_ID_H263P,
> ++ CODEC_ID_H264, CODEC_ID_DVVIDEO,
> ++ CODEC_ID_MJPEG,
> ++ CODEC_ID_NONE
> ++ };
> ++ static enum CodecID mov_audio_list[] = {
> ++ CODEC_ID_PCM_MULAW, CODEC_ID_PCM_ALAW, CODEC_ID_ADPCM_IMA_QT,
> ++ CODEC_ID_MACE3, CODEC_ID_MACE6, CODEC_ID_AAC,
> ++ CODEC_ID_AMR_NB, CODEC_ID_AMR_WB,
> ++ CODEC_ID_PCM_S16BE, CODEC_ID_PCM_S16LE,
> ++ CODEC_ID_MP3, CODEC_ID_NONE
> ++ };
> ++
> ++ *video_codec_list = mov_video_list;
> ++ *audio_codec_list = mov_audio_list;
> ++ } else if ((!strcmp (format_name, "3gp") || !strcmp (format_name, "3g2"))) {
> ++ static enum CodecID tgp_video_list[] = {
> ++ CODEC_ID_MPEG4, CODEC_ID_H263, CODEC_ID_H263P, CODEC_ID_H264,
> ++ CODEC_ID_NONE
> ++ };
> ++ static enum CodecID tgp_audio_list[] = {
> ++ CODEC_ID_AMR_NB, CODEC_ID_AMR_WB,
> ++ CODEC_ID_AAC,
> ++ CODEC_ID_NONE
> ++ };
> ++
> ++ *video_codec_list = tgp_video_list;
> ++ *audio_codec_list = tgp_audio_list;
> ++ } else if (!strcmp (format_name, "mmf")) {
> ++ static enum CodecID mmf_audio_list[] = {
> ++ CODEC_ID_ADPCM_YAMAHA, CODEC_ID_NONE
> ++ };
> ++ *video_codec_list = NULL;
> ++ *audio_codec_list = mmf_audio_list;
> ++ } else if (!strcmp (format_name, "amr")) {
> ++ static enum CodecID amr_audio_list[] = {
> ++ CODEC_ID_AMR_NB, CODEC_ID_AMR_WB,
> ++ CODEC_ID_NONE
> ++ };
> ++ *video_codec_list = NULL;
> ++ *audio_codec_list = amr_audio_list;
> ++ } else if (!strcmp (format_name, "gif")) {
> ++ static enum CodecID gif_image_list[] = {
> ++ CODEC_ID_RAWVIDEO, CODEC_ID_NONE
> ++ };
> ++ *video_codec_list = gif_image_list;
> ++ *audio_codec_list = NULL;
> ++ } else if ((plugin->audio_codec != CODEC_ID_NONE) ||
> ++ (plugin->video_codec != CODEC_ID_NONE)) {
> ++ tmp_vlist[0] = plugin->video_codec;
> ++ tmp_alist[0] = plugin->audio_codec;
> ++
> ++ *video_codec_list = tmp_vlist;
> ++ *audio_codec_list = tmp_alist;
> ++ } else {
> ++ GST_LOG ("Format %s not found", format_name);
> ++ return FALSE;
> ++ }
> ++
> ++ return TRUE;
> ++}
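
Not part of the patch -- a sketch of how a muxer wrapper would consume the
per-container codec lists returned above; print_container_codecs() is a
made-up helper, the rest is the function from this hunk plus glib:

  static gboolean
  print_container_codecs (const gchar * name, AVOutputFormat * fmt)
  {
    enum CodecID *vids = NULL, *auds = NULL;
    gint i;

    if (!gst_ffmpeg_formatid_get_codecids (name, &vids, &auds, fmt))
      return FALSE;

    /* either list may be NULL (e.g. "mmf" is audio-only) */
    for (i = 0; vids && vids[i] != CODEC_ID_NONE; i++)
      g_print ("video codec id: %d\n", vids[i]);
    for (i = 0; auds && auds[i] != CODEC_ID_NONE; i++)
      g_print ("audio codec id: %d\n", auds[i]);
    return TRUE;
  }
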
> ++
> ++/* Convert a GstCaps to a FFMPEG codec ID. Size et al.
> ++ * are omitted, as those can be queried by the user itself,
> ++ * we're not eating the GstCaps or anything
> ++ * A pointer to an allocated context is also needed for
> ++ * optional extra info
> ++ */
> ++
> ++enum CodecID
> ++gst_ffmpeg_caps_to_codecid (const GstCaps * caps, AVCodecContext * context)
> ++{
> ++ enum CodecID id = CODEC_ID_NONE;
> ++ const gchar *mimetype;
> ++ const GstStructure *structure;
> ++ gboolean video = FALSE, audio = FALSE; /* we want to be sure! */
> ++
> ++ g_return_val_if_fail (caps != NULL, CODEC_ID_NONE);
> ++ g_return_val_if_fail (gst_caps_get_size (caps) == 1, CODEC_ID_NONE);
> ++ structure = gst_caps_get_structure (caps, 0);
> ++
> ++ mimetype = gst_structure_get_name (structure);
> ++
> ++ if (!strcmp (mimetype, "video/x-raw-rgb") ||
> ++ !strcmp (mimetype, "video/x-raw-yuv")) {
> ++ id = CODEC_ID_RAWVIDEO;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/x-raw-int")) {
> ++ gint depth, width, endianness;
> ++ gboolean signedness;
> ++
> ++ if (gst_structure_get_int (structure, "endianness", &endianness) &&
> ++ gst_structure_get_boolean (structure, "signed", &signedness) &&
> ++ gst_structure_get_int (structure, "width", &width) &&
> ++ gst_structure_get_int (structure, "depth", &depth) && depth == width) {
> ++ switch (depth) {
> ++ case 8:
> ++ if (signedness) {
> ++ id = CODEC_ID_PCM_S8;
> ++ } else {
> ++ id = CODEC_ID_PCM_U8;
> ++ }
> ++ break;
> ++ case 16:
> ++ switch (endianness) {
> ++ case G_BIG_ENDIAN:
> ++ if (signedness) {
> ++ id = CODEC_ID_PCM_S16BE;
> ++ } else {
> ++ id = CODEC_ID_PCM_U16BE;
> ++ }
> ++ break;
> ++ case G_LITTLE_ENDIAN:
> ++ if (signedness) {
> ++ id = CODEC_ID_PCM_S16LE;
> ++ } else {
> ++ id = CODEC_ID_PCM_U16LE;
> ++ }
> ++ break;
> ++ }
> ++ break;
> ++ }
> ++ if (id != CODEC_ID_NONE)
> ++ audio = TRUE;
> ++ }
> ++ } else if (!strcmp (mimetype, "audio/x-mulaw")) {
> ++ id = CODEC_ID_PCM_MULAW;
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/x-alaw")) {
> ++ id = CODEC_ID_PCM_ALAW;
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-dv")) {
> ++ gboolean sys_strm;
> ++
> ++ if (gst_structure_get_boolean (structure, "systemstream", &sys_strm) &&
> ++ !sys_strm) {
> ++ id = CODEC_ID_DVVIDEO;
> ++ video = TRUE;
> ++ }
> ++ } else if (!strcmp (mimetype, "audio/x-dv")) { /* ??? */
> ++ id = CODEC_ID_DVAUDIO;
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-h263")) {
> ++ const gchar *h263version =
> ++ gst_structure_get_string (structure, "h263version");
> ++ if (h263version && !strcmp (h263version, "h263p"))
> ++ id = CODEC_ID_H263P;
> ++ else
> ++ id = CODEC_ID_H263;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-intel-h263")) {
> ++ id = CODEC_ID_H263I;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-h261")) {
> ++ id = CODEC_ID_H261;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/mpeg")) {
> ++ gboolean sys_strm;
> ++ gint mpegversion;
> ++
> ++ if (gst_structure_get_boolean (structure, "systemstream", &sys_strm) &&
> ++ gst_structure_get_int (structure, "mpegversion", &mpegversion) &&
> ++ !sys_strm) {
> ++ switch (mpegversion) {
> ++ case 1:
> ++ id = CODEC_ID_MPEG1VIDEO;
> ++ break;
> ++ case 2:
> ++ id = CODEC_ID_MPEG2VIDEO;
> ++ break;
> ++ case 4:
> ++ id = CODEC_ID_MPEG4;
> ++ break;
> ++ }
> ++ }
> ++ if (id != CODEC_ID_NONE)
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "image/jpeg")) {
> ++ id = CODEC_ID_MJPEG; /* A... B... */
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-jpeg-b")) {
> ++ id = CODEC_ID_MJPEGB;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-wmv")) {
> ++ gint wmvversion = 0;
> ++
> ++ if (gst_structure_get_int (structure, "wmvversion", &wmvversion)) {
> ++ switch (wmvversion) {
> ++ case 1:
> ++ id = CODEC_ID_WMV1;
> ++ break;
> ++ case 2:
> ++ id = CODEC_ID_WMV2;
> ++ break;
> ++ case 3:
> ++ {
> ++ guint32 fourcc;
> ++
> ++ /* WMV3 unless the fourcc exists and says otherwise */
> ++ id = CODEC_ID_WMV3;
> ++
> ++ if (gst_structure_get_fourcc (structure, "format", &fourcc)) {
> ++ if ((fourcc == GST_MAKE_FOURCC ('W', 'V', 'C', '1')) ||
> ++ (fourcc == GST_MAKE_FOURCC ('W', 'M', 'V', 'A'))) {
> ++ id = CODEC_ID_VC1;
> ++ }
> ++ }
> ++ }
> ++ break;
> ++ }
> ++ }
> ++ if (id != CODEC_ID_NONE)
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/x-vorbis")) {
> ++ id = CODEC_ID_VORBIS;
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/x-qdm2")) {
> ++ id = CODEC_ID_QDM2;
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/mpeg")) {
> ++ gint layer = 0;
> ++ gint mpegversion = 0;
> ++
> ++ if (gst_structure_get_int (structure, "mpegversion", &mpegversion)) {
> ++ switch (mpegversion) {
> ++ case 2: /* ffmpeg uses faad for both... */
> ++ case 4:
> ++ id = CODEC_ID_AAC;
> ++ break;
> ++ case 1:
> ++ if (gst_structure_get_int (structure, "layer", &layer)) {
> ++ switch (layer) {
> ++ case 1:
> ++ id = CODEC_ID_MP1;
> ++ break;
> ++ case 2:
> ++ id = CODEC_ID_MP2;
> ++ break;
> ++ case 3:
> ++ id = CODEC_ID_MP3;
> ++ break;
> ++ }
> ++ }
> ++ }
> ++ }
> ++ if (id != CODEC_ID_NONE)
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/x-musepack")) {
> ++ gint streamversion = -1;
> ++
> ++ if (gst_structure_get_int (structure, "streamversion", &streamversion)) {
> ++ if (streamversion == 7)
> ++ id = CODEC_ID_MUSEPACK7;
> ++ } else {
> ++ id = CODEC_ID_MUSEPACK7;
> ++ }
> ++ } else if (!strcmp (mimetype, "audio/x-wma")) {
> ++ gint wmaversion = 0;
> ++
> ++ if (gst_structure_get_int (structure, "wmaversion", &wmaversion)) {
> ++ switch (wmaversion) {
> ++ case 1:
> ++ id = CODEC_ID_WMAV1;
> ++ break;
> ++ case 2:
> ++ id = CODEC_ID_WMAV2;
> ++ break;
> ++ case 3:
> ++ id = CODEC_ID_WMAPRO;
> ++ break;
> ++ }
> ++ }
> ++ if (id != CODEC_ID_NONE)
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/x-wms")) {
> ++ id = CODEC_ID_WMAVOICE;
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/x-ac3")) {
> ++ id = CODEC_ID_AC3;
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/x-eac3")) {
> ++ id = CODEC_ID_EAC3;
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/x-vnd.sony.atrac3") ||
> ++ !strcmp (mimetype, "audio/atrac3")) {
> ++ id = CODEC_ID_ATRAC3;
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/x-dts")) {
> ++ id = CODEC_ID_DTS;
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "application/x-ape")) {
> ++ id = CODEC_ID_APE;
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-msmpeg")) {
> ++ gint msmpegversion = 0;
> ++
> ++ if (gst_structure_get_int (structure, "msmpegversion", &msmpegversion)) {
> ++ switch (msmpegversion) {
> ++ case 41:
> ++ id = CODEC_ID_MSMPEG4V1;
> ++ break;
> ++ case 42:
> ++ id = CODEC_ID_MSMPEG4V2;
> ++ break;
> ++ case 43:
> ++ id = CODEC_ID_MSMPEG4V3;
> ++ break;
> ++ }
> ++ }
> ++ if (id != CODEC_ID_NONE)
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-svq")) {
> ++ gint svqversion = 0;
> ++
> ++ if (gst_structure_get_int (structure, "svqversion", &svqversion)) {
> ++ switch (svqversion) {
> ++ case 1:
> ++ id = CODEC_ID_SVQ1;
> ++ break;
> ++ case 3:
> ++ id = CODEC_ID_SVQ3;
> ++ break;
> ++ }
> ++ }
> ++ if (id != CODEC_ID_NONE)
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-huffyuv")) {
> ++ id = CODEC_ID_HUFFYUV;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/x-mace")) {
> ++ gint maceversion = 0;
> ++
> ++ if (gst_structure_get_int (structure, "maceversion", &maceversion)) {
> ++ switch (maceversion) {
> ++ case 3:
> ++ id = CODEC_ID_MACE3;
> ++ break;
> ++ case 6:
> ++ id = CODEC_ID_MACE6;
> ++ break;
> ++ }
> ++ }
> ++ if (id != CODEC_ID_NONE)
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-theora")) {
> ++ id = CODEC_ID_THEORA;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-vp3")) {
> ++ id = CODEC_ID_VP3;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-vp5")) {
> ++ id = CODEC_ID_VP5;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-vp6")) {
> ++ id = CODEC_ID_VP6;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-vp6-flash")) {
> ++ id = CODEC_ID_VP6F;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-vp6-alpha")) {
> ++ id = CODEC_ID_VP6A;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-vp8")) {
> ++ id = CODEC_ID_VP8;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-flash-screen")) {
> ++ id = CODEC_ID_FLASHSV;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-indeo")) {
> ++ gint indeoversion = 0;
> ++
> ++ if (gst_structure_get_int (structure, "indeoversion", &indeoversion)) {
> ++ switch (indeoversion) {
> ++ case 5:
> ++ id = CODEC_ID_INDEO5;
> ++ break;
> ++ case 3:
> ++ id = CODEC_ID_INDEO3;
> ++ break;
> ++ case 2:
> ++ id = CODEC_ID_INDEO2;
> ++ break;
> ++ }
> ++ if (id != CODEC_ID_NONE)
> ++ video = TRUE;
> ++ }
> ++ } else if (!strcmp (mimetype, "video/x-divx")) {
> ++ gint divxversion = 0;
> ++
> ++ if (gst_structure_get_int (structure, "divxversion", &divxversion)) {
> ++ switch (divxversion) {
> ++ case 3:
> ++ id = CODEC_ID_MSMPEG4V3;
> ++ break;
> ++ case 4:
> ++ case 5:
> ++ id = CODEC_ID_MPEG4;
> ++ break;
> ++ }
> ++ }
> ++ if (id != CODEC_ID_NONE)
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-3ivx")) {
> ++ id = CODEC_ID_MPEG4;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-xvid")) {
> ++ id = CODEC_ID_MPEG4;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-ffv")) {
> ++ gint ffvversion = 0;
> ++
> ++ if (gst_structure_get_int (structure, "ffvversion", &ffvversion) &&
> ++ ffvversion == 1) {
> ++ id = CODEC_ID_FFV1;
> ++ video = TRUE;
> ++ }
> ++ } else if (!strcmp (mimetype, "audio/x-adpcm")) {
> ++ const gchar *layout;
> ++
> ++ layout = gst_structure_get_string (structure, "layout");
> ++ if (layout == NULL) {
> ++ /* break */
> ++ } else if (!strcmp (layout, "quicktime")) {
> ++ id = CODEC_ID_ADPCM_IMA_QT;
> ++ } else if (!strcmp (layout, "microsoft")) {
> ++ id = CODEC_ID_ADPCM_MS;
> ++ } else if (!strcmp (layout, "dvi")) {
> ++ id = CODEC_ID_ADPCM_IMA_WAV;
> ++ } else if (!strcmp (layout, "4xm")) {
> ++ id = CODEC_ID_ADPCM_4XM;
> ++ } else if (!strcmp (layout, "smjpeg")) {
> ++ id = CODEC_ID_ADPCM_IMA_SMJPEG;
> ++ } else if (!strcmp (layout, "dk3")) {
> ++ id = CODEC_ID_ADPCM_IMA_DK3;
> ++ } else if (!strcmp (layout, "dk4")) {
> ++ id = CODEC_ID_ADPCM_IMA_DK4;
> ++ } else if (!strcmp (layout, "westwood")) {
> ++ id = CODEC_ID_ADPCM_IMA_WS;
> ++ } else if (!strcmp (layout, "iss")) {
> ++ id = CODEC_ID_ADPCM_IMA_ISS;
> ++ } else if (!strcmp (layout, "xa")) {
> ++ id = CODEC_ID_ADPCM_XA;
> ++ } else if (!strcmp (layout, "adx")) {
> ++ id = CODEC_ID_ADPCM_ADX;
> ++ } else if (!strcmp (layout, "ea")) {
> ++ id = CODEC_ID_ADPCM_EA;
> ++ } else if (!strcmp (layout, "g726")) {
> ++ id = CODEC_ID_ADPCM_G726;
> ++ } else if (!strcmp (layout, "g721")) {
> ++ id = CODEC_ID_ADPCM_G726;
> ++ } else if (!strcmp (layout, "ct")) {
> ++ id = CODEC_ID_ADPCM_CT;
> ++ } else if (!strcmp (layout, "swf")) {
> ++ id = CODEC_ID_ADPCM_SWF;
> ++ } else if (!strcmp (layout, "yamaha")) {
> ++ id = CODEC_ID_ADPCM_YAMAHA;
> ++ } else if (!strcmp (layout, "sbpro2")) {
> ++ id = CODEC_ID_ADPCM_SBPRO_2;
> ++ } else if (!strcmp (layout, "sbpro3")) {
> ++ id = CODEC_ID_ADPCM_SBPRO_3;
> ++ } else if (!strcmp (layout, "sbpro4")) {
> ++ id = CODEC_ID_ADPCM_SBPRO_4;
> ++ }
> ++ if (id != CODEC_ID_NONE)
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-4xm")) {
> ++ id = CODEC_ID_4XM;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/x-dpcm")) {
> ++ const gchar *layout;
> ++
> ++ layout = gst_structure_get_string (structure, "layout");
> ++ if (!layout) {
> ++ /* .. */
> ++ } else if (!strcmp (layout, "roq")) {
> ++ id = CODEC_ID_ROQ_DPCM;
> ++ } else if (!strcmp (layout, "interplay")) {
> ++ id = CODEC_ID_INTERPLAY_DPCM;
> ++ } else if (!strcmp (layout, "xan")) {
> ++ id = CODEC_ID_XAN_DPCM;
> ++ } else if (!strcmp (layout, "sol")) {
> ++ id = CODEC_ID_SOL_DPCM;
> ++ }
> ++ if (id != CODEC_ID_NONE)
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/x-flac")) {
> ++ id = CODEC_ID_FLAC;
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/x-shorten")) {
> ++ id = CODEC_ID_SHORTEN;
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/x-alac")) {
> ++ id = CODEC_ID_ALAC;
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-cinepak")) {
> ++ id = CODEC_ID_CINEPAK;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-pn-realvideo")) {
> ++ gint rmversion;
> ++
> ++ if (gst_structure_get_int (structure, "rmversion", &rmversion)) {
> ++ switch (rmversion) {
> ++ case 1:
> ++ id = CODEC_ID_RV10;
> ++ break;
> ++ case 2:
> ++ id = CODEC_ID_RV20;
> ++ break;
> ++ case 3:
> ++ id = CODEC_ID_RV30;
> ++ break;
> ++ case 4:
> ++ id = CODEC_ID_RV40;
> ++ break;
> ++ }
> ++ }
> ++ if (id != CODEC_ID_NONE)
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/x-sipro")) {
> ++ id = CODEC_ID_SIPR;
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/x-pn-realaudio")) {
> ++ gint raversion;
> ++
> ++ if (gst_structure_get_int (structure, "raversion", &raversion)) {
> ++ switch (raversion) {
> ++ case 1:
> ++ id = CODEC_ID_RA_144;
> ++ break;
> ++ case 2:
> ++ id = CODEC_ID_RA_288;
> ++ break;
> ++ case 8:
> ++ id = CODEC_ID_COOK;
> ++ break;
> ++ }
> ++ }
> ++ if (id != CODEC_ID_NONE)
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-rle")) {
> ++ const gchar *layout;
> ++
> ++ if ((layout = gst_structure_get_string (structure, "layout"))) {
> ++ if (!strcmp (layout, "microsoft")) {
> ++ id = CODEC_ID_MSRLE;
> ++ video = TRUE;
> ++ }
> ++ }
> ++ } else if (!strcmp (mimetype, "video/x-xan")) {
> ++ gint wcversion = 0;
> ++
> ++ if ((gst_structure_get_int (structure, "wcversion", &wcversion))) {
> ++ switch (wcversion) {
> ++ case 3:
> ++ id = CODEC_ID_XAN_WC3;
> ++ video = TRUE;
> ++ break;
> ++ case 4:
> ++ id = CODEC_ID_XAN_WC4;
> ++ video = TRUE;
> ++ break;
> ++ default:
> ++ break;
> ++ }
> ++ }
> ++ } else if (!strcmp (mimetype, "audio/AMR")) {
> ++ audio = TRUE;
> ++ id = CODEC_ID_AMR_NB;
> ++ } else if (!strcmp (mimetype, "audio/AMR-WB")) {
> ++ id = CODEC_ID_AMR_WB;
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "audio/qcelp")) {
> ++ id = CODEC_ID_QCELP;
> ++ audio = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-h264")) {
> ++ id = CODEC_ID_H264;
> ++ video = TRUE;
> ++ } else if (!strcmp (mimetype, "video/x-flash-video")) {
> ++ gint flvversion = 0;
> ++
> ++ if ((gst_structure_get_int (structure, "flvversion", &flvversion))) {
> ++ switch (flvversion) {
> ++ case 1:
> ++ id = CODEC_ID_FLV1;
> ++ video = TRUE;
> ++ break;
> ++ default:
> ++ break;
> ++ }
> ++ }
> ++
> ++ } else if (!strcmp (mimetype, "audio/x-nellymoser")) {
> ++ id = CODEC_ID_NELLYMOSER;
> ++ audio = TRUE;
> ++ } else if (!strncmp (mimetype, "audio/x-gst_ff-", 15)) {
> ++ gchar ext[16];
> ++ AVCodec *codec;
> ++
> ++ if (strlen (mimetype) <= 30 &&
> ++ sscanf (mimetype, "audio/x-gst_ff-%s", ext) == 1) {
> ++ if ((codec = avcodec_find_decoder_by_name (ext)) ||
> ++ (codec = avcodec_find_encoder_by_name (ext))) {
> ++ id = codec->id;
> ++ audio = TRUE;
> ++ }
> ++ }
> ++ } else if (!strncmp (mimetype, "video/x-gst_ff-", 15)) {
> ++ gchar ext[16];
> ++ AVCodec *codec;
> ++
> ++ if (strlen (mimetype) <= 30 &&
> ++ sscanf (mimetype, "video/x-gst_ff-%s", ext) == 1) {
> ++ if ((codec = avcodec_find_decoder_by_name (ext)) ||
> ++ (codec = avcodec_find_encoder_by_name (ext))) {
> ++ id = codec->id;
> ++ video = TRUE;
> ++ }
> ++ }
> ++ }
> ++
> ++ if (context != NULL) {
> ++ if (video == TRUE) {
> ++ context->codec_type = AVMEDIA_TYPE_VIDEO;
> ++ } else if (audio == TRUE) {
> ++ context->codec_type = AVMEDIA_TYPE_AUDIO;
> ++ } else {
> ++ context->codec_type = AVMEDIA_TYPE_UNKNOWN;
> ++ }
> ++ context->codec_id = id;
> ++ gst_ffmpeg_caps_with_codecid (id, context->codec_type, caps, context);
> ++ }
> ++
> ++ if (id != CODEC_ID_NONE) {
> ++ GST_DEBUG ("The id=%d belongs to the caps %" GST_PTR_FORMAT, id, caps);
> ++ } else {
> ++ GST_WARNING ("Couldn't figure out the id for caps %" GST_PTR_FORMAT, caps);
> ++ }
> ++
> ++ return id;
> ++}
> +diff -uNr gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegcodecmap.c.rej gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegcodecmap.c.rej
> +--- gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegcodecmap.c.rej 1970-01-01 01:00:00.000000000 +0100
> ++++ gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegcodecmap.c.rej 2014-08-08 15:31:06.055868246 +0200
> +@@ -0,0 +1,12 @@
> ++--- ext/ffmpeg/gstffmpegcodecmap.c
> +++++ ext/ffmpeg/gstffmpegcodecmap.c
> ++@@ -1884,9 +1842,6 @@
> ++ gst_ff_vid_caps_new (context, codec_id, encode, "video/x-raw-rgb",
> ++ "bpp", G_TYPE_INT, bpp, "depth", G_TYPE_INT, depth, "endianness",
> ++ G_TYPE_INT, endianness, NULL);
> ++- if (caps && context) {
> ++- gst_ffmpeg_set_palette (caps, context);
> ++- }
> ++ }
> ++ } else if (fmt) {
> ++ caps = gst_ff_vid_caps_new (context, codec_id, encode, "video/x-raw-yuv",
> +diff -uNr gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegdec.c gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegdec.c
> +--- gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegdec.c 2014-08-08 14:46:31.462772351 +0200
> ++++ gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegdec.c 2014-08-08 15:32:18.608870847 +0200
> +@@ -88,7 +88,6 @@
> + gint depth;
> + } audio;
> + } format;
> +- gboolean waiting_for_key;
> + gboolean discont;
> + gboolean clear_ts;
> +
> +@@ -438,7 +437,6 @@
> + ffmpegdec->pcache = NULL;
> + ffmpegdec->par = NULL;
> + ffmpegdec->opened = FALSE;
> +- ffmpegdec->waiting_for_key = TRUE;
> + ffmpegdec->skip_frame = ffmpegdec->lowres = 0;
> + ffmpegdec->direct_rendering = DEFAULT_DIRECT_RENDERING;
> + ffmpegdec->do_padding = DEFAULT_DO_PADDING;
> +@@ -608,11 +606,6 @@
> + gst_ffmpeg_avcodec_close (ffmpegdec->context);
> + ffmpegdec->opened = FALSE;
> +
> +- if (ffmpegdec->context->palctrl) {
> +- av_free (ffmpegdec->context->palctrl);
> +- ffmpegdec->context->palctrl = NULL;
> +- }
> +-
> + if (ffmpegdec->context->extradata) {
> + av_free (ffmpegdec->context->extradata);
> + ffmpegdec->context->extradata = NULL;
> +@@ -864,7 +857,7 @@
> +
> + /* workaround encoder bugs */
> + ffmpegdec->context->workaround_bugs |= FF_BUG_AUTODETECT;
> +- ffmpegdec->context->error_recognition = 1;
> ++ ffmpegdec->context->err_recognition = 1;
> +
> + /* for slow cpus */
> + ffmpegdec->context->lowres = ffmpegdec->lowres;
> +@@ -944,7 +937,7 @@
> + fsize = gst_ffmpeg_avpicture_get_size (ffmpegdec->context->pix_fmt,
> + width, height);
> +
> +- if (!ffmpegdec->context->palctrl && ffmpegdec->can_allocate_aligned) {
> ++ if (ffmpegdec->can_allocate_aligned) {
> + GST_LOG_OBJECT (ffmpegdec, "calling pad_alloc");
> + /* no pallete, we can use the buffer size to alloc */
> + ret = gst_pad_alloc_buffer_and_set_caps (ffmpegdec->srcpad,
> +@@ -1083,7 +1076,6 @@
> + /* tell ffmpeg we own this buffer, tranfer the ref we have on the buffer to
> + * the opaque data. */
> + picture->type = FF_BUFFER_TYPE_USER;
> +- picture->age = 256 * 256 * 256 * 64;
> + picture->opaque = buf;
> +
> + #ifdef EXTRA_REF
> +@@ -1414,10 +1406,6 @@
> + } else {
> + if (diff >= 0) {
> + /* we're too slow, try to speed up */
> +- if (ffmpegdec->waiting_for_key) {
> +- /* we were waiting for a keyframe, that's ok */
> +- goto skipping;
> +- }
> + /* switch to skip_frame mode */
> + goto skip_frame;
> + }
> +@@ -1427,11 +1415,6 @@
> + ffmpegdec->processed++;
> + return TRUE;
> +
> +-skipping:
> +- {
> +- res = FALSE;
> +- goto drop_qos;
> +- }
> + normal_mode:
> + {
> + if (ffmpegdec->context->skip_frame != AVDISCARD_DEFAULT) {
> +@@ -1528,43 +1511,6 @@
> + }
> +
> +
> +-/* figure out if the current picture is a keyframe, return TRUE if that is
> +- * the case. */
> +-static gboolean
> +-check_keyframe (GstFFMpegDec * ffmpegdec)
> +-{
> +- GstFFMpegDecClass *oclass;
> +- gboolean is_itype = FALSE;
> +- gboolean is_reference = FALSE;
> +- gboolean iskeyframe;
> +-
> +- /* figure out if we are dealing with a keyframe */
> +- oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
> +-
> +- /* remember that we have B frames, we need this for the DTS -> PTS conversion
> +- * code */
> +- if (!ffmpegdec->has_b_frames && ffmpegdec->picture->pict_type == FF_B_TYPE) {
> +- GST_DEBUG_OBJECT (ffmpegdec, "we have B frames");
> +- ffmpegdec->has_b_frames = TRUE;
> +- }
> +-
> +- is_itype = (ffmpegdec->picture->pict_type == FF_I_TYPE);
> +- is_reference = (ffmpegdec->picture->reference == 1);
> +-
> +- iskeyframe = (is_itype || is_reference || ffmpegdec->picture->key_frame)
> +- || (oclass->in_plugin->id == CODEC_ID_INDEO3)
> +- || (oclass->in_plugin->id == CODEC_ID_MSZH)
> +- || (oclass->in_plugin->id == CODEC_ID_ZLIB)
> +- || (oclass->in_plugin->id == CODEC_ID_VP3)
> +- || (oclass->in_plugin->id == CODEC_ID_HUFFYUV);
> +-
> +- GST_LOG_OBJECT (ffmpegdec,
> +- "current picture: type: %d, is_keyframe:%d, is_itype:%d, is_reference:%d",
> +- ffmpegdec->picture->pict_type, iskeyframe, is_itype, is_reference);
> +-
> +- return iskeyframe;
> +-}
> +-
> + /* get an outbuf buffer with the current picture */
> + static GstFlowReturn
> + get_output_buffer (GstFFMpegDec * ffmpegdec, GstBuffer ** outbuf)
> +@@ -1694,7 +1640,6 @@
> + {
> + gint len = -1;
> + gint have_data;
> +- gboolean iskeyframe;
> + gboolean mode_switch;
> + gboolean decode;
> + gint skip_frame = AVDISCARD_DEFAULT;
> +@@ -1809,7 +1754,6 @@
> + gst_ffmpegdec_negotiate (ffmpegdec, TRUE);
> + }
> +
> +-
> + /* Whether a frame is interlaced or not is unknown at the time of
> + buffer allocation, so caps on the buffer in opaque will have
> + the previous frame's interlaced flag set. So if interlacedness
> +@@ -1831,10 +1775,6 @@
> + }
> + }
> +
> +- /* check if we are dealing with a keyframe here, this will also check if we
> +- * are dealing with B frames. */
> +- iskeyframe = check_keyframe (ffmpegdec);
> +-
> + /* check that the timestamps go upwards */
> + if (ffmpegdec->last_out != -1 && ffmpegdec->last_out > out_pts) {
> + /* timestamps go backwards, this means frames were reordered and we must
> +@@ -1865,7 +1805,7 @@
> + * timestamps */
> + if (!ffmpegdec->reordered_in && ffmpegdec->reordered_out) {
> + /* PTS and DTS are the same for keyframes */
> +- if (!iskeyframe && ffmpegdec->next_out != -1) {
> ++ if (ffmpegdec->next_out != -1) {
> + /* interpolate all timestamps except for keyframes, FIXME, this is
> + * wrong when QoS is active. */
> + GST_DEBUG_OBJECT (ffmpegdec, "interpolate timestamps");
> +@@ -1874,16 +1814,6 @@
> + }
> + }
> +
> +- /* when we're waiting for a keyframe, see if we have one or drop the current
> +- * non-keyframe */
> +- if (G_UNLIKELY (ffmpegdec->waiting_for_key)) {
> +- if (G_LIKELY (!iskeyframe))
> +- goto drop_non_keyframe;
> +-
> +- /* we have a keyframe, we can stop waiting for one */
> +- ffmpegdec->waiting_for_key = FALSE;
> +- }
> +-
> + /* get a handle to the output buffer */
> + *ret = get_output_buffer (ffmpegdec, outbuf);
> + if (G_UNLIKELY (*ret != GST_FLOW_OK))
> +@@ -2000,20 +1930,11 @@
> + else
> + ffmpegdec->next_out = -1;
> +
> +- /* palette is not part of raw video frame in gst and the size
> +- * of the outgoing buffer needs to be adjusted accordingly */
> +- if (ffmpegdec->context->palctrl != NULL)
> +- GST_BUFFER_SIZE (*outbuf) -= AVPALETTE_SIZE;
> +-
> + /* now see if we need to clip the buffer against the segment boundaries. */
> + if (G_UNLIKELY (!clip_video_buffer (ffmpegdec, *outbuf, out_timestamp,
> + out_duration)))
> + goto clipped;
> +
> +- /* mark as keyframe or delta unit */
> +- if (!iskeyframe)
> +- GST_BUFFER_FLAG_SET (*outbuf, GST_BUFFER_FLAG_DELTA_UNIT);
> +-
> + if (ffmpegdec->picture->top_field_first)
> + GST_BUFFER_FLAG_SET (*outbuf, GST_VIDEO_BUFFER_TFF);
> +
> +@@ -2024,11 +1945,6 @@
> + return len;
> +
> + /* special cases */
> +-drop_non_keyframe:
> +- {
> +- GST_WARNING_OBJECT (ffmpegdec, "Dropping non-keyframe (seek/init)");
> +- goto beach;
> +- }
> + no_output:
> + {
> + GST_DEBUG_OBJECT (ffmpegdec, "no output buffer");
> +@@ -2422,7 +2338,6 @@
> + gst_ffmpegdec_reset_ts (ffmpegdec);
> + gst_ffmpegdec_reset_qos (ffmpegdec);
> + gst_ffmpegdec_flush_pcache (ffmpegdec);
> +- ffmpegdec->waiting_for_key = TRUE;
> + gst_segment_init (&ffmpegdec->segment, GST_FORMAT_TIME);
> + clear_queued (ffmpegdec);
> + break;
> +@@ -2560,17 +2475,6 @@
> +
> + oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
> +
> +- /* do early keyframe check pretty bad to rely on the keyframe flag in the
> +- * source for this as it might not even be parsed (UDP/file/..). */
> +- if (G_UNLIKELY (ffmpegdec->waiting_for_key)) {
> +- GST_DEBUG_OBJECT (ffmpegdec, "waiting for keyframe");
> +- if (GST_BUFFER_FLAG_IS_SET (inbuf, GST_BUFFER_FLAG_DELTA_UNIT) &&
> +- oclass->in_plugin->type != AVMEDIA_TYPE_AUDIO)
> +- goto skip_keyframe;
> +-
> +- GST_DEBUG_OBJECT (ffmpegdec, "got keyframe");
> +- ffmpegdec->waiting_for_key = FALSE;
> +- }
> + /* parse cache joining. If there is cached data */
> + if (ffmpegdec->pcache) {
> + /* join with previous data */
> +@@ -2805,12 +2709,6 @@
> + gst_buffer_unref (inbuf);
> + return GST_FLOW_NOT_NEGOTIATED;
> + }
> +-skip_keyframe:
> +- {
> +- GST_DEBUG_OBJECT (ffmpegdec, "skipping non keyframe");
> +- gst_buffer_unref (inbuf);
> +- return GST_FLOW_OK;
> +- }
> + }
> +
> + static GstStateChangeReturn
> +@@ -2936,7 +2834,7 @@
> + gchar *plugin_name;
> +
> + /* only decoders */
> +- if (!in_plugin->decode) {
> ++ if (!av_codec_is_decoder (in_plugin)) {
> + goto next;
> + }
> +
> +diff -uNr gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegdec.c.orig gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegdec.c.orig
> +--- gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegdec.c.orig 1970-01-01 01:00:00.000000000 +0100
> ++++ gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegdec.c.orig 2014-08-08 15:31:06.057868246 +0200
> +@@ -0,0 +1,2973 @@
> ++/* GStreamer
> ++ * Copyright (C) <1999> Erik Walthinsen <omega at cse.ogi.edu>
> ++ *
> ++ * This library is free software; you can redistribute it and/or
> ++ * modify it under the terms of the GNU Library General Public
> ++ * License as published by the Free Software Foundation; either
> ++ * version 2 of the License, or (at your option) any later version.
> ++ *
> ++ * This library is distributed in the hope that it will be useful,
> ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
> ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> ++ * Library General Public License for more details.
> ++ *
> ++ * You should have received a copy of the GNU Library General Public
> ++ * License along with this library; if not, write to the
> ++ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
> ++ * Boston, MA 02111-1307, USA.
> ++ */
> ++
> ++#ifdef HAVE_CONFIG_H
> ++#include "config.h"
> ++#endif
> ++
> ++#include <assert.h>
> ++#include <string.h>
> ++
> ++#ifdef HAVE_FFMPEG_UNINSTALLED
> ++#include <avcodec.h>
> ++#else
> ++#include <libavcodec/avcodec.h>
> ++#endif
> ++
> ++#include <gst/gst.h>
> ++#include <gst/video/video.h>
> ++
> ++#include "gstffmpeg.h"
> ++#include "gstffmpegcodecmap.h"
> ++#include "gstffmpegutils.h"
> ++
> ++/* define to enable alternative buffer refcounting algorithm */
> ++#undef EXTRA_REF
> ++
> ++typedef struct _GstFFMpegDec GstFFMpegDec;
> ++
> ++#define MAX_TS_MASK 0xff
> ++
> ++/* for each incomming buffer we keep all timing info in a structure like this.
> ++ * We keep a circular array of these structures around to store the timing info.
> ++ * The index in the array is what we pass as opaque data (to pictures) and
> ++ * pts (to parsers) so that ffmpeg can remember them for us. */
> ++typedef struct
> ++{
> ++ gint idx;
> ++ GstClockTime timestamp;
> ++ GstClockTime duration;
> ++ gint64 offset;
> ++} GstTSInfo;
> ++
> ++struct _GstFFMpegDec
> ++{
> ++ GstElement element;
> ++
> ++ /* We need to keep track of our pads, so we do so here. */
> ++ GstPad *srcpad;
> ++ GstPad *sinkpad;
> ++
> ++ /* decoding */
> ++ AVCodecContext *context;
> ++ AVFrame *picture;
> ++ gboolean opened;
> ++ union
> ++ {
> ++ struct
> ++ {
> ++ gint width, height;
> ++ gint clip_width, clip_height;
> ++ gint par_n, par_d;
> ++ gint fps_n, fps_d;
> ++ gint old_fps_n, old_fps_d;
> ++ gboolean interlaced;
> ++
> ++ enum PixelFormat pix_fmt;
> ++ } video;
> ++ struct
> ++ {
> ++ gint channels;
> ++ gint samplerate;
> ++ gint depth;
> ++ } audio;
> ++ } format;
> ++ gboolean discont;
> ++ gboolean clear_ts;
> ++
> ++ /* for tracking DTS/PTS */
> ++ gboolean has_b_frames;
> ++ gboolean reordered_in;
> ++ GstClockTime last_in;
> ++ GstClockTime last_diff;
> ++ guint last_frames;
> ++ gboolean reordered_out;
> ++ GstClockTime last_out;
> ++ GstClockTime next_out;
> ++
> ++ /* parsing */
> ++ gboolean turnoff_parser; /* used for turning off aac raw parsing
> ++ * See bug #566250 */
> ++ AVCodecParserContext *pctx;
> ++ GstBuffer *pcache;
> ++ guint8 *padded;
> ++ guint padded_size;
> ++
> ++ GValue *par; /* pixel aspect ratio of incoming data */
> ++ gboolean current_dr; /* if direct rendering is enabled */
> ++ gboolean extra_ref; /* keep extra ref around in get/release */
> ++
> ++ /* some properties */
> ++ enum AVDiscard skip_frame;
> ++ gint lowres;
> ++ gboolean direct_rendering;
> ++ gboolean do_padding;
> ++ gboolean debug_mv;
> ++ gboolean crop;
> ++ int max_threads;
> ++
> ++ /* QoS stuff *//* with LOCK */
> ++ gdouble proportion;
> ++ GstClockTime earliest_time;
> ++ gint64 processed;
> ++ gint64 dropped;
> ++
> ++ /* clipping segment */
> ++ GstSegment segment;
> ++
> ++ gboolean is_realvideo;
> ++
> ++ GstTSInfo ts_info[MAX_TS_MASK + 1];
> ++ gint ts_idx;
> ++
> ++ /* reverse playback queue */
> ++ GList *queued;
> ++
> ++ /* Can downstream allocate 16bytes aligned data. */
> ++ gboolean can_allocate_aligned;
> ++};
> ++
> ++typedef struct _GstFFMpegDecClass GstFFMpegDecClass;
> ++
> ++struct _GstFFMpegDecClass
> ++{
> ++ GstElementClass parent_class;
> ++
> ++ AVCodec *in_plugin;
> ++ GstPadTemplate *srctempl, *sinktempl;
> ++};
> ++
> ++#define GST_TS_INFO_NONE &ts_info_none
> ++static const GstTSInfo ts_info_none = { -1, -1, -1, -1 };
> ++
> ++static const GstTSInfo *
> ++gst_ts_info_store (GstFFMpegDec * dec, GstClockTime timestamp,
> ++ GstClockTime duration, gint64 offset)
> ++{
> ++ gint idx = dec->ts_idx;
> ++ dec->ts_info[idx].idx = idx;
> ++ dec->ts_info[idx].timestamp = timestamp;
> ++ dec->ts_info[idx].duration = duration;
> ++ dec->ts_info[idx].offset = offset;
> ++ dec->ts_idx = (idx + 1) & MAX_TS_MASK;
> ++
> ++ return &dec->ts_info[idx];
> ++}
> ++
> ++static const GstTSInfo *
> ++gst_ts_info_get (GstFFMpegDec * dec, gint idx)
> ++{
> ++ if (G_UNLIKELY (idx < 0 || idx > MAX_TS_MASK))
> ++ return GST_TS_INFO_NONE;
> ++
> ++ return &dec->ts_info[idx];
> ++}
> ++
> ++#define GST_TYPE_FFMPEGDEC \
> ++ (gst_ffmpegdec_get_type())
> ++#define GST_FFMPEGDEC(obj) \
> ++ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_FFMPEGDEC,GstFFMpegDec))
> ++#define GST_FFMPEGDEC_CLASS(klass) \
> ++ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_FFMPEGDEC,GstFFMpegDecClass))
> ++#define GST_IS_FFMPEGDEC(obj) \
> ++ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_FFMPEGDEC))
> ++#define GST_IS_FFMPEGDEC_CLASS(klass) \
> ++ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_FFMPEGDEC))
> ++
> ++#define DEFAULT_LOWRES 0
> ++#define DEFAULT_SKIPFRAME 0
> ++#define DEFAULT_DIRECT_RENDERING TRUE
> ++#define DEFAULT_DO_PADDING TRUE
> ++#define DEFAULT_DEBUG_MV FALSE
> ++#define DEFAULT_CROP TRUE
> ++#define DEFAULT_MAX_THREADS 1
> ++
> ++enum
> ++{
> ++ PROP_0,
> ++ PROP_LOWRES,
> ++ PROP_SKIPFRAME,
> ++ PROP_DIRECT_RENDERING,
> ++ PROP_DO_PADDING,
> ++ PROP_DEBUG_MV,
> ++ PROP_CROP,
> ++ PROP_MAX_THREADS,
> ++ PROP_LAST
> ++};
> ++
> ++/* A number of function prototypes are given so we can refer to them later. */
> ++static void gst_ffmpegdec_base_init (GstFFMpegDecClass * klass);
> ++static void gst_ffmpegdec_class_init (GstFFMpegDecClass * klass);
> ++static void gst_ffmpegdec_init (GstFFMpegDec * ffmpegdec);
> ++static void gst_ffmpegdec_finalize (GObject * object);
> ++
> ++static gboolean gst_ffmpegdec_query (GstPad * pad, GstQuery * query);
> ++static gboolean gst_ffmpegdec_src_event (GstPad * pad, GstEvent * event);
> ++
> ++static gboolean gst_ffmpegdec_setcaps (GstPad * pad, GstCaps * caps);
> ++static gboolean gst_ffmpegdec_sink_event (GstPad * pad, GstEvent * event);
> ++static GstFlowReturn gst_ffmpegdec_chain (GstPad * pad, GstBuffer * buf);
> ++
> ++static GstStateChangeReturn gst_ffmpegdec_change_state (GstElement * element,
> ++ GstStateChange transition);
> ++
> ++static void gst_ffmpegdec_set_property (GObject * object,
> ++ guint prop_id, const GValue * value, GParamSpec * pspec);
> ++static void gst_ffmpegdec_get_property (GObject * object,
> ++ guint prop_id, GValue * value, GParamSpec * pspec);
> ++
> ++static gboolean gst_ffmpegdec_negotiate (GstFFMpegDec * ffmpegdec,
> ++ gboolean force);
> ++
> ++/* some sort of bufferpool handling, but different */
> ++static int gst_ffmpegdec_get_buffer (AVCodecContext * context,
> ++ AVFrame * picture);
> ++static void gst_ffmpegdec_release_buffer (AVCodecContext * context,
> ++ AVFrame * picture);
> ++
> ++static void gst_ffmpegdec_drain (GstFFMpegDec * ffmpegdec);
> ++
> ++#define GST_FFDEC_PARAMS_QDATA g_quark_from_static_string("ffdec-params")
> ++
> ++static GstElementClass *parent_class = NULL;
> ++
> ++#define GST_FFMPEGDEC_TYPE_LOWRES (gst_ffmpegdec_lowres_get_type())
> ++static GType
> ++gst_ffmpegdec_lowres_get_type (void)
> ++{
> ++ static GType ffmpegdec_lowres_type = 0;
> ++
> ++ if (!ffmpegdec_lowres_type) {
> ++ static const GEnumValue ffmpegdec_lowres[] = {
> ++ {0, "0", "full"},
> ++ {1, "1", "1/2-size"},
> ++ {2, "2", "1/4-size"},
> ++ {0, NULL, NULL},
> ++ };
> ++
> ++ ffmpegdec_lowres_type =
> ++ g_enum_register_static ("GstFFMpegDecLowres", ffmpegdec_lowres);
> ++ }
> ++
> ++ return ffmpegdec_lowres_type;
> ++}
> ++
> ++#define GST_FFMPEGDEC_TYPE_SKIPFRAME (gst_ffmpegdec_skipframe_get_type())
> ++static GType
> ++gst_ffmpegdec_skipframe_get_type (void)
> ++{
> ++ static GType ffmpegdec_skipframe_type = 0;
> ++
> ++ if (!ffmpegdec_skipframe_type) {
> ++ static const GEnumValue ffmpegdec_skipframe[] = {
> ++ {0, "0", "Skip nothing"},
> ++ {1, "1", "Skip B-frames"},
> ++ {2, "2", "Skip IDCT/Dequantization"},
> ++ {5, "5", "Skip everything"},
> ++ {0, NULL, NULL},
> ++ };
> ++
> ++ ffmpegdec_skipframe_type =
> ++ g_enum_register_static ("GstFFMpegDecSkipFrame", ffmpegdec_skipframe);
> ++ }
> ++
> ++ return ffmpegdec_skipframe_type;
> ++}
> ++
> ++static void
> ++gst_ffmpegdec_base_init (GstFFMpegDecClass * klass)
> ++{
> ++ GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
> ++ GstPadTemplate *sinktempl, *srctempl;
> ++ GstCaps *sinkcaps, *srccaps;
> ++ AVCodec *in_plugin;
> ++ gchar *longname, *classification, *description;
> ++
> ++ in_plugin =
> ++ (AVCodec *) g_type_get_qdata (G_OBJECT_CLASS_TYPE (klass),
> ++ GST_FFDEC_PARAMS_QDATA);
> ++ g_assert (in_plugin != NULL);
> ++
> ++ /* construct the element details struct */
> ++ longname = g_strdup_printf ("FFmpeg %s decoder", in_plugin->long_name);
> ++ classification = g_strdup_printf ("Codec/Decoder/%s",
> ++ (in_plugin->type == AVMEDIA_TYPE_VIDEO) ? "Video" : "Audio");
> ++ description = g_strdup_printf ("FFmpeg %s decoder", in_plugin->name);
> ++ gst_element_class_set_details_simple (element_class, longname, classification,
> ++ description,
> ++ "Wim Taymans <wim.taymans at gmail.com>, "
> ++ "Ronald Bultje <rbultje at ronald.bitfreak.net>, "
> ++ "Edward Hervey <bilboed at bilboed.com>");
> ++ g_free (longname);
> ++ g_free (classification);
> ++ g_free (description);
> ++
> ++ /* get the caps */
> ++ sinkcaps = gst_ffmpeg_codecid_to_caps (in_plugin->id, NULL, FALSE);
> ++ if (!sinkcaps) {
> ++ GST_DEBUG ("Couldn't get sink caps for decoder '%s'", in_plugin->name);
> ++ sinkcaps = gst_caps_from_string ("unknown/unknown");
> ++ }
> ++ if (in_plugin->type == AVMEDIA_TYPE_VIDEO) {
> ++ srccaps = gst_caps_from_string ("video/x-raw-rgb; video/x-raw-yuv");
> ++ } else {
> ++ srccaps = gst_ffmpeg_codectype_to_audio_caps (NULL,
> ++ in_plugin->id, FALSE, in_plugin);
> ++ }
> ++ if (!srccaps) {
> ++ GST_DEBUG ("Couldn't get source caps for decoder '%s'", in_plugin->name);
> ++ srccaps = gst_caps_from_string ("unknown/unknown");
> ++ }
> ++
> ++ /* pad templates */
> ++ sinktempl = gst_pad_template_new ("sink", GST_PAD_SINK,
> ++ GST_PAD_ALWAYS, sinkcaps);
> ++ srctempl = gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS, srccaps);
> ++
> ++ gst_element_class_add_pad_template (element_class, srctempl);
> ++ gst_element_class_add_pad_template (element_class, sinktempl);
> ++
> ++ klass->in_plugin = in_plugin;
> ++ klass->srctempl = srctempl;
> ++ klass->sinktempl = sinktempl;
> ++}
> ++
> ++static void
> ++gst_ffmpegdec_class_init (GstFFMpegDecClass * klass)
> ++{
> ++ GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
> ++ GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
> ++
> ++ parent_class = g_type_class_peek_parent (klass);
> ++
> ++ gobject_class->finalize = gst_ffmpegdec_finalize;
> ++
> ++ gobject_class->set_property = gst_ffmpegdec_set_property;
> ++ gobject_class->get_property = gst_ffmpegdec_get_property;
> ++
> ++ if (klass->in_plugin->type == AVMEDIA_TYPE_VIDEO) {
> ++ int caps;
> ++
> ++ g_object_class_install_property (gobject_class, PROP_SKIPFRAME,
> ++ g_param_spec_enum ("skip-frame", "Skip frames",
> ++ "Which types of frames to skip during decoding",
> ++ GST_FFMPEGDEC_TYPE_SKIPFRAME, 0,
> ++ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
> ++ g_object_class_install_property (gobject_class, PROP_LOWRES,
> ++ g_param_spec_enum ("lowres", "Low resolution",
> ++ "At which resolution to decode images", GST_FFMPEGDEC_TYPE_LOWRES,
> ++ 0, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
> ++ g_object_class_install_property (gobject_class, PROP_DIRECT_RENDERING,
> ++ g_param_spec_boolean ("direct-rendering", "Direct Rendering",
> ++ "Enable direct rendering", DEFAULT_DIRECT_RENDERING,
> ++ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
> ++ g_object_class_install_property (gobject_class, PROP_DO_PADDING,
> ++ g_param_spec_boolean ("do-padding", "Do Padding",
> ++ "Add 0 padding before decoding data", DEFAULT_DO_PADDING,
> ++ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
> ++ g_object_class_install_property (gobject_class, PROP_DEBUG_MV,
> ++ g_param_spec_boolean ("debug-mv", "Debug motion vectors",
> ++ "Whether ffmpeg should print motion vectors on top of the image",
> ++ DEFAULT_DEBUG_MV, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
> ++#if 0
> ++ g_object_class_install_property (gobject_class, PROP_CROP,
> ++ g_param_spec_boolean ("crop", "Crop",
> ++ "Crop images to the display region",
> ++ DEFAULT_CROP, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
> ++#endif
> ++
> ++ caps = klass->in_plugin->capabilities;
> ++ if (caps & (CODEC_CAP_FRAME_THREADS | CODEC_CAP_SLICE_THREADS)) {
> ++ g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_MAX_THREADS,
> ++ g_param_spec_int ("max-threads", "Maximum decode threads",
> ++ "Maximum number of worker threads to spawn. (0 = auto)",
> ++ 0, G_MAXINT, DEFAULT_MAX_THREADS,
> ++ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
> ++ }
> ++ }
> ++
> ++ gstelement_class->change_state = gst_ffmpegdec_change_state;
> ++}
> ++
> ++static void
> ++gst_ffmpegdec_init (GstFFMpegDec * ffmpegdec)
> ++{
> ++ GstFFMpegDecClass *oclass;
> ++
> ++ oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
> ++
> ++ /* setup pads */
> ++ ffmpegdec->sinkpad = gst_pad_new_from_template (oclass->sinktempl, "sink");
> ++ gst_pad_set_setcaps_function (ffmpegdec->sinkpad,
> ++ GST_DEBUG_FUNCPTR (gst_ffmpegdec_setcaps));
> ++ gst_pad_set_event_function (ffmpegdec->sinkpad,
> ++ GST_DEBUG_FUNCPTR (gst_ffmpegdec_sink_event));
> ++ gst_pad_set_chain_function (ffmpegdec->sinkpad,
> ++ GST_DEBUG_FUNCPTR (gst_ffmpegdec_chain));
> ++ gst_element_add_pad (GST_ELEMENT (ffmpegdec), ffmpegdec->sinkpad);
> ++
> ++ ffmpegdec->srcpad = gst_pad_new_from_template (oclass->srctempl, "src");
> ++ gst_pad_use_fixed_caps (ffmpegdec->srcpad);
> ++ gst_pad_set_event_function (ffmpegdec->srcpad,
> ++ GST_DEBUG_FUNCPTR (gst_ffmpegdec_src_event));
> ++ gst_pad_set_query_function (ffmpegdec->srcpad,
> ++ GST_DEBUG_FUNCPTR (gst_ffmpegdec_query));
> ++ gst_element_add_pad (GST_ELEMENT (ffmpegdec), ffmpegdec->srcpad);
> ++
> ++ /* some ffmpeg data */
> ++ ffmpegdec->context = avcodec_alloc_context ();
> ++ ffmpegdec->picture = avcodec_alloc_frame ();
> ++ ffmpegdec->pctx = NULL;
> ++ ffmpegdec->pcache = NULL;
> ++ ffmpegdec->par = NULL;
> ++ ffmpegdec->opened = FALSE;
> ++ ffmpegdec->skip_frame = ffmpegdec->lowres = 0;
> ++ ffmpegdec->direct_rendering = DEFAULT_DIRECT_RENDERING;
> ++ ffmpegdec->do_padding = DEFAULT_DO_PADDING;
> ++ ffmpegdec->debug_mv = DEFAULT_DEBUG_MV;
> ++ ffmpegdec->crop = DEFAULT_CROP;
> ++ ffmpegdec->max_threads = DEFAULT_MAX_THREADS;
> ++
> ++ ffmpegdec->format.video.par_n = -1;
> ++ ffmpegdec->format.video.fps_n = -1;
> ++ ffmpegdec->format.video.old_fps_n = -1;
> ++ gst_segment_init (&ffmpegdec->segment, GST_FORMAT_TIME);
> ++
> ++ /* We initially assume downstream can allocate 16 bytes aligned buffers */
> ++ ffmpegdec->can_allocate_aligned = TRUE;
> ++}
> ++
> ++static void
> ++gst_ffmpegdec_finalize (GObject * object)
> ++{
> ++ GstFFMpegDec *ffmpegdec = (GstFFMpegDec *) object;
> ++
> ++ if (ffmpegdec->context != NULL) {
> ++ av_free (ffmpegdec->context);
> ++ ffmpegdec->context = NULL;
> ++ }
> ++
> ++ if (ffmpegdec->picture != NULL) {
> ++ av_free (ffmpegdec->picture);
> ++ ffmpegdec->picture = NULL;
> ++ }
> ++
> ++ G_OBJECT_CLASS (parent_class)->finalize (object);
> ++}
> ++
> ++static gboolean
> ++gst_ffmpegdec_query (GstPad * pad, GstQuery * query)
> ++{
> ++ GstFFMpegDec *ffmpegdec;
> ++ GstPad *peer;
> ++ gboolean res;
> ++
> ++ ffmpegdec = (GstFFMpegDec *) gst_pad_get_parent (pad);
> ++
> ++ res = FALSE;
> ++
> ++ if ((peer = gst_pad_get_peer (ffmpegdec->sinkpad))) {
> ++ /* just forward to peer */
> ++ res = gst_pad_query (peer, query);
> ++ gst_object_unref (peer);
> ++ }
> ++#if 0
> ++ {
> ++ GstFormat bfmt;
> ++
> ++ bfmt = GST_FORMAT_BYTES;
> ++
> ++ /* ok, do bitrate calc... */
> ++ if ((type != GST_QUERY_POSITION && type != GST_QUERY_TOTAL) ||
> ++ *fmt != GST_FORMAT_TIME || ffmpegdec->context->bit_rate == 0 ||
> ++ !gst_pad_query (peer, type, &bfmt, value))
> ++ return FALSE;
> ++
> ++ if (ffmpegdec->pcache && type == GST_QUERY_POSITION)
> ++ *value -= GST_BUFFER_SIZE (ffmpegdec->pcache);
> ++ *value *= GST_SECOND / ffmpegdec->context->bit_rate;
> ++ }
> ++#endif
> ++
> ++ gst_object_unref (ffmpegdec);
> ++
> ++ return res;
> ++}
> ++
> ++static void
> ++gst_ffmpegdec_reset_ts (GstFFMpegDec * ffmpegdec)
> ++{
> ++ ffmpegdec->last_in = GST_CLOCK_TIME_NONE;
> ++ ffmpegdec->last_diff = GST_CLOCK_TIME_NONE;
> ++ ffmpegdec->last_frames = 0;
> ++ ffmpegdec->last_out = GST_CLOCK_TIME_NONE;
> ++ ffmpegdec->next_out = GST_CLOCK_TIME_NONE;
> ++ ffmpegdec->reordered_in = FALSE;
> ++ ffmpegdec->reordered_out = FALSE;
> ++}
> ++
> ++static void
> ++gst_ffmpegdec_update_qos (GstFFMpegDec * ffmpegdec, gdouble proportion,
> ++ GstClockTime timestamp)
> ++{
> ++ GST_LOG_OBJECT (ffmpegdec, "update QOS: %f, %" GST_TIME_FORMAT,
> ++ proportion, GST_TIME_ARGS (timestamp));
> ++
> ++ GST_OBJECT_LOCK (ffmpegdec);
> ++ ffmpegdec->proportion = proportion;
> ++ ffmpegdec->earliest_time = timestamp;
> ++ GST_OBJECT_UNLOCK (ffmpegdec);
> ++}
> ++
> ++static void
> ++gst_ffmpegdec_reset_qos (GstFFMpegDec * ffmpegdec)
> ++{
> ++ gst_ffmpegdec_update_qos (ffmpegdec, 0.5, GST_CLOCK_TIME_NONE);
> ++ ffmpegdec->processed = 0;
> ++ ffmpegdec->dropped = 0;
> ++}
> ++
> ++static void
> ++gst_ffmpegdec_read_qos (GstFFMpegDec * ffmpegdec, gdouble * proportion,
> ++ GstClockTime * timestamp)
> ++{
> ++ GST_OBJECT_LOCK (ffmpegdec);
> ++ *proportion = ffmpegdec->proportion;
> ++ *timestamp = ffmpegdec->earliest_time;
> ++ GST_OBJECT_UNLOCK (ffmpegdec);
> ++}
> ++
> ++static gboolean
> ++gst_ffmpegdec_src_event (GstPad * pad, GstEvent * event)
> ++{
> ++ GstFFMpegDec *ffmpegdec;
> ++ gboolean res;
> ++
> ++ ffmpegdec = (GstFFMpegDec *) gst_pad_get_parent (pad);
> ++
> ++ switch (GST_EVENT_TYPE (event)) {
> ++ case GST_EVENT_QOS:
> ++ {
> ++ gdouble proportion;
> ++ GstClockTimeDiff diff;
> ++ GstClockTime timestamp;
> ++
> ++      gst_event_parse_qos (event, &proportion, &diff, &timestamp);
> ++
> ++ /* update our QoS values */
> ++ gst_ffmpegdec_update_qos (ffmpegdec, proportion, timestamp + diff);
> ++
> ++ /* forward upstream */
> ++ res = gst_pad_push_event (ffmpegdec->sinkpad, event);
> ++ break;
> ++ }
> ++ default:
> ++ /* forward upstream */
> ++ res = gst_pad_push_event (ffmpegdec->sinkpad, event);
> ++ break;
> ++ }
> ++
> ++ gst_object_unref (ffmpegdec);
> ++
> ++ return res;
> ++}
> ++
> ++/* with LOCK */
> ++static void
> ++gst_ffmpegdec_close (GstFFMpegDec * ffmpegdec)
> ++{
> ++ if (!ffmpegdec->opened)
> ++ return;
> ++
> ++ GST_LOG_OBJECT (ffmpegdec, "closing ffmpeg codec");
> ++
> ++ if (ffmpegdec->par) {
> ++ g_free (ffmpegdec->par);
> ++ ffmpegdec->par = NULL;
> ++ }
> ++
> ++ if (ffmpegdec->context->priv_data)
> ++ gst_ffmpeg_avcodec_close (ffmpegdec->context);
> ++ ffmpegdec->opened = FALSE;
> ++
> ++ if (ffmpegdec->context->extradata) {
> ++ av_free (ffmpegdec->context->extradata);
> ++ ffmpegdec->context->extradata = NULL;
> ++ }
> ++
> ++ if (ffmpegdec->pctx) {
> ++ if (ffmpegdec->pcache) {
> ++ gst_buffer_unref (ffmpegdec->pcache);
> ++ ffmpegdec->pcache = NULL;
> ++ }
> ++ av_parser_close (ffmpegdec->pctx);
> ++ ffmpegdec->pctx = NULL;
> ++ }
> ++
> ++ ffmpegdec->format.video.par_n = -1;
> ++ ffmpegdec->format.video.fps_n = -1;
> ++ ffmpegdec->format.video.old_fps_n = -1;
> ++ ffmpegdec->format.video.interlaced = FALSE;
> ++}
> ++
> ++/* with LOCK */
> ++static gboolean
> ++gst_ffmpegdec_open (GstFFMpegDec * ffmpegdec)
> ++{
> ++ GstFFMpegDecClass *oclass;
> ++
> ++ oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
> ++
> ++ if (gst_ffmpeg_avcodec_open (ffmpegdec->context, oclass->in_plugin) < 0)
> ++ goto could_not_open;
> ++
> ++ ffmpegdec->opened = TRUE;
> ++ ffmpegdec->is_realvideo = FALSE;
> ++
> ++ GST_LOG_OBJECT (ffmpegdec, "Opened ffmpeg codec %s, id %d",
> ++ oclass->in_plugin->name, oclass->in_plugin->id);
> ++
> ++ /* open a parser if we can */
> ++ switch (oclass->in_plugin->id) {
> ++ case CODEC_ID_MPEG4:
> ++ case CODEC_ID_MJPEG:
> ++ case CODEC_ID_VC1:
> ++ GST_LOG_OBJECT (ffmpegdec, "not using parser, blacklisted codec");
> ++ ffmpegdec->pctx = NULL;
> ++ break;
> ++ case CODEC_ID_H264:
> ++ /* For H264, only use a parser if there is no context data, if there is,
> ++ * we're talking AVC */
> ++ if (ffmpegdec->context->extradata_size == 0) {
> ++ GST_LOG_OBJECT (ffmpegdec, "H264 with no extradata, creating parser");
> ++ ffmpegdec->pctx = av_parser_init (oclass->in_plugin->id);
> ++ } else {
> ++ GST_LOG_OBJECT (ffmpegdec,
> ++ "H264 with extradata implies framed data - not using parser");
> ++ ffmpegdec->pctx = NULL;
> ++ }
> ++ break;
> ++ case CODEC_ID_RV10:
> ++ case CODEC_ID_RV30:
> ++ case CODEC_ID_RV20:
> ++ case CODEC_ID_RV40:
> ++ ffmpegdec->is_realvideo = TRUE;
> ++ break;
> ++ default:
> ++ if (!ffmpegdec->turnoff_parser) {
> ++ ffmpegdec->pctx = av_parser_init (oclass->in_plugin->id);
> ++ if (ffmpegdec->pctx)
> ++ GST_LOG_OBJECT (ffmpegdec, "Using parser %p", ffmpegdec->pctx);
> ++ else
> ++ GST_LOG_OBJECT (ffmpegdec, "No parser for codec");
> ++ } else {
> ++ GST_LOG_OBJECT (ffmpegdec, "Parser deactivated for format");
> ++ }
> ++ break;
> ++ }
> ++
> ++ switch (oclass->in_plugin->type) {
> ++ case AVMEDIA_TYPE_VIDEO:
> ++ ffmpegdec->format.video.width = 0;
> ++ ffmpegdec->format.video.height = 0;
> ++ ffmpegdec->format.video.clip_width = -1;
> ++ ffmpegdec->format.video.clip_height = -1;
> ++ ffmpegdec->format.video.pix_fmt = PIX_FMT_NB;
> ++ ffmpegdec->format.video.interlaced = FALSE;
> ++ break;
> ++ case AVMEDIA_TYPE_AUDIO:
> ++ ffmpegdec->format.audio.samplerate = 0;
> ++ ffmpegdec->format.audio.channels = 0;
> ++ ffmpegdec->format.audio.depth = 0;
> ++ break;
> ++ default:
> ++ break;
> ++ }
> ++
> ++ gst_ffmpegdec_reset_ts (ffmpegdec);
> ++ /* FIXME, reset_qos holds the LOCK */
> ++ ffmpegdec->proportion = 0.0;
> ++ ffmpegdec->earliest_time = -1;
> ++
> ++ return TRUE;
> ++
> ++ /* ERRORS */
> ++could_not_open:
> ++ {
> ++ gst_ffmpegdec_close (ffmpegdec);
> ++ GST_DEBUG_OBJECT (ffmpegdec, "ffdec_%s: Failed to open FFMPEG codec",
> ++ oclass->in_plugin->name);
> ++ return FALSE;
> ++ }
> ++}
> ++
> ++static gboolean
> ++gst_ffmpegdec_setcaps (GstPad * pad, GstCaps * caps)
> ++{
> ++ GstFFMpegDec *ffmpegdec;
> ++ GstFFMpegDecClass *oclass;
> ++ GstStructure *structure;
> ++ const GValue *par;
> ++ const GValue *fps;
> ++ gboolean ret = TRUE;
> ++
> ++ ffmpegdec = (GstFFMpegDec *) (gst_pad_get_parent (pad));
> ++ oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
> ++
> ++ GST_DEBUG_OBJECT (pad, "setcaps called");
> ++
> ++ GST_OBJECT_LOCK (ffmpegdec);
> ++
> ++ /* stupid check for VC1 */
> ++ if ((oclass->in_plugin->id == CODEC_ID_WMV3) ||
> ++ (oclass->in_plugin->id == CODEC_ID_VC1))
> ++ oclass->in_plugin->id = gst_ffmpeg_caps_to_codecid (caps, NULL);
> ++
> ++ /* close old session */
> ++ if (ffmpegdec->opened) {
> ++ GST_OBJECT_UNLOCK (ffmpegdec);
> ++ gst_ffmpegdec_drain (ffmpegdec);
> ++ GST_OBJECT_LOCK (ffmpegdec);
> ++ gst_ffmpegdec_close (ffmpegdec);
> ++
> ++ /* and reset the defaults that were set when a context is created */
> ++ avcodec_get_context_defaults (ffmpegdec->context);
> ++ }
> ++
> ++ /* set buffer functions */
> ++ ffmpegdec->context->get_buffer = gst_ffmpegdec_get_buffer;
> ++ ffmpegdec->context->release_buffer = gst_ffmpegdec_release_buffer;
> ++ ffmpegdec->context->draw_horiz_band = NULL;
> ++
> ++ /* default is to let format decide if it needs a parser */
> ++ ffmpegdec->turnoff_parser = FALSE;
> ++
> ++ ffmpegdec->has_b_frames = FALSE;
> ++
> ++ GST_LOG_OBJECT (ffmpegdec, "size %dx%d", ffmpegdec->context->width,
> ++ ffmpegdec->context->height);
> ++
> ++ /* get size and so */
> ++ gst_ffmpeg_caps_with_codecid (oclass->in_plugin->id,
> ++ oclass->in_plugin->type, caps, ffmpegdec->context);
> ++
> ++ GST_LOG_OBJECT (ffmpegdec, "size after %dx%d", ffmpegdec->context->width,
> ++ ffmpegdec->context->height);
> ++
> ++ if (!ffmpegdec->context->time_base.den || !ffmpegdec->context->time_base.num) {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "forcing 25/1 framerate");
> ++ ffmpegdec->context->time_base.num = 1;
> ++ ffmpegdec->context->time_base.den = 25;
> ++ }
> ++
> ++ /* get pixel aspect ratio if it's set */
> ++ structure = gst_caps_get_structure (caps, 0);
> ++
> ++ par = gst_structure_get_value (structure, "pixel-aspect-ratio");
> ++ if (par) {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "sink caps have pixel-aspect-ratio of %d:%d",
> ++ gst_value_get_fraction_numerator (par),
> ++ gst_value_get_fraction_denominator (par));
> ++ /* should be NULL */
> ++ if (ffmpegdec->par)
> ++ g_free (ffmpegdec->par);
> ++ ffmpegdec->par = g_new0 (GValue, 1);
> ++ gst_value_init_and_copy (ffmpegdec->par, par);
> ++ }
> ++
> ++ /* get the framerate from incoming caps. fps_n is set to -1 when
> ++ * there is no valid framerate */
> ++ fps = gst_structure_get_value (structure, "framerate");
> ++ if (fps != NULL && GST_VALUE_HOLDS_FRACTION (fps)) {
> ++ ffmpegdec->format.video.fps_n = gst_value_get_fraction_numerator (fps);
> ++ ffmpegdec->format.video.fps_d = gst_value_get_fraction_denominator (fps);
> ++ GST_DEBUG_OBJECT (ffmpegdec, "Using framerate %d/%d from incoming caps",
> ++ ffmpegdec->format.video.fps_n, ffmpegdec->format.video.fps_d);
> ++ } else {
> ++ ffmpegdec->format.video.fps_n = -1;
> ++ GST_DEBUG_OBJECT (ffmpegdec, "Using framerate from codec");
> ++ }
> ++
> ++ /* figure out if we can use direct rendering */
> ++ ffmpegdec->current_dr = FALSE;
> ++ ffmpegdec->extra_ref = FALSE;
> ++ if (ffmpegdec->direct_rendering) {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "trying to enable direct rendering");
> ++ if (oclass->in_plugin->capabilities & CODEC_CAP_DR1) {
> ++ if (oclass->in_plugin->id == CODEC_ID_H264) {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "disable direct rendering setup for H264");
> ++ /* does not work, many stuff reads outside of the planes */
> ++ ffmpegdec->current_dr = FALSE;
> ++ ffmpegdec->extra_ref = TRUE;
> ++ } else if ((oclass->in_plugin->id == CODEC_ID_SVQ1) ||
> ++ (oclass->in_plugin->id == CODEC_ID_VP5) ||
> ++ (oclass->in_plugin->id == CODEC_ID_VP6) ||
> ++ (oclass->in_plugin->id == CODEC_ID_VP6F) ||
> ++ (oclass->in_plugin->id == CODEC_ID_VP6A)) {
> ++ GST_DEBUG_OBJECT (ffmpegdec,
> ++ "disable direct rendering setup for broken stride support");
> ++ /* does not work, uses a incompatible stride. See #610613 */
> ++ ffmpegdec->current_dr = FALSE;
> ++ ffmpegdec->extra_ref = TRUE;
> ++ } else {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "enabled direct rendering");
> ++ ffmpegdec->current_dr = TRUE;
> ++ }
> ++ } else {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "direct rendering not supported");
> ++ }
> ++ }
> ++ if (ffmpegdec->current_dr) {
> ++ /* do *not* draw edges when in direct rendering, for some reason it draws
> ++ * outside of the memory. */
> ++ ffmpegdec->context->flags |= CODEC_FLAG_EMU_EDGE;
> ++ }
> ++
> ++ /* for AAC we only use av_parse if not on stream-format==raw or ==loas */
> ++ if (oclass->in_plugin->id == CODEC_ID_AAC
> ++ || oclass->in_plugin->id == CODEC_ID_AAC_LATM) {
> ++ const gchar *format = gst_structure_get_string (structure, "stream-format");
> ++
> ++ if (format == NULL || strcmp (format, "raw") == 0) {
> ++ ffmpegdec->turnoff_parser = TRUE;
> ++ }
> ++ }
> ++
> ++ /* for FLAC, don't parse if it's already parsed */
> ++ if (oclass->in_plugin->id == CODEC_ID_FLAC) {
> ++ if (gst_structure_has_field (structure, "streamheader"))
> ++ ffmpegdec->turnoff_parser = TRUE;
> ++ }
> ++
> ++ /* workaround encoder bugs */
> ++ ffmpegdec->context->workaround_bugs |= FF_BUG_AUTODETECT;
> ++ ffmpegdec->context->err_recognition = 1;
> ++
> ++ /* for slow cpus */
> ++ ffmpegdec->context->lowres = ffmpegdec->lowres;
> ++ ffmpegdec->context->skip_frame = ffmpegdec->skip_frame;
> ++
> ++ /* ffmpeg can draw motion vectors on top of the image (not every decoder
> ++ * supports it) */
> ++ ffmpegdec->context->debug_mv = ffmpegdec->debug_mv;
> ++
> ++ if (ffmpegdec->max_threads == 0)
> ++ ffmpegdec->context->thread_count = gst_ffmpeg_auto_max_threads ();
> ++ else
> ++ ffmpegdec->context->thread_count = ffmpegdec->max_threads;
> ++
> ++ /* open codec - we don't select an output pix_fmt yet,
> ++ * simply because we don't know! We only get it
> ++ * during playback... */
> ++ if (!gst_ffmpegdec_open (ffmpegdec))
> ++ goto open_failed;
> ++
> ++ /* clipping region */
> ++ gst_structure_get_int (structure, "width",
> ++ &ffmpegdec->format.video.clip_width);
> ++ gst_structure_get_int (structure, "height",
> ++ &ffmpegdec->format.video.clip_height);
> ++
> ++ GST_DEBUG_OBJECT (pad, "clipping to %dx%d",
> ++ ffmpegdec->format.video.clip_width, ffmpegdec->format.video.clip_height);
> ++
> ++ /* take into account the lowres property */
> ++ if (ffmpegdec->format.video.clip_width != -1)
> ++ ffmpegdec->format.video.clip_width >>= ffmpegdec->lowres;
> ++ if (ffmpegdec->format.video.clip_height != -1)
> ++ ffmpegdec->format.video.clip_height >>= ffmpegdec->lowres;
> ++
> ++ GST_DEBUG_OBJECT (pad, "final clipping to %dx%d",
> ++ ffmpegdec->format.video.clip_width, ffmpegdec->format.video.clip_height);
> ++
> ++done:
> ++ GST_OBJECT_UNLOCK (ffmpegdec);
> ++
> ++ gst_object_unref (ffmpegdec);
> ++
> ++ return ret;
> ++
> ++ /* ERRORS */
> ++open_failed:
> ++ {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "Failed to open");
> ++ if (ffmpegdec->par) {
> ++ g_free (ffmpegdec->par);
> ++ ffmpegdec->par = NULL;
> ++ }
> ++ ret = FALSE;
> ++ goto done;
> ++ }
> ++}
> ++
> ++static GstFlowReturn
> ++alloc_output_buffer (GstFFMpegDec * ffmpegdec, GstBuffer ** outbuf,
> ++ gint width, gint height)
> ++{
> ++ GstFlowReturn ret;
> ++ gint fsize;
> ++
> ++ ret = GST_FLOW_ERROR;
> ++ *outbuf = NULL;
> ++
> ++ GST_LOG_OBJECT (ffmpegdec, "alloc output buffer");
> ++
> ++ /* see if we need renegotiation */
> ++ if (G_UNLIKELY (!gst_ffmpegdec_negotiate (ffmpegdec, FALSE)))
> ++ goto negotiate_failed;
> ++
> ++ /* get the size of the gstreamer output buffer given a
> ++ * width/height/format */
> ++ fsize = gst_ffmpeg_avpicture_get_size (ffmpegdec->context->pix_fmt,
> ++ width, height);
> ++
> ++ if (ffmpegdec->can_allocate_aligned) {
> ++ GST_LOG_OBJECT (ffmpegdec, "calling pad_alloc");
> ++ /* no pallete, we can use the buffer size to alloc */
> ++ ret = gst_pad_alloc_buffer_and_set_caps (ffmpegdec->srcpad,
> ++ GST_BUFFER_OFFSET_NONE, fsize,
> ++ GST_PAD_CAPS (ffmpegdec->srcpad), outbuf);
> ++ if (G_UNLIKELY (ret != GST_FLOW_OK))
> ++ goto alloc_failed;
> ++
> ++ /* If buffer isn't 128-bit aligned, create a memaligned one ourselves */
> ++ if (((uintptr_t) GST_BUFFER_DATA (*outbuf)) % 16) {
> ++ GST_DEBUG_OBJECT (ffmpegdec,
> ++ "Downstream can't allocate aligned buffers.");
> ++ ffmpegdec->can_allocate_aligned = FALSE;
> ++ gst_buffer_unref (*outbuf);
> ++ *outbuf = new_aligned_buffer (fsize, GST_PAD_CAPS (ffmpegdec->srcpad));
> ++ }
> ++ } else {
> ++ GST_LOG_OBJECT (ffmpegdec,
> ++ "not calling pad_alloc, we have a pallete or downstream can't give 16 byte aligned buffers.");
> ++ /* for paletted data we can't use pad_alloc_buffer(), because
> ++ * fsize contains the size of the palette, so the overall size
> ++ * is bigger than ffmpegcolorspace's unit size, which will
> ++ * prompt GstBaseTransform to complain endlessly ... */
> ++ *outbuf = new_aligned_buffer (fsize, GST_PAD_CAPS (ffmpegdec->srcpad));
> ++ ret = GST_FLOW_OK;
> ++ }
> ++ /* set caps, we do this here because the buffer is still writable here and we
> ++ * are sure to be negotiated */
> ++ gst_buffer_set_caps (*outbuf, GST_PAD_CAPS (ffmpegdec->srcpad));
> ++
> ++ return ret;
> ++
> ++ /* special cases */
> ++negotiate_failed:
> ++ {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "negotiate failed");
> ++ return GST_FLOW_NOT_NEGOTIATED;
> ++ }
> ++alloc_failed:
> ++ {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "pad_alloc failed %d (%s)", ret,
> ++ gst_flow_get_name (ret));
> ++ return ret;
> ++ }
> ++}
> ++
> ++static int
> ++gst_ffmpegdec_get_buffer (AVCodecContext * context, AVFrame * picture)
> ++{
> ++ GstBuffer *buf = NULL;
> ++ GstFFMpegDec *ffmpegdec;
> ++ gint width, height;
> ++ gint coded_width, coded_height;
> ++ gint res;
> ++
> ++ ffmpegdec = (GstFFMpegDec *) context->opaque;
> ++
> ++ GST_DEBUG_OBJECT (ffmpegdec, "getting buffer");
> ++
> ++ /* apply the last info we have seen to this picture, when we get the
> ++ * picture back from ffmpeg we can use this to correctly timestamp the output
> ++ * buffer */
> ++ picture->reordered_opaque = context->reordered_opaque;
> ++ /* make sure we don't free the buffer when it's not ours */
> ++ picture->opaque = NULL;
> ++
> ++ /* take width and height before clipping */
> ++ width = context->width;
> ++ height = context->height;
> ++ coded_width = context->coded_width;
> ++ coded_height = context->coded_height;
> ++
> ++ GST_LOG_OBJECT (ffmpegdec, "dimension %dx%d, coded %dx%d", width, height,
> ++ coded_width, coded_height);
> ++ if (!ffmpegdec->current_dr) {
> ++ GST_LOG_OBJECT (ffmpegdec, "direct rendering disabled, fallback alloc");
> ++ res = avcodec_default_get_buffer (context, picture);
> ++
> ++ GST_LOG_OBJECT (ffmpegdec, "linsize %d %d %d", picture->linesize[0],
> ++ picture->linesize[1], picture->linesize[2]);
> ++ GST_LOG_OBJECT (ffmpegdec, "data %u %u %u", 0,
> ++ (guint) (picture->data[1] - picture->data[0]),
> ++ (guint) (picture->data[2] - picture->data[0]));
> ++ return res;
> ++ }
> ++
> ++ switch (context->codec_type) {
> ++ case AVMEDIA_TYPE_VIDEO:
> ++ /* some ffmpeg video plugins don't see the point in setting codec_type ... */
> ++ case AVMEDIA_TYPE_UNKNOWN:
> ++ {
> ++ GstFlowReturn ret;
> ++ gint clip_width, clip_height;
> ++
> ++ /* take final clipped output size */
> ++ if ((clip_width = ffmpegdec->format.video.clip_width) == -1)
> ++ clip_width = width;
> ++ if ((clip_height = ffmpegdec->format.video.clip_height) == -1)
> ++ clip_height = height;
> ++
> ++ GST_LOG_OBJECT (ffmpegdec, "raw outsize %d/%d", width, height);
> ++
> ++ /* this is the size ffmpeg needs for the buffer */
> ++ avcodec_align_dimensions (context, &width, &height);
> ++
> ++ GST_LOG_OBJECT (ffmpegdec, "aligned outsize %d/%d, clip %d/%d",
> ++ width, height, clip_width, clip_height);
> ++
> ++ if (width != clip_width || height != clip_height) {
> ++ /* We can't alloc if we need to clip the output buffer later */
> ++ GST_LOG_OBJECT (ffmpegdec, "we need clipping, fallback alloc");
> ++ return avcodec_default_get_buffer (context, picture);
> ++ }
> ++
> ++ /* alloc with aligned dimensions for ffmpeg */
> ++ ret = alloc_output_buffer (ffmpegdec, &buf, width, height);
> ++ if (G_UNLIKELY (ret != GST_FLOW_OK)) {
> ++ /* alloc default buffer when we can't get one from downstream */
> ++ GST_LOG_OBJECT (ffmpegdec, "alloc failed, fallback alloc");
> ++ return avcodec_default_get_buffer (context, picture);
> ++ }
> ++
> ++ /* copy the right pointers and strides in the picture object */
> ++ gst_ffmpeg_avpicture_fill ((AVPicture *) picture,
> ++ GST_BUFFER_DATA (buf), context->pix_fmt, width, height);
> ++ break;
> ++ }
> ++ case AVMEDIA_TYPE_AUDIO:
> ++ default:
> ++ GST_ERROR_OBJECT (ffmpegdec,
> ++ "_get_buffer() should never get called for non-video buffers !");
> ++ g_assert_not_reached ();
> ++ break;
> ++ }
> ++
> ++ /* tell ffmpeg we own this buffer, tranfer the ref we have on the buffer to
> ++ * the opaque data. */
> ++ picture->type = FF_BUFFER_TYPE_USER;
> ++ picture->opaque = buf;
> ++
> ++#ifdef EXTRA_REF
> ++ if (picture->reference != 0 || ffmpegdec->extra_ref) {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "adding extra ref");
> ++ gst_buffer_ref (buf);
> ++ }
> ++#endif
> ++
> ++ GST_LOG_OBJECT (ffmpegdec, "returned buffer %p", buf);
> ++
> ++ return 0;
> ++}
> ++
> ++static void
> ++gst_ffmpegdec_release_buffer (AVCodecContext * context, AVFrame * picture)
> ++{
> ++ gint i;
> ++ GstBuffer *buf;
> ++ GstFFMpegDec *ffmpegdec;
> ++
> ++ ffmpegdec = (GstFFMpegDec *) context->opaque;
> ++
> ++ /* check if it was our buffer */
> ++ if (picture->opaque == NULL) {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "default release buffer");
> ++ avcodec_default_release_buffer (context, picture);
> ++ return;
> ++ }
> ++
> ++ /* we remove the opaque data now */
> ++ buf = GST_BUFFER_CAST (picture->opaque);
> ++ GST_DEBUG_OBJECT (ffmpegdec, "release buffer %p", buf);
> ++ picture->opaque = NULL;
> ++
> ++#ifdef EXTRA_REF
> ++ if (picture->reference != 0 || ffmpegdec->extra_ref) {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "remove extra ref");
> ++ gst_buffer_unref (buf);
> ++ }
> ++#else
> ++ gst_buffer_unref (buf);
> ++#endif
> ++
> ++ /* zero out the reference in ffmpeg */
> ++ for (i = 0; i < 4; i++) {
> ++ picture->data[i] = NULL;
> ++ picture->linesize[i] = 0;
> ++ }
> ++}
> ++
> ++static void
> ++gst_ffmpegdec_add_pixel_aspect_ratio (GstFFMpegDec * ffmpegdec,
> ++ GstStructure * s)
> ++{
> ++ gboolean demuxer_par_set = FALSE;
> ++ gboolean decoder_par_set = FALSE;
> ++ gint demuxer_num = 1, demuxer_denom = 1;
> ++ gint decoder_num = 1, decoder_denom = 1;
> ++
> ++ GST_OBJECT_LOCK (ffmpegdec);
> ++
> ++ if (ffmpegdec->par) {
> ++ demuxer_num = gst_value_get_fraction_numerator (ffmpegdec->par);
> ++ demuxer_denom = gst_value_get_fraction_denominator (ffmpegdec->par);
> ++ demuxer_par_set = TRUE;
> ++ GST_DEBUG_OBJECT (ffmpegdec, "Demuxer PAR: %d:%d", demuxer_num,
> ++ demuxer_denom);
> ++ }
> ++
> ++ if (ffmpegdec->context->sample_aspect_ratio.num &&
> ++ ffmpegdec->context->sample_aspect_ratio.den) {
> ++ decoder_num = ffmpegdec->context->sample_aspect_ratio.num;
> ++ decoder_denom = ffmpegdec->context->sample_aspect_ratio.den;
> ++ decoder_par_set = TRUE;
> ++ GST_DEBUG_OBJECT (ffmpegdec, "Decoder PAR: %d:%d", decoder_num,
> ++ decoder_denom);
> ++ }
> ++
> ++ GST_OBJECT_UNLOCK (ffmpegdec);
> ++
> ++ if (!demuxer_par_set && !decoder_par_set)
> ++ goto no_par;
> ++
> ++ if (demuxer_par_set && !decoder_par_set)
> ++ goto use_demuxer_par;
> ++
> ++ if (decoder_par_set && !demuxer_par_set)
> ++ goto use_decoder_par;
> ++
> ++ /* Both the demuxer and the decoder provide a PAR. If one of
> ++ * the two PARs is 1:1 and the other one is not, use the one
> ++ * that is not 1:1. */
> ++ if (demuxer_num == demuxer_denom && decoder_num != decoder_denom)
> ++ goto use_decoder_par;
> ++
> ++ if (decoder_num == decoder_denom && demuxer_num != demuxer_denom)
> ++ goto use_demuxer_par;
> ++
> ++ /* Both PARs are non-1:1, so use the PAR provided by the demuxer */
> ++ goto use_demuxer_par;
> ++
> ++use_decoder_par:
> ++ {
> ++ GST_DEBUG_OBJECT (ffmpegdec,
> ++ "Setting decoder provided pixel-aspect-ratio of %u:%u", decoder_num,
> ++ decoder_denom);
> ++ gst_structure_set (s, "pixel-aspect-ratio", GST_TYPE_FRACTION, decoder_num,
> ++ decoder_denom, NULL);
> ++ return;
> ++ }
> ++
> ++use_demuxer_par:
> ++ {
> ++ GST_DEBUG_OBJECT (ffmpegdec,
> ++ "Setting demuxer provided pixel-aspect-ratio of %u:%u", demuxer_num,
> ++ demuxer_denom);
> ++ gst_structure_set (s, "pixel-aspect-ratio", GST_TYPE_FRACTION, demuxer_num,
> ++ demuxer_denom, NULL);
> ++ return;
> ++ }
> ++no_par:
> ++ {
> ++ GST_DEBUG_OBJECT (ffmpegdec,
> ++ "Neither demuxer nor codec provide a pixel-aspect-ratio");
> ++ return;
> ++ }
> ++}
> ++
> ++static gboolean
> ++gst_ffmpegdec_negotiate (GstFFMpegDec * ffmpegdec, gboolean force)
> ++{
> ++ GstFFMpegDecClass *oclass;
> ++ GstCaps *caps;
> ++
> ++ oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
> ++
> ++ switch (oclass->in_plugin->type) {
> ++ case AVMEDIA_TYPE_VIDEO:
> ++ if (!force && ffmpegdec->format.video.width == ffmpegdec->context->width
> ++ && ffmpegdec->format.video.height == ffmpegdec->context->height
> ++ && ffmpegdec->format.video.fps_n == ffmpegdec->format.video.old_fps_n
> ++ && ffmpegdec->format.video.fps_d == ffmpegdec->format.video.old_fps_d
> ++ && ffmpegdec->format.video.pix_fmt == ffmpegdec->context->pix_fmt
> ++ && ffmpegdec->format.video.par_n ==
> ++ ffmpegdec->context->sample_aspect_ratio.num
> ++ && ffmpegdec->format.video.par_d ==
> ++ ffmpegdec->context->sample_aspect_ratio.den)
> ++ return TRUE;
> ++ GST_DEBUG_OBJECT (ffmpegdec,
> ++ "Renegotiating video from %dx%d@ %d:%d PAR %d/%d fps to %dx%d@ %d:%d PAR %d/%d fps",
> ++ ffmpegdec->format.video.width, ffmpegdec->format.video.height,
> ++ ffmpegdec->format.video.par_n, ffmpegdec->format.video.par_d,
> ++ ffmpegdec->format.video.old_fps_n, ffmpegdec->format.video.old_fps_n,
> ++ ffmpegdec->context->width, ffmpegdec->context->height,
> ++ ffmpegdec->context->sample_aspect_ratio.num,
> ++ ffmpegdec->context->sample_aspect_ratio.den,
> ++ ffmpegdec->format.video.fps_n, ffmpegdec->format.video.fps_d);
> ++ ffmpegdec->format.video.width = ffmpegdec->context->width;
> ++ ffmpegdec->format.video.height = ffmpegdec->context->height;
> ++ ffmpegdec->format.video.old_fps_n = ffmpegdec->format.video.fps_n;
> ++ ffmpegdec->format.video.old_fps_d = ffmpegdec->format.video.fps_d;
> ++ ffmpegdec->format.video.pix_fmt = ffmpegdec->context->pix_fmt;
> ++ ffmpegdec->format.video.par_n =
> ++ ffmpegdec->context->sample_aspect_ratio.num;
> ++ ffmpegdec->format.video.par_d =
> ++ ffmpegdec->context->sample_aspect_ratio.den;
> ++ break;
> ++ case AVMEDIA_TYPE_AUDIO:
> ++ {
> ++ gint depth = av_smp_format_depth (ffmpegdec->context->sample_fmt);
> ++ if (!force && ffmpegdec->format.audio.samplerate ==
> ++ ffmpegdec->context->sample_rate &&
> ++ ffmpegdec->format.audio.channels == ffmpegdec->context->channels &&
> ++ ffmpegdec->format.audio.depth == depth)
> ++ return TRUE;
> ++ GST_DEBUG_OBJECT (ffmpegdec,
> ++ "Renegotiating audio from %dHz@%dchannels (%d) to %dHz@%dchannels (%d)",
> ++ ffmpegdec->format.audio.samplerate, ffmpegdec->format.audio.channels,
> ++ ffmpegdec->format.audio.depth,
> ++ ffmpegdec->context->sample_rate, ffmpegdec->context->channels, depth);
> ++ ffmpegdec->format.audio.samplerate = ffmpegdec->context->sample_rate;
> ++ ffmpegdec->format.audio.channels = ffmpegdec->context->channels;
> ++ ffmpegdec->format.audio.depth = depth;
> ++ }
> ++ break;
> ++ default:
> ++ break;
> ++ }
> ++
> ++ caps = gst_ffmpeg_codectype_to_caps (oclass->in_plugin->type,
> ++ ffmpegdec->context, oclass->in_plugin->id, FALSE);
> ++
> ++ if (caps == NULL)
> ++ goto no_caps;
> ++
> ++ switch (oclass->in_plugin->type) {
> ++ case AVMEDIA_TYPE_VIDEO:
> ++ {
> ++ gint width, height;
> ++ gboolean interlaced;
> ++
> ++ width = ffmpegdec->format.video.clip_width;
> ++ height = ffmpegdec->format.video.clip_height;
> ++ interlaced = ffmpegdec->format.video.interlaced;
> ++
> ++ if (width != -1 && height != -1) {
> ++ /* overwrite the output size with the dimensions of the
> ++ * clipping region, but only if they are smaller. */
> ++ if (width < ffmpegdec->context->width)
> ++ gst_caps_set_simple (caps, "width", G_TYPE_INT, width, NULL);
> ++ if (height < ffmpegdec->context->height)
> ++ gst_caps_set_simple (caps, "height", G_TYPE_INT, height, NULL);
> ++ }
> ++ gst_caps_set_simple (caps, "interlaced", G_TYPE_BOOLEAN, interlaced,
> ++ NULL);
> ++
> ++ /* If a demuxer provided a framerate then use it (#313970) */
> ++ if (ffmpegdec->format.video.fps_n != -1) {
> ++ gst_caps_set_simple (caps, "framerate",
> ++ GST_TYPE_FRACTION, ffmpegdec->format.video.fps_n,
> ++ ffmpegdec->format.video.fps_d, NULL);
> ++ }
> ++ gst_ffmpegdec_add_pixel_aspect_ratio (ffmpegdec,
> ++ gst_caps_get_structure (caps, 0));
> ++ break;
> ++ }
> ++ case AVMEDIA_TYPE_AUDIO:
> ++ {
> ++ break;
> ++ }
> ++ default:
> ++ break;
> ++ }
> ++
> ++ if (!gst_pad_set_caps (ffmpegdec->srcpad, caps))
> ++ goto caps_failed;
> ++
> ++ gst_caps_unref (caps);
> ++
> ++ return TRUE;
> ++
> ++ /* ERRORS */
> ++no_caps:
> ++ {
> ++#ifdef HAVE_FFMPEG_UNINSTALLED
> ++ /* using internal ffmpeg snapshot */
> ++ GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
> ++ ("Could not find GStreamer caps mapping for FFmpeg codec '%s'.",
> ++ oclass->in_plugin->name), (NULL));
> ++#else
> ++ /* using external ffmpeg */
> ++ GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
> ++ ("Could not find GStreamer caps mapping for FFmpeg codec '%s', and "
> ++ "you are using an external libavcodec. This is most likely due to "
> ++ "a packaging problem and/or libavcodec having been upgraded to a "
> ++ "version that is not compatible with this version of "
> ++ "gstreamer-ffmpeg. Make sure your gstreamer-ffmpeg and libavcodec "
> ++ "packages come from the same source/repository.",
> ++ oclass->in_plugin->name), (NULL));
> ++#endif
> ++ return FALSE;
> ++ }
> ++caps_failed:
> ++ {
> ++ GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
> ++ ("Could not set caps for ffmpeg decoder (%s), not fixed?",
> ++ oclass->in_plugin->name));
> ++ gst_caps_unref (caps);
> ++
> ++ return FALSE;
> ++ }
> ++}
> ++
> ++/* perform qos calculations before decoding the next frame.
> ++ *
> ++ * Sets the skip_frame flag and if things are really bad, skips to the next
> ++ * keyframe.
> ++ *
> ++ * Returns TRUE if the frame should be decoded, FALSE if the frame can be dropped
> ++ * entirely.
> ++ */
> ++static gboolean
> ++gst_ffmpegdec_do_qos (GstFFMpegDec * ffmpegdec, GstClockTime timestamp,
> ++ gboolean * mode_switch)
> ++{
> ++ GstClockTimeDiff diff;
> ++ gdouble proportion;
> ++ GstClockTime qostime, earliest_time;
> ++ gboolean res = TRUE;
> ++
> ++ *mode_switch = FALSE;
> ++
> ++ /* no timestamp, can't do QoS */
> ++ if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (timestamp)))
> ++ goto no_qos;
> ++
> ++ /* get latest QoS observation values */
> ++ gst_ffmpegdec_read_qos (ffmpegdec, &proportion, &earliest_time);
> ++
> ++ /* skip qos if we have no observation (yet) */
> ++ if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (earliest_time))) {
> ++ /* no skip_frame initially */
> ++ ffmpegdec->context->skip_frame = AVDISCARD_DEFAULT;
> ++ goto no_qos;
> ++ }
> ++
> ++ /* qos is done on running time of the timestamp */
> ++ qostime = gst_segment_to_running_time (&ffmpegdec->segment, GST_FORMAT_TIME,
> ++ timestamp);
> ++
> ++ /* timestamp can be out of segment, then we don't do QoS */
> ++ if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (qostime)))
> ++ goto no_qos;
> ++
> ++ /* see how our next timestamp relates to the latest qos timestamp. negative
> ++ * values mean we are early, positive values mean we are too late. */
> ++ diff = GST_CLOCK_DIFF (qostime, earliest_time);
> ++
> ++ GST_DEBUG_OBJECT (ffmpegdec, "QOS: qostime %" GST_TIME_FORMAT
> ++ ", earliest %" GST_TIME_FORMAT, GST_TIME_ARGS (qostime),
> ++ GST_TIME_ARGS (earliest_time));
> ++
> ++ /* if we are using less than 40% of the available time, we can try to
> ++ * speed up again when we were slow. */
> ++ if (proportion < 0.4 && diff < 0) {
> ++ goto normal_mode;
> ++ } else {
> ++ if (diff >= 0) {
> ++ /* we're too slow, try to speed up */
> ++ /* switch to skip_frame mode */
> ++ goto skip_frame;
> ++ }
> ++ }
> ++
> ++no_qos:
> ++ ffmpegdec->processed++;
> ++ return TRUE;
> ++
> ++normal_mode:
> ++ {
> ++ if (ffmpegdec->context->skip_frame != AVDISCARD_DEFAULT) {
> ++ ffmpegdec->context->skip_frame = AVDISCARD_DEFAULT;
> ++ *mode_switch = TRUE;
> ++ GST_DEBUG_OBJECT (ffmpegdec, "QOS: normal mode %g < 0.4", proportion);
> ++ }
> ++ ffmpegdec->processed++;
> ++ return TRUE;
> ++ }
> ++skip_frame:
> ++ {
> ++ if (ffmpegdec->context->skip_frame != AVDISCARD_NONREF) {
> ++ ffmpegdec->context->skip_frame = AVDISCARD_NONREF;
> ++ *mode_switch = TRUE;
> ++ GST_DEBUG_OBJECT (ffmpegdec,
> ++ "QOS: hurry up, diff %" G_GINT64_FORMAT " >= 0", diff);
> ++ }
> ++ goto drop_qos;
> ++ }
> ++drop_qos:
> ++ {
> ++ GstClockTime stream_time, jitter;
> ++ GstMessage *qos_msg;
> ++
> ++ ffmpegdec->dropped++;
> ++ stream_time =
> ++ gst_segment_to_stream_time (&ffmpegdec->segment, GST_FORMAT_TIME,
> ++ timestamp);
> ++ jitter = GST_CLOCK_DIFF (qostime, earliest_time);
> ++ qos_msg =
> ++ gst_message_new_qos (GST_OBJECT_CAST (ffmpegdec), FALSE, qostime,
> ++ stream_time, timestamp, GST_CLOCK_TIME_NONE);
> ++ gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
> ++ gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
> ++ ffmpegdec->processed, ffmpegdec->dropped);
> ++ gst_element_post_message (GST_ELEMENT_CAST (ffmpegdec), qos_msg);
> ++
> ++ return res;
> ++ }
> ++}
> ++
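The skip-frame decision in gst_ffmpegdec_do_qos() above boils down to a small predicate on the latest QoS observation. A rough illustrative sketch, not part of the patch (helper name and layout are mine):

    /* Illustrative only: mirrors the decision above. 'proportion' and 'diff'
     * come from the last QoS observation, 'current' is context->skip_frame. */
    static enum AVDiscard
    qos_skip_mode (gdouble proportion, GstClockTimeDiff diff, enum AVDiscard current)
    {
      if (proportion < 0.4 && diff < 0)
        return AVDISCARD_DEFAULT;   /* early and using <40% of the budget: decode all */
      if (diff >= 0)
        return AVDISCARD_NONREF;    /* late: only decode reference frames */
      return current;               /* early but loaded: keep the current mode */
    }
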
> ++/* returns TRUE if buffer is within segment, else FALSE.
> ++ * if the buffer is on the segment border, its timestamp and duration will be clipped */
> ++static gboolean
> ++clip_video_buffer (GstFFMpegDec * dec, GstBuffer * buf, GstClockTime in_ts,
> ++ GstClockTime in_dur)
> ++{
> ++ gboolean res = TRUE;
> ++ gint64 cstart, cstop;
> ++ GstClockTime stop;
> ++
> ++ GST_LOG_OBJECT (dec,
> ++ "timestamp:%" GST_TIME_FORMAT " , duration:%" GST_TIME_FORMAT,
> ++ GST_TIME_ARGS (in_ts), GST_TIME_ARGS (in_dur));
> ++
> ++ /* can't clip without TIME segment */
> ++ if (G_UNLIKELY (dec->segment.format != GST_FORMAT_TIME))
> ++ goto beach;
> ++
> ++ /* we need a start time */
> ++ if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (in_ts)))
> ++ goto beach;
> ++
> ++ /* generate valid stop, if duration unknown, we have unknown stop */
> ++ stop =
> ++ GST_CLOCK_TIME_IS_VALID (in_dur) ? (in_ts + in_dur) : GST_CLOCK_TIME_NONE;
> ++
> ++ /* now clip */
> ++ res =
> ++ gst_segment_clip (&dec->segment, GST_FORMAT_TIME, in_ts, stop, &cstart,
> ++ &cstop);
> ++ if (G_UNLIKELY (!res))
> ++ goto beach;
> ++
> ++ /* we're pretty sure the duration of this buffer is not till the end of this
> ++ * segment (which _clip will assume when the stop is -1) */
> ++ if (stop == GST_CLOCK_TIME_NONE)
> ++ cstop = GST_CLOCK_TIME_NONE;
> ++
> ++ /* update timestamp and possibly duration if the clipped stop time is
> ++ * valid */
> ++ GST_BUFFER_TIMESTAMP (buf) = cstart;
> ++ if (GST_CLOCK_TIME_IS_VALID (cstop))
> ++ GST_BUFFER_DURATION (buf) = cstop - cstart;
> ++
> ++ GST_LOG_OBJECT (dec,
> ++ "clipped timestamp:%" GST_TIME_FORMAT " , duration:%" GST_TIME_FORMAT,
> ++ GST_TIME_ARGS (cstart), GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
> ++
> ++beach:
> ++ GST_LOG_OBJECT (dec, "%sdropping", (res ? "not " : ""));
> ++ return res;
> ++}
> ++
> ++
> ++/* get an outbuf buffer with the current picture */
> ++static GstFlowReturn
> ++get_output_buffer (GstFFMpegDec * ffmpegdec, GstBuffer ** outbuf)
> ++{
> ++ GstFlowReturn ret;
> ++
> ++ ret = GST_FLOW_OK;
> ++ *outbuf = NULL;
> ++
> ++ if (ffmpegdec->picture->opaque != NULL) {
> ++ /* we allocated a picture already for ffmpeg to decode into, let's pick it
> ++ * up and use it now. */
> ++ *outbuf = (GstBuffer *) ffmpegdec->picture->opaque;
> ++ GST_LOG_OBJECT (ffmpegdec, "using opaque buffer %p", *outbuf);
> ++#ifndef EXTRA_REF
> ++ gst_buffer_ref (*outbuf);
> ++#endif
> ++ } else {
> ++ AVPicture pic, *outpic;
> ++ gint width, height;
> ++
> ++ GST_LOG_OBJECT (ffmpegdec, "get output buffer");
> ++
> ++ /* figure out size of output buffer, this is the clipped output size because
> ++ * we will copy the picture into it but only when the clipping region is
> ++ * smaller than the actual picture size. */
> ++ if ((width = ffmpegdec->format.video.clip_width) == -1)
> ++ width = ffmpegdec->context->width;
> ++ else if (width > ffmpegdec->context->width)
> ++ width = ffmpegdec->context->width;
> ++
> ++ if ((height = ffmpegdec->format.video.clip_height) == -1)
> ++ height = ffmpegdec->context->height;
> ++ else if (height > ffmpegdec->context->height)
> ++ height = ffmpegdec->context->height;
> ++
> ++ GST_LOG_OBJECT (ffmpegdec, "clip width %d/height %d", width, height);
> ++
> ++ ret = alloc_output_buffer (ffmpegdec, outbuf, width, height);
> ++ if (G_UNLIKELY (ret != GST_FLOW_OK))
> ++ goto alloc_failed;
> ++
> ++ /* original ffmpeg code does not handle odd sizes correctly.
> ++ * This patched up version does */
> ++ gst_ffmpeg_avpicture_fill (&pic, GST_BUFFER_DATA (*outbuf),
> ++ ffmpegdec->context->pix_fmt, width, height);
> ++
> ++ outpic = (AVPicture *) ffmpegdec->picture;
> ++
> ++ GST_LOG_OBJECT (ffmpegdec, "linsize %d %d %d", outpic->linesize[0],
> ++ outpic->linesize[1], outpic->linesize[2]);
> ++ GST_LOG_OBJECT (ffmpegdec, "data %u %u %u", 0,
> ++ (guint) (outpic->data[1] - outpic->data[0]),
> ++ (guint) (outpic->data[2] - outpic->data[0]));
> ++
> ++ av_picture_copy (&pic, outpic, ffmpegdec->context->pix_fmt, width, height);
> ++ }
> ++ ffmpegdec->picture->reordered_opaque = -1;
> ++
> ++ return ret;
> ++
> ++ /* special cases */
> ++alloc_failed:
> ++ {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "pad_alloc failed");
> ++ return ret;
> ++ }
> ++}
> ++
> ++static void
> ++clear_queued (GstFFMpegDec * ffmpegdec)
> ++{
> ++ g_list_foreach (ffmpegdec->queued, (GFunc) gst_mini_object_unref, NULL);
> ++ g_list_free (ffmpegdec->queued);
> ++ ffmpegdec->queued = NULL;
> ++}
> ++
> ++static GstFlowReturn
> ++flush_queued (GstFFMpegDec * ffmpegdec)
> ++{
> ++ GstFlowReturn res = GST_FLOW_OK;
> ++
> ++ while (ffmpegdec->queued) {
> ++ GstBuffer *buf = GST_BUFFER_CAST (ffmpegdec->queued->data);
> ++
> ++ GST_LOG_OBJECT (ffmpegdec, "pushing buffer %p, offset %"
> ++ G_GUINT64_FORMAT ", timestamp %"
> ++ GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT, buf,
> ++ GST_BUFFER_OFFSET (buf),
> ++ GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
> ++ GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
> ++
> ++ /* iterate output queue and push downstream */
> ++ res = gst_pad_push (ffmpegdec->srcpad, buf);
> ++
> ++ ffmpegdec->queued =
> ++ g_list_delete_link (ffmpegdec->queued, ffmpegdec->queued);
> ++ }
> ++ return res;
> ++}
> ++
> ++static void
> ++gst_avpacket_init (AVPacket * packet, guint8 * data, guint size)
> ++{
> ++ memset (packet, 0, sizeof (AVPacket));
> ++ packet->data = data;
> ++ packet->size = size;
> ++}
> ++
> ++/* gst_ffmpegdec_[video|audio]_frame:
> ++ * ffmpegdec:
> ++ * data: pointer to the data to decode
> ++ * size: size of data in bytes
> ++ * in_timestamp: incoming timestamp.
> ++ * in_duration: incoming duration.
> ++ * in_offset: incoming offset (frame number).
> ++ * outbuf: outgoing buffer. Different from NULL ONLY if it contains decoded data.
> ++ * ret: Return flow.
> ++ *
> ++ * Returns: number of bytes used in decoding. The check for successful decode is
> ++ * outbuf being non-NULL.
> ++ */
> ++static gint
> ++gst_ffmpegdec_video_frame (GstFFMpegDec * ffmpegdec,
> ++ guint8 * data, guint size,
> ++ const GstTSInfo * dec_info, GstBuffer ** outbuf, GstFlowReturn * ret)
> ++{
> ++ gint len = -1;
> ++ gint have_data;
> ++ gboolean mode_switch;
> ++ gboolean decode;
> ++ gint skip_frame = AVDISCARD_DEFAULT;
> ++ GstClockTime out_timestamp, out_duration, out_pts;
> ++ gint64 out_offset;
> ++ const GstTSInfo *out_info;
> ++ AVPacket packet;
> ++
> ++ *ret = GST_FLOW_OK;
> ++ *outbuf = NULL;
> ++
> ++ ffmpegdec->context->opaque = ffmpegdec;
> ++
> ++ /* in case we skip frames */
> ++ ffmpegdec->picture->pict_type = -1;
> ++
> ++ /* run QoS code, we don't stop decoding the frame when we are late because
> ++ * else we might skip a reference frame */
> ++ decode = gst_ffmpegdec_do_qos (ffmpegdec, dec_info->timestamp, &mode_switch);
> ++
> ++ if (ffmpegdec->is_realvideo && data != NULL) {
> ++ gint slice_count;
> ++ gint i;
> ++
> ++ /* setup the slice table for realvideo */
> ++ if (ffmpegdec->context->slice_offset == NULL)
> ++ ffmpegdec->context->slice_offset = g_malloc (sizeof (guint32) * 1000);
> ++
> ++ slice_count = (*data++) + 1;
> ++ ffmpegdec->context->slice_count = slice_count;
> ++
> ++ for (i = 0; i < slice_count; i++) {
> ++ data += 4;
> ++ ffmpegdec->context->slice_offset[i] = GST_READ_UINT32_LE (data);
> ++ data += 4;
> ++ }
> ++ }
> ++
> ++ if (!decode) {
> ++ /* no decoding needed, save previous skip_frame value and brutally skip
> ++ * decoding everything */
> ++ skip_frame = ffmpegdec->context->skip_frame;
> ++ ffmpegdec->context->skip_frame = AVDISCARD_NONREF;
> ++ }
> ++
> ++ /* save reference to the timing info */
> ++ ffmpegdec->context->reordered_opaque = (gint64) dec_info->idx;
> ++ ffmpegdec->picture->reordered_opaque = (gint64) dec_info->idx;
> ++
> ++ GST_DEBUG_OBJECT (ffmpegdec, "stored opaque values idx %d", dec_info->idx);
> ++
> ++ /* now decode the frame */
> ++ gst_avpacket_init (&packet, data, size);
> ++ len = avcodec_decode_video2 (ffmpegdec->context,
> ++ ffmpegdec->picture, &have_data, &packet);
> ++
> ++ /* restore previous state */
> ++ if (!decode)
> ++ ffmpegdec->context->skip_frame = skip_frame;
> ++
> ++ GST_DEBUG_OBJECT (ffmpegdec, "after decode: len %d, have_data %d",
> ++ len, have_data);
> ++
> ++ /* when we are in skip_frame mode, don't complain when ffmpeg returned
> ++ * no data because we told it to skip stuff. */
> ++ if (len < 0 && (mode_switch || ffmpegdec->context->skip_frame))
> ++ len = 0;
> ++
> ++ if (len > 0 && have_data <= 0 && (mode_switch
> ++ || ffmpegdec->context->skip_frame)) {
> ++ /* we consumed some bytes but nothing decoded and we are skipping frames,
> ++ * disable the interpolation of DTS timestamps */
> ++ ffmpegdec->last_out = -1;
> ++ }
> ++
> ++ /* no data, we're done */
> ++ if (len < 0 || have_data <= 0)
> ++ goto beach;
> ++
> ++ /* get the output picture timing info again */
> ++ out_info = gst_ts_info_get (ffmpegdec, ffmpegdec->picture->reordered_opaque);
> ++ out_pts = out_info->timestamp;
> ++ out_duration = out_info->duration;
> ++ out_offset = out_info->offset;
> ++
> ++ GST_DEBUG_OBJECT (ffmpegdec,
> ++ "pts %" G_GUINT64_FORMAT " duration %" G_GUINT64_FORMAT " offset %"
> ++ G_GINT64_FORMAT, out_pts, out_duration, out_offset);
> ++ GST_DEBUG_OBJECT (ffmpegdec, "picture: pts %" G_GUINT64_FORMAT,
> ++ (guint64) ffmpegdec->picture->pts);
> ++ GST_DEBUG_OBJECT (ffmpegdec, "picture: num %d",
> ++ ffmpegdec->picture->coded_picture_number);
> ++ GST_DEBUG_OBJECT (ffmpegdec, "picture: ref %d",
> ++ ffmpegdec->picture->reference);
> ++ GST_DEBUG_OBJECT (ffmpegdec, "picture: display %d",
> ++ ffmpegdec->picture->display_picture_number);
> ++ GST_DEBUG_OBJECT (ffmpegdec, "picture: opaque %p",
> ++ ffmpegdec->picture->opaque);
> ++ GST_DEBUG_OBJECT (ffmpegdec, "picture: reordered opaque %" G_GUINT64_FORMAT,
> ++ (guint64) ffmpegdec->picture->reordered_opaque);
> ++ GST_DEBUG_OBJECT (ffmpegdec, "repeat_pict:%d",
> ++ ffmpegdec->picture->repeat_pict);
> ++ GST_DEBUG_OBJECT (ffmpegdec, "interlaced_frame:%d",
> ++ ffmpegdec->picture->interlaced_frame);
> ++
> ++ if (G_UNLIKELY (ffmpegdec->picture->interlaced_frame !=
> ++ ffmpegdec->format.video.interlaced)) {
> ++ GST_WARNING ("Change in interlacing ! picture:%d, recorded:%d",
> ++ ffmpegdec->picture->interlaced_frame,
> ++ ffmpegdec->format.video.interlaced);
> ++ ffmpegdec->format.video.interlaced = ffmpegdec->picture->interlaced_frame;
> ++ gst_ffmpegdec_negotiate (ffmpegdec, TRUE);
> ++ }
> ++
> ++ /* Whether a frame is interlaced or not is unknown at the time of
> ++ buffer allocation, so caps on the buffer in opaque will have
> ++ the previous frame's interlaced flag set. So if interlacedness
> ++ has changed since allocation, we update the buffer (if any)
> ++ caps now with the correct interlaced flag. */
> ++ if (ffmpegdec->picture->opaque != NULL) {
> ++ GstBuffer *buffer = ffmpegdec->picture->opaque;
> ++ if (GST_BUFFER_CAPS (buffer) && GST_PAD_CAPS (ffmpegdec->srcpad)) {
> ++ GstStructure *s = gst_caps_get_structure (GST_BUFFER_CAPS (buffer), 0);
> ++ gboolean interlaced;
> ++ gboolean found = gst_structure_get_boolean (s, "interlaced", &interlaced);
> ++ if (!found || (!!interlaced != !!ffmpegdec->format.video.interlaced)) {
> ++ GST_DEBUG_OBJECT (ffmpegdec,
> ++ "Buffer interlacing does not match pad, updating");
> ++ buffer = gst_buffer_make_metadata_writable (buffer);
> ++ gst_buffer_set_caps (buffer, GST_PAD_CAPS (ffmpegdec->srcpad));
> ++ ffmpegdec->picture->opaque = buffer;
> ++ }
> ++ }
> ++ }
> ++
> ++ /* check that the timestamps go upwards */
> ++ if (ffmpegdec->last_out != -1 && ffmpegdec->last_out > out_pts) {
> ++ /* timestamps go backwards, this means frames were reordered and we must
> ++ * be dealing with DTS as the buffer timestamps */
> ++ if (!ffmpegdec->reordered_out) {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "detected reordered out timestamps");
> ++ ffmpegdec->reordered_out = TRUE;
> ++ }
> ++ if (ffmpegdec->reordered_in) {
> ++ /* we reset the input reordering here because we want to recover from an
> ++ * occasionally wrong reordered input timestamp */
> ++ GST_DEBUG_OBJECT (ffmpegdec, "assuming DTS input timestamps");
> ++ ffmpegdec->reordered_in = FALSE;
> ++ }
> ++ }
> ++
> ++ if (out_pts == 0 && out_pts == ffmpegdec->last_out) {
> ++ GST_LOG_OBJECT (ffmpegdec, "ffmpeg returns 0 timestamps, ignoring");
> ++ /* some codecs only output 0 timestamps; when that happens, we select an
> ++ * output timestamp based on the input timestamp. We do this by making the
> ++ * ffmpeg timestamp and the interpolated next timestamp invalid. */
> ++ out_pts = -1;
> ++ ffmpegdec->next_out = -1;
> ++ } else
> ++ ffmpegdec->last_out = out_pts;
> ++
> ++ /* we assume DTS as input timestamps unless we see reordered input
> ++ * timestamps */
> ++ if (!ffmpegdec->reordered_in && ffmpegdec->reordered_out) {
> ++ /* PTS and DTS are the same for keyframes */
> ++ if (ffmpegdec->next_out != -1) {
> ++ /* interpolate all timestamps except for keyframes, FIXME, this is
> ++ * wrong when QoS is active. */
> ++ GST_DEBUG_OBJECT (ffmpegdec, "interpolate timestamps");
> ++ out_pts = -1;
> ++ out_offset = -1;
> ++ }
> ++ }
> ++
> ++ /* get a handle to the output buffer */
> ++ *ret = get_output_buffer (ffmpegdec, outbuf);
> ++ if (G_UNLIKELY (*ret != GST_FLOW_OK))
> ++ goto no_output;
> ++
> ++ /*
> ++ * Timestamps:
> ++ *
> ++ * 1) Copy picture timestamp if valid
> ++ * 2) else interpolate from previous output timestamp
> ++ * 3) else copy input timestamp
> ++ */
> ++ out_timestamp = -1;
> ++ if (out_pts != -1) {
> ++ /* Get (interpolated) timestamp from FFMPEG */
> ++ out_timestamp = (GstClockTime) out_pts;
> ++ GST_LOG_OBJECT (ffmpegdec, "using timestamp %" GST_TIME_FORMAT
> ++ " returned by ffmpeg", GST_TIME_ARGS (out_timestamp));
> ++ }
> ++ if (!GST_CLOCK_TIME_IS_VALID (out_timestamp) && ffmpegdec->next_out != -1) {
> ++ out_timestamp = ffmpegdec->next_out;
> ++ GST_LOG_OBJECT (ffmpegdec, "using next timestamp %" GST_TIME_FORMAT,
> ++ GST_TIME_ARGS (out_timestamp));
> ++ }
> ++ if (!GST_CLOCK_TIME_IS_VALID (out_timestamp)) {
> ++ out_timestamp = dec_info->timestamp;
> ++ GST_LOG_OBJECT (ffmpegdec, "using in timestamp %" GST_TIME_FORMAT,
> ++ GST_TIME_ARGS (out_timestamp));
> ++ }
> ++ GST_BUFFER_TIMESTAMP (*outbuf) = out_timestamp;
> ++
> ++ /*
> ++ * Offset:
> ++ * 0) Use stored input offset (from opaque)
> ++ * 1) Use value converted from timestamp if valid
> ++ * 2) Use input offset if valid
> ++ */
> ++ if (out_offset != GST_BUFFER_OFFSET_NONE) {
> ++ /* out_offset already contains the offset from ts_info */
> ++ GST_LOG_OBJECT (ffmpegdec, "Using offset returned by ffmpeg");
> ++ } else if (out_timestamp != GST_CLOCK_TIME_NONE) {
> ++ GstFormat out_fmt = GST_FORMAT_DEFAULT;
> ++ GST_LOG_OBJECT (ffmpegdec, "Using offset converted from timestamp");
> ++ /* FIXME, we should really remove this as it's not nice at all to do
> ++ * upstream queries for each frame to get the frame offset. We also can't
> ++ * really remove this because it is the only way of setting frame offsets
> ++ * on outgoing buffers. We should have metadata so that the upstream peer
> ++ * can set a frame number on the encoded data. */
> ++ gst_pad_query_peer_convert (ffmpegdec->sinkpad,
> ++ GST_FORMAT_TIME, out_timestamp, &out_fmt, &out_offset);
> ++ } else if (dec_info->offset != GST_BUFFER_OFFSET_NONE) {
> ++ /* FIXME, the input offset is input media specific and might not
> ++ * be the same for the output media. (byte offset as input, frame number
> ++ * as output, for example) */
> ++ GST_LOG_OBJECT (ffmpegdec, "using in_offset %" G_GINT64_FORMAT,
> ++ dec_info->offset);
> ++ out_offset = dec_info->offset;
> ++ } else {
> ++ GST_LOG_OBJECT (ffmpegdec, "no valid offset found");
> ++ out_offset = GST_BUFFER_OFFSET_NONE;
> ++ }
> ++ GST_BUFFER_OFFSET (*outbuf) = out_offset;
> ++
> ++ /*
> ++ * Duration:
> ++ *
> ++ * 1) Use reordered input duration if valid
> ++ * 2) Else use input duration
> ++ * 3) else use input framerate
> ++ * 4) else use ffmpeg framerate
> ++ */
> ++ if (GST_CLOCK_TIME_IS_VALID (out_duration)) {
> ++ /* We have a valid (reordered) duration */
> ++ GST_LOG_OBJECT (ffmpegdec, "Using duration returned by ffmpeg");
> ++ } else if (GST_CLOCK_TIME_IS_VALID (dec_info->duration)) {
> ++ GST_LOG_OBJECT (ffmpegdec, "using in_duration");
> ++ out_duration = dec_info->duration;
> ++ } else if (GST_CLOCK_TIME_IS_VALID (ffmpegdec->last_diff)) {
> ++ GST_LOG_OBJECT (ffmpegdec, "using last-diff");
> ++ out_duration = ffmpegdec->last_diff;
> ++ } else {
> ++ /* if we have an input framerate, use that */
> ++ if (ffmpegdec->format.video.fps_n != -1 &&
> ++ (ffmpegdec->format.video.fps_n != 1000 &&
> ++ ffmpegdec->format.video.fps_d != 1)) {
> ++ GST_LOG_OBJECT (ffmpegdec, "using input framerate for duration");
> ++ out_duration = gst_util_uint64_scale_int (GST_SECOND,
> ++ ffmpegdec->format.video.fps_d, ffmpegdec->format.video.fps_n);
> ++ } else {
> ++ /* don't try to use the decoder's framerate when it seems a bit abnormal,
> ++ * which we assume when den >= 1000... */
> ++ if (ffmpegdec->context->time_base.num != 0 &&
> ++ (ffmpegdec->context->time_base.den > 0 &&
> ++ ffmpegdec->context->time_base.den < 1000)) {
> ++ GST_LOG_OBJECT (ffmpegdec, "using decoder's framerate for duration");
> ++ out_duration = gst_util_uint64_scale_int (GST_SECOND,
> ++ ffmpegdec->context->time_base.num *
> ++ ffmpegdec->context->ticks_per_frame,
> ++ ffmpegdec->context->time_base.den);
> ++ } else {
> ++ GST_LOG_OBJECT (ffmpegdec, "no valid duration found");
> ++ }
> ++ }
> ++ }
> ++
> ++ /* Take repeat_pict into account */
> ++ if (GST_CLOCK_TIME_IS_VALID (out_duration)) {
> ++ out_duration += out_duration * ffmpegdec->picture->repeat_pict / 2;
> ++ }
> ++ GST_BUFFER_DURATION (*outbuf) = out_duration;
> ++
> ++ if (out_timestamp != -1 && out_duration != -1 && out_duration != 0)
> ++ ffmpegdec->next_out = out_timestamp + out_duration;
> ++ else
> ++ ffmpegdec->next_out = -1;
> ++
> ++ /* now see if we need to clip the buffer against the segment boundaries. */
> ++ if (G_UNLIKELY (!clip_video_buffer (ffmpegdec, *outbuf, out_timestamp,
> ++ out_duration)))
> ++ goto clipped;
> ++
> ++ if (ffmpegdec->picture->top_field_first)
> ++ GST_BUFFER_FLAG_SET (*outbuf, GST_VIDEO_BUFFER_TFF);
> ++
> ++
> ++beach:
> ++ GST_DEBUG_OBJECT (ffmpegdec, "return flow %d, out %p, len %d",
> ++ *ret, *outbuf, len);
> ++ return len;
> ++
> ++ /* special cases */
> ++no_output:
> ++ {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "no output buffer");
> ++ len = -1;
> ++ goto beach;
> ++ }
> ++clipped:
> ++ {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "buffer clipped");
> ++ gst_buffer_unref (*outbuf);
> ++ *outbuf = NULL;
> ++ goto beach;
> ++ }
> ++}
> ++
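The duration fallback chain in gst_ffmpegdec_video_frame() above ends in two rational scalings; spelled out as illustrative helpers (assumed names, not part of the patch), applied after the validity checks the code performs and before the repeat_pict adjustment:

    /* Illustrative: one frame of duration from the demuxer framerate... */
    static GstClockTime
    duration_from_framerate (gint fps_n, gint fps_d)
    {
      return gst_util_uint64_scale_int (GST_SECOND, fps_d, fps_n);
    }

    /* ...or from the decoder time base; the caller then adds
     * duration * picture->repeat_pict / 2 for repeated fields. */
    static GstClockTime
    duration_from_time_base (const AVCodecContext * ctx)
    {
      return gst_util_uint64_scale_int (GST_SECOND,
          ctx->time_base.num * ctx->ticks_per_frame, ctx->time_base.den);
    }
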
> ++/* returns TRUE if buffer is within segment, else FALSE.
> ++ * if the buffer is on the segment border, its timestamp and duration will be clipped */
> ++static gboolean
> ++clip_audio_buffer (GstFFMpegDec * dec, GstBuffer * buf, GstClockTime in_ts,
> ++ GstClockTime in_dur)
> ++{
> ++ GstClockTime stop;
> ++ gint64 diff, ctime, cstop;
> ++ gboolean res = TRUE;
> ++
> ++ GST_LOG_OBJECT (dec,
> ++ "timestamp:%" GST_TIME_FORMAT ", duration:%" GST_TIME_FORMAT
> ++ ", size %u", GST_TIME_ARGS (in_ts), GST_TIME_ARGS (in_dur),
> ++ GST_BUFFER_SIZE (buf));
> ++
> ++ /* can't clip without TIME segment */
> ++ if (G_UNLIKELY (dec->segment.format != GST_FORMAT_TIME))
> ++ goto beach;
> ++
> ++ /* we need a start time */
> ++ if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (in_ts)))
> ++ goto beach;
> ++
> ++ /* trust duration */
> ++ stop = in_ts + in_dur;
> ++
> ++ res = gst_segment_clip (&dec->segment, GST_FORMAT_TIME, in_ts, stop, &ctime,
> ++ &cstop);
> ++ if (G_UNLIKELY (!res))
> ++ goto out_of_segment;
> ++
> ++ /* see if some clipping happened */
> ++ if (G_UNLIKELY ((diff = ctime - in_ts) > 0)) {
> ++ /* bring clipped time to bytes */
> ++ diff =
> ++ gst_util_uint64_scale_int (diff, dec->format.audio.samplerate,
> ++ GST_SECOND) * (dec->format.audio.depth * dec->format.audio.channels);
> ++
> ++ GST_DEBUG_OBJECT (dec, "clipping start to %" GST_TIME_FORMAT " %"
> ++ G_GINT64_FORMAT " bytes", GST_TIME_ARGS (ctime), diff);
> ++
> ++ GST_BUFFER_SIZE (buf) -= diff;
> ++ GST_BUFFER_DATA (buf) += diff;
> ++ }
> ++ if (G_UNLIKELY ((diff = stop - cstop) > 0)) {
> ++ /* bring clipped time to bytes */
> ++ diff =
> ++ gst_util_uint64_scale_int (diff, dec->format.audio.samplerate,
> ++ GST_SECOND) * (dec->format.audio.depth * dec->format.audio.channels);
> ++
> ++ GST_DEBUG_OBJECT (dec, "clipping stop to %" GST_TIME_FORMAT " %"
> ++ G_GINT64_FORMAT " bytes", GST_TIME_ARGS (cstop), diff);
> ++
> ++ GST_BUFFER_SIZE (buf) -= diff;
> ++ }
> ++ GST_BUFFER_TIMESTAMP (buf) = ctime;
> ++ GST_BUFFER_DURATION (buf) = cstop - ctime;
> ++
> ++beach:
> ++ GST_LOG_OBJECT (dec, "%sdropping", (res ? "not " : ""));
> ++ return res;
> ++
> ++ /* ERRORS */
> ++out_of_segment:
> ++ {
> ++ GST_LOG_OBJECT (dec, "out of segment");
> ++ goto beach;
> ++ }
> ++}
> ++
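The byte arithmetic used when clipping audio above is the usual time-to-bytes conversion for interleaved raw audio; an illustrative helper (assumed name, not part of the patch), where depth is bytes per sample as returned by av_smp_format_depth():

    /* Illustrative: bytes covered by 'span' of raw interleaved audio. */
    static gint64
    audio_time_to_bytes (GstClockTime span, gint rate, gint depth, gint channels)
    {
      /* samples in the span, times bytes per sample frame */
      return gst_util_uint64_scale_int (span, rate, GST_SECOND) * depth * channels;
    }
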
> ++static gint
> ++gst_ffmpegdec_audio_frame (GstFFMpegDec * ffmpegdec,
> ++ AVCodec * in_plugin, guint8 * data, guint size,
> ++ const GstTSInfo * dec_info, GstBuffer ** outbuf, GstFlowReturn * ret)
> ++{
> ++ gint len = -1;
> ++ gint have_data = AVCODEC_MAX_AUDIO_FRAME_SIZE;
> ++ GstClockTime out_timestamp, out_duration;
> ++ gint64 out_offset;
> ++ AVPacket packet;
> ++
> ++ GST_DEBUG_OBJECT (ffmpegdec,
> ++ "size:%d, offset:%" G_GINT64_FORMAT ", ts:%" GST_TIME_FORMAT ", dur:%"
> ++ GST_TIME_FORMAT ", ffmpegdec->next_out:%" GST_TIME_FORMAT, size,
> ++ dec_info->offset, GST_TIME_ARGS (dec_info->timestamp),
> ++ GST_TIME_ARGS (dec_info->duration), GST_TIME_ARGS (ffmpegdec->next_out));
> ++
> ++ *outbuf =
> ++ new_aligned_buffer (AVCODEC_MAX_AUDIO_FRAME_SIZE,
> ++ GST_PAD_CAPS (ffmpegdec->srcpad));
> ++
> ++ gst_avpacket_init (&packet, data, size);
> ++ len = avcodec_decode_audio3 (ffmpegdec->context,
> ++ (int16_t *) GST_BUFFER_DATA (*outbuf), &have_data, &packet);
> ++ GST_DEBUG_OBJECT (ffmpegdec,
> ++ "Decode audio: len=%d, have_data=%d", len, have_data);
> ++
> ++ if (len >= 0 && have_data > 0) {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "Creating output buffer");
> ++ if (!gst_ffmpegdec_negotiate (ffmpegdec, FALSE)) {
> ++ gst_buffer_unref (*outbuf);
> ++ *outbuf = NULL;
> ++ len = -1;
> ++ goto beach;
> ++ }
> ++
> ++ /* Buffer size */
> ++ GST_BUFFER_SIZE (*outbuf) = have_data;
> ++
> ++ /*
> ++ * Timestamps:
> ++ *
> ++ * 1) Copy input timestamp if valid
> ++ * 2) else interpolate from previous input timestamp
> ++ */
> ++ /* always take timestamps from the input buffer if any */
> ++ if (GST_CLOCK_TIME_IS_VALID (dec_info->timestamp)) {
> ++ out_timestamp = dec_info->timestamp;
> ++ } else {
> ++ out_timestamp = ffmpegdec->next_out;
> ++ }
> ++
> ++ /*
> ++ * Duration:
> ++ *
> ++ * 1) calculate based on number of samples
> ++ */
> ++ out_duration = gst_util_uint64_scale (have_data, GST_SECOND,
> ++ ffmpegdec->format.audio.depth * ffmpegdec->format.audio.channels *
> ++ ffmpegdec->format.audio.samplerate);
> ++
> ++ /* offset:
> ++ *
> ++ * Just copy
> ++ */
> ++ out_offset = dec_info->offset;
> ++
> ++ GST_DEBUG_OBJECT (ffmpegdec,
> ++ "Buffer created. Size:%d , timestamp:%" GST_TIME_FORMAT " , duration:%"
> ++ GST_TIME_FORMAT, have_data,
> ++ GST_TIME_ARGS (out_timestamp), GST_TIME_ARGS (out_duration));
> ++
> ++ GST_BUFFER_TIMESTAMP (*outbuf) = out_timestamp;
> ++ GST_BUFFER_DURATION (*outbuf) = out_duration;
> ++ GST_BUFFER_OFFSET (*outbuf) = out_offset;
> ++ gst_buffer_set_caps (*outbuf, GST_PAD_CAPS (ffmpegdec->srcpad));
> ++
> ++ /* the next timestamp we'll use when interpolating */
> ++ if (GST_CLOCK_TIME_IS_VALID (out_timestamp))
> ++ ffmpegdec->next_out = out_timestamp + out_duration;
> ++
> ++ /* now see if we need to clip the buffer against the segment boundaries. */
> ++ if (G_UNLIKELY (!clip_audio_buffer (ffmpegdec, *outbuf, out_timestamp,
> ++ out_duration)))
> ++ goto clipped;
> ++
> ++ } else {
> ++ gst_buffer_unref (*outbuf);
> ++ *outbuf = NULL;
> ++ }
> ++
> ++ /* If we don't error out after the first failed read with the AAC decoder,
> ++ * we must *not* carry on pushing data, else we'll cause segfaults... */
> ++ if (len == -1 && (in_plugin->id == CODEC_ID_AAC
> ++ || in_plugin->id == CODEC_ID_AAC_LATM)) {
> ++ GST_ELEMENT_ERROR (ffmpegdec, STREAM, DECODE, (NULL),
> ++ ("Decoding of AAC stream by FFMPEG failed."));
> ++ *ret = GST_FLOW_ERROR;
> ++ }
> ++
> ++beach:
> ++ GST_DEBUG_OBJECT (ffmpegdec, "return flow %d, out %p, len %d",
> ++ *ret, *outbuf, len);
> ++ return len;
> ++
> ++ /* ERRORS */
> ++clipped:
> ++ {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "buffer clipped");
> ++ gst_buffer_unref (*outbuf);
> ++ *outbuf = NULL;
> ++ goto beach;
> ++ }
> ++}
> ++
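Conversely, the output duration in gst_ffmpegdec_audio_frame() above is derived straight from the decoded byte count; illustrative only (assumed helper name):

    /* Illustrative: duration of 'bytes' of interleaved raw audio. */
    static GstClockTime
    audio_bytes_to_duration (gint bytes, gint depth, gint channels, gint rate)
    {
      return gst_util_uint64_scale (bytes, GST_SECOND,
          (guint64) depth * channels * rate);
    }
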
> ++/* gst_ffmpegdec_frame:
> ++ * ffmpegdec:
> ++ * data: pointer to the data to decode
> ++ * size: size of data in bytes
> ++ * got_data: 0 if no data was decoded, != 0 otherwise.
> ++ * in_time: timestamp of data
> ++ * in_duration: duration of data
> ++ * ret: GstFlowReturn to return in the chain function
> ++ *
> ++ * Decode the given frame and pushes it downstream.
> ++ *
> ++ * Returns: Number of bytes used in decoding, -1 on error/failure.
> ++ */
> ++
> ++static gint
> ++gst_ffmpegdec_frame (GstFFMpegDec * ffmpegdec,
> ++ guint8 * data, guint size, gint * got_data, const GstTSInfo * dec_info,
> ++ GstFlowReturn * ret)
> ++{
> ++ GstFFMpegDecClass *oclass;
> ++ GstBuffer *outbuf = NULL;
> ++ gint have_data = 0, len = 0;
> ++
> ++ if (G_UNLIKELY (ffmpegdec->context->codec == NULL))
> ++ goto no_codec;
> ++
> ++ GST_LOG_OBJECT (ffmpegdec, "data:%p, size:%d, id:%d", data, size,
> ++ dec_info->idx);
> ++
> ++ *ret = GST_FLOW_OK;
> ++ ffmpegdec->context->frame_number++;
> ++
> ++ oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
> ++
> ++ switch (oclass->in_plugin->type) {
> ++ case AVMEDIA_TYPE_VIDEO:
> ++ len =
> ++ gst_ffmpegdec_video_frame (ffmpegdec, data, size, dec_info, &outbuf,
> ++ ret);
> ++ break;
> ++ case AVMEDIA_TYPE_AUDIO:
> ++ len =
> ++ gst_ffmpegdec_audio_frame (ffmpegdec, oclass->in_plugin, data, size,
> ++ dec_info, &outbuf, ret);
> ++
> ++ /* if we did not get an output buffer and we have a pending discont, don't
> ++ * clear the input timestamps; we will put them on the next buffer because
> ++ * otherwise we might create the first buffer with a very big timestamp gap. */
> ++ if (outbuf == NULL && ffmpegdec->discont) {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "no buffer but keeping timestamp");
> ++ ffmpegdec->clear_ts = FALSE;
> ++ }
> ++ break;
> ++ default:
> ++ GST_ERROR_OBJECT (ffmpegdec, "Asked to decode non-audio/video frame !");
> ++ g_assert_not_reached ();
> ++ break;
> ++ }
> ++
> ++ if (outbuf)
> ++ have_data = 1;
> ++
> ++ if (len < 0 || have_data < 0) {
> ++ GST_WARNING_OBJECT (ffmpegdec,
> ++ "ffdec_%s: decoding error (len: %d, have_data: %d)",
> ++ oclass->in_plugin->name, len, have_data);
> ++ *got_data = 0;
> ++ goto beach;
> ++ } else if (len == 0 && have_data == 0) {
> ++ *got_data = 0;
> ++ goto beach;
> ++ } else {
> ++ /* this is where I lost my last clue on ffmpeg... */
> ++ *got_data = 1;
> ++ }
> ++
> ++ if (outbuf) {
> ++ GST_LOG_OBJECT (ffmpegdec,
> ++ "Decoded data, now pushing buffer %p with offset %" G_GINT64_FORMAT
> ++ ", timestamp %" GST_TIME_FORMAT " and duration %" GST_TIME_FORMAT,
> ++ outbuf, GST_BUFFER_OFFSET (outbuf),
> ++ GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (outbuf)),
> ++ GST_TIME_ARGS (GST_BUFFER_DURATION (outbuf)));
> ++
> ++ /* mark pending discont */
> ++ if (ffmpegdec->discont) {
> ++ GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
> ++ ffmpegdec->discont = FALSE;
> ++ }
> ++
> ++ if (ffmpegdec->segment.rate > 0.0) {
> ++ /* and off we go */
> ++ *ret = gst_pad_push (ffmpegdec->srcpad, outbuf);
> ++ } else {
> ++ /* reverse playback, queue frame till later when we get a discont. */
> ++ GST_DEBUG_OBJECT (ffmpegdec, "queued frame");
> ++ ffmpegdec->queued = g_list_prepend (ffmpegdec->queued, outbuf);
> ++ *ret = GST_FLOW_OK;
> ++ }
> ++ } else {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "We didn't get a decoded buffer");
> ++ }
> ++
> ++beach:
> ++ return len;
> ++
> ++ /* ERRORS */
> ++no_codec:
> ++ {
> ++ GST_ERROR_OBJECT (ffmpegdec, "no codec context");
> ++ return -1;
> ++ }
> ++}
> ++
> ++static void
> ++gst_ffmpegdec_drain (GstFFMpegDec * ffmpegdec)
> ++{
> ++ GstFFMpegDecClass *oclass;
> ++
> ++ oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
> ++
> ++ if (oclass->in_plugin->capabilities & CODEC_CAP_DELAY) {
> ++ gint have_data, len, try = 0;
> ++
> ++ GST_LOG_OBJECT (ffmpegdec,
> ++ "codec has delay capabilities, calling until ffmpeg has drained everything");
> ++
> ++ do {
> ++ GstFlowReturn ret;
> ++
> ++ len =
> ++ gst_ffmpegdec_frame (ffmpegdec, NULL, 0, &have_data, &ts_info_none,
> ++ &ret);
> ++ if (len < 0 || have_data == 0)
> ++ break;
> ++ } while (try++ < 10);
> ++ }
> ++ if (ffmpegdec->segment.rate < 0.0) {
> ++ /* if we have some queued frames for reverse playback, flush them now */
> ++ flush_queued (ffmpegdec);
> ++ }
> ++}
> ++
> ++static void
> ++gst_ffmpegdec_flush_pcache (GstFFMpegDec * ffmpegdec)
> ++{
> ++ if (ffmpegdec->pctx) {
> ++ gint size, bsize;
> ++ guint8 *data;
> ++ guint8 bdata[FF_INPUT_BUFFER_PADDING_SIZE];
> ++
> ++ bsize = FF_INPUT_BUFFER_PADDING_SIZE;
> ++ memset (bdata, 0, bsize);
> ++
> ++ /* parse some dummy data to work around some ffmpeg weirdness where it keeps
> ++ * the previous pts around */
> ++ av_parser_parse2 (ffmpegdec->pctx, ffmpegdec->context,
> ++ &data, &size, bdata, bsize, -1, -1, -1);
> ++ ffmpegdec->pctx->pts = -1;
> ++ ffmpegdec->pctx->dts = -1;
> ++ }
> ++
> ++ if (ffmpegdec->pcache) {
> ++ gst_buffer_unref (ffmpegdec->pcache);
> ++ ffmpegdec->pcache = NULL;
> ++ }
> ++}
> ++
> ++static gboolean
> ++gst_ffmpegdec_sink_event (GstPad * pad, GstEvent * event)
> ++{
> ++ GstFFMpegDec *ffmpegdec;
> ++ gboolean ret = FALSE;
> ++
> ++ ffmpegdec = (GstFFMpegDec *) gst_pad_get_parent (pad);
> ++
> ++ GST_DEBUG_OBJECT (ffmpegdec, "Handling %s event",
> ++ GST_EVENT_TYPE_NAME (event));
> ++
> ++ switch (GST_EVENT_TYPE (event)) {
> ++ case GST_EVENT_EOS:
> ++ {
> ++ gst_ffmpegdec_drain (ffmpegdec);
> ++ break;
> ++ }
> ++ case GST_EVENT_FLUSH_STOP:
> ++ {
> ++ if (ffmpegdec->opened) {
> ++ avcodec_flush_buffers (ffmpegdec->context);
> ++ }
> ++ gst_ffmpegdec_reset_ts (ffmpegdec);
> ++ gst_ffmpegdec_reset_qos (ffmpegdec);
> ++ gst_ffmpegdec_flush_pcache (ffmpegdec);
> ++ gst_segment_init (&ffmpegdec->segment, GST_FORMAT_TIME);
> ++ clear_queued (ffmpegdec);
> ++ break;
> ++ }
> ++ case GST_EVENT_NEWSEGMENT:
> ++ {
> ++ gboolean update;
> ++ GstFormat fmt;
> ++ gint64 start, stop, time;
> ++ gdouble rate, arate;
> ++
> ++ gst_event_parse_new_segment_full (event, &update, &rate, &arate, &fmt,
> ++ &start, &stop, &time);
> ++
> ++ switch (fmt) {
> ++ case GST_FORMAT_TIME:
> ++ /* fine, our native segment format */
> ++ break;
> ++ case GST_FORMAT_BYTES:
> ++ {
> ++ gint bit_rate;
> ++
> ++ bit_rate = ffmpegdec->context->bit_rate;
> ++
> ++ /* convert to time or fail */
> ++ if (!bit_rate)
> ++ goto no_bitrate;
> ++
> ++ GST_DEBUG_OBJECT (ffmpegdec, "bitrate: %d", bit_rate);
> ++
> ++ /* convert values to TIME */
> ++ if (start != -1)
> ++ start = gst_util_uint64_scale_int (start, GST_SECOND, bit_rate);
> ++ if (stop != -1)
> ++ stop = gst_util_uint64_scale_int (stop, GST_SECOND, bit_rate);
> ++ if (time != -1)
> ++ time = gst_util_uint64_scale_int (time, GST_SECOND, bit_rate);
> ++
> ++ /* unref old event */
> ++ gst_event_unref (event);
> ++
> ++ /* create new converted time segment */
> ++ fmt = GST_FORMAT_TIME;
> ++ /* FIXME, bitrate is not good enough to find a good stop, let's
> ++ * hope start and time were 0... meh. */
> ++ stop = -1;
> ++ event = gst_event_new_new_segment (update, rate, fmt,
> ++ start, stop, time);
> ++ break;
> ++ }
> ++ default:
> ++ /* invalid format */
> ++ goto invalid_format;
> ++ }
> ++
> ++ /* drain pending frames before trying to use the new segment, queued
> ++ * buffers belonged to the previous segment. */
> ++ if (ffmpegdec->context->codec)
> ++ gst_ffmpegdec_drain (ffmpegdec);
> ++
> ++ GST_DEBUG_OBJECT (ffmpegdec,
> ++ "NEWSEGMENT in time start %" GST_TIME_FORMAT " -- stop %"
> ++ GST_TIME_FORMAT, GST_TIME_ARGS (start), GST_TIME_ARGS (stop));
> ++
> ++ /* and store the values */
> ++ gst_segment_set_newsegment_full (&ffmpegdec->segment, update,
> ++ rate, arate, fmt, start, stop, time);
> ++ break;
> ++ }
> ++ default:
> ++ break;
> ++ }
> ++
> ++ /* and push segment downstream */
> ++ ret = gst_pad_push_event (ffmpegdec->srcpad, event);
> ++
> ++done:
> ++ gst_object_unref (ffmpegdec);
> ++
> ++ return ret;
> ++
> ++ /* ERRORS */
> ++no_bitrate:
> ++ {
> ++ GST_WARNING_OBJECT (ffmpegdec, "no bitrate to convert BYTES to TIME");
> ++ gst_event_unref (event);
> ++ goto done;
> ++ }
> ++invalid_format:
> ++ {
> ++ GST_WARNING_OBJECT (ffmpegdec, "unknown format received in NEWSEGMENT");
> ++ gst_event_unref (event);
> ++ goto done;
> ++ }
> ++}
> ++
> ++static GstFlowReturn
> ++gst_ffmpegdec_chain (GstPad * pad, GstBuffer * inbuf)
> ++{
> ++ GstFFMpegDec *ffmpegdec;
> ++ GstFFMpegDecClass *oclass;
> ++ guint8 *data, *bdata;
> ++ gint size, bsize, len, have_data;
> ++ GstFlowReturn ret = GST_FLOW_OK;
> ++ GstClockTime in_timestamp;
> ++ GstClockTime in_duration;
> ++ gboolean discont;
> ++ gint64 in_offset;
> ++ const GstTSInfo *in_info;
> ++ const GstTSInfo *dec_info;
> ++
> ++ ffmpegdec = (GstFFMpegDec *) (GST_PAD_PARENT (pad));
> ++
> ++ if (G_UNLIKELY (!ffmpegdec->opened))
> ++ goto not_negotiated;
> ++
> ++ discont = GST_BUFFER_IS_DISCONT (inbuf);
> ++
> ++ /* The discont flag marks a buffer that is not continuous with the previous
> ++ * buffer. This means we need to clear whatever data we currently have. We
> ++ * currently also wait for a new keyframe, which might be suboptimal in the
> ++ * case of a network error; better to show the errors than to drop all data. */
> ++ if (G_UNLIKELY (discont)) {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "received DISCONT");
> ++ /* drain what we have queued */
> ++ gst_ffmpegdec_drain (ffmpegdec);
> ++ gst_ffmpegdec_flush_pcache (ffmpegdec);
> ++ avcodec_flush_buffers (ffmpegdec->context);
> ++ ffmpegdec->discont = TRUE;
> ++ gst_ffmpegdec_reset_ts (ffmpegdec);
> ++ }
> ++ /* by default we clear the input timestamp after decoding each frame so that
> ++ * interpolation can work. */
> ++ ffmpegdec->clear_ts = TRUE;
> ++
> ++ oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
> ++
> ++ /* parse cache joining. If there is cached data */
> ++ if (ffmpegdec->pcache) {
> ++ /* join with previous data */
> ++ GST_LOG_OBJECT (ffmpegdec, "join parse cache");
> ++ inbuf = gst_buffer_join (ffmpegdec->pcache, inbuf);
> ++ /* no more cached data, we assume we can consume the complete cache */
> ++ ffmpegdec->pcache = NULL;
> ++ }
> ++
> ++ in_timestamp = GST_BUFFER_TIMESTAMP (inbuf);
> ++ in_duration = GST_BUFFER_DURATION (inbuf);
> ++ in_offset = GST_BUFFER_OFFSET (inbuf);
> ++
> ++ /* get handle to timestamp info, we can pass this around to ffmpeg */
> ++ in_info = gst_ts_info_store (ffmpegdec, in_timestamp, in_duration, in_offset);
> ++
> ++ if (in_timestamp != -1) {
> ++ /* check for increasing timestamps if they are jumping backwards, we
> ++ * probably are dealing with PTS as timestamps */
> ++ if (!ffmpegdec->reordered_in && ffmpegdec->last_in != -1) {
> ++ if (in_timestamp < ffmpegdec->last_in) {
> ++ GST_LOG_OBJECT (ffmpegdec, "detected reordered input timestamps");
> ++ ffmpegdec->reordered_in = TRUE;
> ++ ffmpegdec->last_diff = GST_CLOCK_TIME_NONE;
> ++ } else if (in_timestamp > ffmpegdec->last_in) {
> ++ GstClockTime diff;
> ++ /* keep track of timestamp diff to estimate duration */
> ++ diff = in_timestamp - ffmpegdec->last_in;
> ++ /* need to scale with the number of frames in the interval */
> ++ if (ffmpegdec->last_frames)
> ++ diff /= ffmpegdec->last_frames;
> ++
> ++ GST_LOG_OBJECT (ffmpegdec, "estimated duration %" GST_TIME_FORMAT " %u",
> ++ GST_TIME_ARGS (diff), ffmpegdec->last_frames);
> ++
> ++ ffmpegdec->last_diff = diff;
> ++ }
> ++ }
> ++ ffmpegdec->last_in = in_timestamp;
> ++ ffmpegdec->last_frames = 0;
> ++ }
> ++
> ++ GST_LOG_OBJECT (ffmpegdec,
> ++ "Received new data of size %u, offset:%" G_GUINT64_FORMAT ", ts:%"
> ++ GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT ", info %d",
> ++ GST_BUFFER_SIZE (inbuf), GST_BUFFER_OFFSET (inbuf),
> ++ GST_TIME_ARGS (in_timestamp), GST_TIME_ARGS (in_duration), in_info->idx);
> ++
> ++ /* workarounds, functions write to buffers:
> ++ * libavcodec/svq1.c:svq1_decode_frame writes to the given buffer.
> ++ * libavcodec/svq3.c:svq3_decode_slice_header too.
> ++ * ffmpeg devs know about it and will fix it (they said). */
> ++ if (oclass->in_plugin->id == CODEC_ID_SVQ1 ||
> ++ oclass->in_plugin->id == CODEC_ID_SVQ3) {
> ++ inbuf = gst_buffer_make_writable (inbuf);
> ++ }
> ++
> ++ bdata = GST_BUFFER_DATA (inbuf);
> ++ bsize = GST_BUFFER_SIZE (inbuf);
> ++
> ++ if (ffmpegdec->do_padding) {
> ++ /* add padding */
> ++ if (ffmpegdec->padded_size < bsize + FF_INPUT_BUFFER_PADDING_SIZE) {
> ++ ffmpegdec->padded_size = bsize + FF_INPUT_BUFFER_PADDING_SIZE;
> ++ ffmpegdec->padded = g_realloc (ffmpegdec->padded, ffmpegdec->padded_size);
> ++ GST_LOG_OBJECT (ffmpegdec, "resized padding buffer to %d",
> ++ ffmpegdec->padded_size);
> ++ }
> ++ memcpy (ffmpegdec->padded, bdata, bsize);
> ++ memset (ffmpegdec->padded + bsize, 0, FF_INPUT_BUFFER_PADDING_SIZE);
> ++
> ++ bdata = ffmpegdec->padded;
> ++ }
> ++
> ++ do {
> ++ guint8 tmp_padding[FF_INPUT_BUFFER_PADDING_SIZE];
> ++
> ++ /* parse, if at all possible */
> ++ if (ffmpegdec->pctx) {
> ++ gint res;
> ++
> ++ GST_LOG_OBJECT (ffmpegdec,
> ++ "Calling av_parser_parse2 with offset %" G_GINT64_FORMAT ", ts:%"
> ++ GST_TIME_FORMAT " size %d", in_offset, GST_TIME_ARGS (in_timestamp),
> ++ bsize);
> ++
> ++ /* feed the parser. We pass the timestamp info so that we can recover all
> ++ * info again later */
> ++ res = av_parser_parse2 (ffmpegdec->pctx, ffmpegdec->context,
> ++ &data, &size, bdata, bsize, in_info->idx, in_info->idx, in_offset);
> ++
> ++ GST_LOG_OBJECT (ffmpegdec,
> ++ "parser returned res %d and size %d, id %" G_GINT64_FORMAT, res, size,
> ++ ffmpegdec->pctx->pts);
> ++
> ++ /* store pts for decoding */
> ++ if (ffmpegdec->pctx->pts != AV_NOPTS_VALUE && ffmpegdec->pctx->pts != -1)
> ++ dec_info = gst_ts_info_get (ffmpegdec, ffmpegdec->pctx->pts);
> ++ else {
> ++ /* ffmpeg sometimes loses track after a flush, help it by feeding a
> ++ * valid start time */
> ++ ffmpegdec->pctx->pts = in_info->idx;
> ++ ffmpegdec->pctx->dts = in_info->idx;
> ++ dec_info = in_info;
> ++ }
> ++
> ++ GST_LOG_OBJECT (ffmpegdec, "consuming %d bytes. id %d", size,
> ++ dec_info->idx);
> ++
> ++ if (res) {
> ++ /* there is output, set pointers for next round. */
> ++ bsize -= res;
> ++ bdata += res;
> ++ } else {
> ++ /* Parser did not consume any data, make sure we don't clear the
> ++ * timestamp for the next round */
> ++ ffmpegdec->clear_ts = FALSE;
> ++ }
> ++
> ++ /* if there is no output, we must break and wait for more data. also the
> ++ * timestamp in the context is not updated. */
> ++ if (size == 0) {
> ++ if (bsize > 0)
> ++ continue;
> ++ else
> ++ break;
> ++ }
> ++ } else {
> ++ data = bdata;
> ++ size = bsize;
> ++
> ++ dec_info = in_info;
> ++ }
> ++
> ++ if (ffmpegdec->do_padding) {
> ++ /* add temporary padding */
> ++ memcpy (tmp_padding, data + size, FF_INPUT_BUFFER_PADDING_SIZE);
> ++ memset (data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
> ++ }
> ++
> ++ /* decode a frame of audio/video now */
> ++ len =
> ++ gst_ffmpegdec_frame (ffmpegdec, data, size, &have_data, dec_info, &ret);
> ++
> ++ if (ffmpegdec->do_padding) {
> ++ memcpy (data + size, tmp_padding, FF_INPUT_BUFFER_PADDING_SIZE);
> ++ }
> ++
> ++ if (ret != GST_FLOW_OK) {
> ++ GST_LOG_OBJECT (ffmpegdec, "breaking because of flow ret %s",
> ++ gst_flow_get_name (ret));
> ++ /* bad flow return, make sure we discard all data and exit */
> ++ bsize = 0;
> ++ break;
> ++ }
> ++ if (!ffmpegdec->pctx) {
> ++ if (len == 0 && !have_data) {
> ++ /* nothing was decoded, this could be because no data was available or
> ++ * because we were skipping frames.
> ++ * Without a parser context we must exit and wait for more data; we keep the
> ++ * data we tried to decode. */
> ++ GST_LOG_OBJECT (ffmpegdec, "Decoding didn't return any data, breaking");
> ++ break;
> ++ } else if (len < 0) {
> ++ /* a decoding error happened, we must break and try again with next data. */
> ++ GST_LOG_OBJECT (ffmpegdec, "Decoding error, breaking");
> ++ bsize = 0;
> ++ break;
> ++ }
> ++ /* prepare for the next round, for codecs with a context we did this
> ++ * already when using the parser. */
> ++ bsize -= len;
> ++ bdata += len;
> ++ } else {
> ++ if (len == 0) {
> ++ /* nothing was decoded, this could be because no data was available or
> ++ * because we were skipping frames. Since we have a parser we can
> ++ * continue with the next frame */
> ++ GST_LOG_OBJECT (ffmpegdec,
> ++ "Decoding didn't return any data, trying next");
> ++ } else if (len < 0) {
> ++ /* we have a context that will bring us to the next frame */
> ++ GST_LOG_OBJECT (ffmpegdec, "Decoding error, trying next");
> ++ }
> ++ }
> ++
> ++ /* make sure we don't use the same old timestamp for the next frame and let
> ++ * the interpolation take care of it. */
> ++ if (ffmpegdec->clear_ts) {
> ++ in_timestamp = GST_CLOCK_TIME_NONE;
> ++ in_duration = GST_CLOCK_TIME_NONE;
> ++ in_offset = GST_BUFFER_OFFSET_NONE;
> ++ in_info = GST_TS_INFO_NONE;
> ++ } else {
> ++ ffmpegdec->clear_ts = TRUE;
> ++ }
> ++ ffmpegdec->last_frames++;
> ++
> ++ GST_LOG_OBJECT (ffmpegdec, "Before (while bsize>0). bsize:%d , bdata:%p",
> ++ bsize, bdata);
> ++ } while (bsize > 0);
> ++
> ++ /* keep left-over */
> ++ if (ffmpegdec->pctx && bsize > 0) {
> ++ in_timestamp = GST_BUFFER_TIMESTAMP (inbuf);
> ++ in_offset = GST_BUFFER_OFFSET (inbuf);
> ++
> ++ GST_LOG_OBJECT (ffmpegdec,
> ++ "Keeping %d bytes of data with offset %" G_GINT64_FORMAT ", timestamp %"
> ++ GST_TIME_FORMAT, bsize, in_offset, GST_TIME_ARGS (in_timestamp));
> ++
> ++ ffmpegdec->pcache = gst_buffer_create_sub (inbuf,
> ++ GST_BUFFER_SIZE (inbuf) - bsize, bsize);
> ++ /* we keep timestamp, even though all we really know is that the correct
> ++ * timestamp is not below the one from inbuf */
> ++ GST_BUFFER_TIMESTAMP (ffmpegdec->pcache) = in_timestamp;
> ++ GST_BUFFER_OFFSET (ffmpegdec->pcache) = in_offset;
> ++ } else if (bsize > 0) {
> ++ GST_DEBUG_OBJECT (ffmpegdec, "Dropping %d bytes of data", bsize);
> ++ }
> ++ gst_buffer_unref (inbuf);
> ++
> ++ return ret;
> ++
> ++ /* ERRORS */
> ++not_negotiated:
> ++ {
> ++ oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
> ++ GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
> ++ ("ffdec_%s: input format was not set before data start",
> ++ oclass->in_plugin->name));
> ++ gst_buffer_unref (inbuf);
> ++ return GST_FLOW_NOT_NEGOTIATED;
> ++ }
> ++}
> ++
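The last_diff bookkeeping in gst_ffmpegdec_chain() above amounts to spreading the gap between two increasing input timestamps over the frames decoded in between; a small illustrative helper (assumed name, not part of the patch):

    /* Illustrative: per-frame duration estimate from consecutive input timestamps. */
    static GstClockTime
    estimate_frame_duration (GstClockTime prev_ts, GstClockTime ts, guint frames)
    {
      GstClockTime diff = ts - prev_ts;
      return frames ? diff / frames : diff;
    }
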
> ++static GstStateChangeReturn
> ++gst_ffmpegdec_change_state (GstElement * element, GstStateChange transition)
> ++{
> ++ GstFFMpegDec *ffmpegdec = (GstFFMpegDec *) element;
> ++ GstStateChangeReturn ret;
> ++
> ++ ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
> ++
> ++ switch (transition) {
> ++ case GST_STATE_CHANGE_PAUSED_TO_READY:
> ++ GST_OBJECT_LOCK (ffmpegdec);
> ++ gst_ffmpegdec_close (ffmpegdec);
> ++ GST_OBJECT_UNLOCK (ffmpegdec);
> ++ clear_queued (ffmpegdec);
> ++ g_free (ffmpegdec->padded);
> ++ ffmpegdec->padded = NULL;
> ++ ffmpegdec->padded_size = 0;
> ++ ffmpegdec->can_allocate_aligned = TRUE;
> ++ break;
> ++ default:
> ++ break;
> ++ }
> ++
> ++ return ret;
> ++}
> ++
> ++static void
> ++gst_ffmpegdec_set_property (GObject * object,
> ++ guint prop_id, const GValue * value, GParamSpec * pspec)
> ++{
> ++ GstFFMpegDec *ffmpegdec = (GstFFMpegDec *) object;
> ++
> ++ switch (prop_id) {
> ++ case PROP_LOWRES:
> ++ ffmpegdec->lowres = ffmpegdec->context->lowres = g_value_get_enum (value);
> ++ break;
> ++ case PROP_SKIPFRAME:
> ++ ffmpegdec->skip_frame = ffmpegdec->context->skip_frame =
> ++ g_value_get_enum (value);
> ++ break;
> ++ case PROP_DIRECT_RENDERING:
> ++ ffmpegdec->direct_rendering = g_value_get_boolean (value);
> ++ break;
> ++ case PROP_DO_PADDING:
> ++ ffmpegdec->do_padding = g_value_get_boolean (value);
> ++ break;
> ++ case PROP_DEBUG_MV:
> ++ ffmpegdec->debug_mv = ffmpegdec->context->debug_mv =
> ++ g_value_get_boolean (value);
> ++ break;
> ++ case PROP_CROP:
> ++ ffmpegdec->crop = g_value_get_boolean (value);
> ++ break;
> ++ case PROP_MAX_THREADS:
> ++ ffmpegdec->max_threads = g_value_get_int (value);
> ++ break;
> ++ default:
> ++ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
> ++ break;
> ++ }
> ++}
> ++
> ++static void
> ++gst_ffmpegdec_get_property (GObject * object,
> ++ guint prop_id, GValue * value, GParamSpec * pspec)
> ++{
> ++ GstFFMpegDec *ffmpegdec = (GstFFMpegDec *) object;
> ++
> ++ switch (prop_id) {
> ++ case PROP_LOWRES:
> ++ g_value_set_enum (value, ffmpegdec->context->lowres);
> ++ break;
> ++ case PROP_SKIPFRAME:
> ++ g_value_set_enum (value, ffmpegdec->context->skip_frame);
> ++ break;
> ++ case PROP_DIRECT_RENDERING:
> ++ g_value_set_boolean (value, ffmpegdec->direct_rendering);
> ++ break;
> ++ case PROP_DO_PADDING:
> ++ g_value_set_boolean (value, ffmpegdec->do_padding);
> ++ break;
> ++ case PROP_DEBUG_MV:
> ++ g_value_set_boolean (value, ffmpegdec->context->debug_mv);
> ++ break;
> ++ case PROP_CROP:
> ++ g_value_set_boolean (value, ffmpegdec->crop);
> ++ break;
> ++ case PROP_MAX_THREADS:
> ++ g_value_set_int (value, ffmpegdec->max_threads);
> ++ break;
> ++ default:
> ++ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
> ++ break;
> ++ }
> ++}
> ++
> ++gboolean
> ++gst_ffmpegdec_register (GstPlugin * plugin)
> ++{
> ++ GTypeInfo typeinfo = {
> ++ sizeof (GstFFMpegDecClass),
> ++ (GBaseInitFunc) gst_ffmpegdec_base_init,
> ++ NULL,
> ++ (GClassInitFunc) gst_ffmpegdec_class_init,
> ++ NULL,
> ++ NULL,
> ++ sizeof (GstFFMpegDec),
> ++ 0,
> ++ (GInstanceInitFunc) gst_ffmpegdec_init,
> ++ };
> ++ GType type;
> ++ AVCodec *in_plugin;
> ++ gint rank;
> ++
> ++ in_plugin = av_codec_next (NULL);
> ++
> ++ GST_LOG ("Registering decoders");
> ++
> ++ while (in_plugin) {
> ++ gchar *type_name;
> ++ gchar *plugin_name;
> ++
> ++ /* only decoders */
> ++ if (!in_plugin->decode) {
> ++ goto next;
> ++ }
> ++
> ++ /* no quasi-codecs, please */
> ++ if (in_plugin->id == CODEC_ID_RAWVIDEO ||
> ++ in_plugin->id == CODEC_ID_V210 ||
> ++ in_plugin->id == CODEC_ID_V210X ||
> ++ in_plugin->id == CODEC_ID_R210 ||
> ++ (in_plugin->id >= CODEC_ID_PCM_S16LE &&
> ++ in_plugin->id <= CODEC_ID_PCM_BLURAY)) {
> ++ goto next;
> ++ }
> ++
> ++ /* No decoders depending on external libraries (we don't build them, but
> ++ * people who build against an external ffmpeg might have them).
> ++ * We have native gstreamer plugins for all of those libraries anyway. */
> ++ if (!strncmp (in_plugin->name, "lib", 3)) {
> ++ GST_DEBUG
> ++ ("Not using external library decoder %s. Use the gstreamer-native ones instead.",
> ++ in_plugin->name);
> ++ goto next;
> ++ }
> ++
> ++ /* No vdpau plugins until we can figure out how to properly use them
> ++ * outside of ffmpeg. */
> ++ if (g_str_has_suffix (in_plugin->name, "_vdpau")) {
> ++ GST_DEBUG
> ++ ("Ignoring VDPAU decoder %s. We can't handle this outside of ffmpeg",
> ++ in_plugin->name);
> ++ goto next;
> ++ }
> ++
> ++ if (g_str_has_suffix (in_plugin->name, "_xvmc")) {
> ++ GST_DEBUG
> ++ ("Ignoring XVMC decoder %s. We can't handle this outside of ffmpeg",
> ++ in_plugin->name);
> ++ goto next;
> ++ }
> ++
> ++ GST_DEBUG ("Trying plugin %s [%s]", in_plugin->name, in_plugin->long_name);
> ++
> ++ /* no codecs for which we're GUARANTEED to have better alternatives */
> ++ /* MPEG1VIDEO : the mpeg2video decoder is preferred */
> ++ /* MP1 : Use MP3 for decoding */
> ++ /* MP2 : Use MP3 for decoding */
> ++ /* Theora: Use libtheora based theoradec */
> ++ if (!strcmp (in_plugin->name, "gif") ||
> ++ !strcmp (in_plugin->name, "vorbis") ||
> ++ !strcmp (in_plugin->name, "theora") ||
> ++ !strcmp (in_plugin->name, "mpeg1video") ||
> ++ !strcmp (in_plugin->name, "wavpack") ||
> ++ !strcmp (in_plugin->name, "mp1") ||
> ++ !strcmp (in_plugin->name, "mp2") ||
> ++ !strcmp (in_plugin->name, "libfaad") ||
> ++ !strcmp (in_plugin->name, "mpeg4aac") ||
> ++ !strcmp (in_plugin->name, "ass") ||
> ++ !strcmp (in_plugin->name, "srt") ||
> ++ !strcmp (in_plugin->name, "pgssub") ||
> ++ !strcmp (in_plugin->name, "dvdsub") ||
> ++ !strcmp (in_plugin->name, "dvbsub")) {
> ++ GST_LOG ("Ignoring decoder %s", in_plugin->name);
> ++ goto next;
> ++ }
> ++
> ++ /* construct the type */
> ++ plugin_name = g_strdup ((gchar *) in_plugin->name);
> ++ g_strdelimit (plugin_name, NULL, '_');
> ++ type_name = g_strdup_printf ("ffdec_%s", plugin_name);
> ++ g_free (plugin_name);
> ++
> ++ type = g_type_from_name (type_name);
> ++
> ++ if (!type) {
> ++ /* create the gtype now */
> ++ type = g_type_register_static (GST_TYPE_ELEMENT, type_name, &typeinfo, 0);
> ++ g_type_set_qdata (type, GST_FFDEC_PARAMS_QDATA, (gpointer) in_plugin);
> ++ }
> ++
> ++ /* (Ronald) MPEG-4 gets a higher priority because it has been well-
> ++ * tested and by far outperforms divxdec/xviddec - so we prefer it.
> ++ * msmpeg4v3 same, as it outperforms divxdec for divx3 playback.
> ++ * VC1/WMV3 are not working and thus unpreferred for now. */
> ++ switch (in_plugin->id) {
> ++ case CODEC_ID_MPEG4:
> ++ case CODEC_ID_MSMPEG4V3:
> ++ case CODEC_ID_H264:
> ++ case CODEC_ID_RA_144:
> ++ case CODEC_ID_RA_288:
> ++ case CODEC_ID_RV10:
> ++ case CODEC_ID_RV20:
> ++ case CODEC_ID_RV30:
> ++ case CODEC_ID_RV40:
> ++ case CODEC_ID_COOK:
> ++ rank = GST_RANK_SECONDARY;
> ++ break;
> ++ /* DVVIDEO: we have a good dv decoder, fast on both ppc as well as x86.
> ++ * They say libdv's quality is better though. leave as secondary.
> ++ * note: if you change this, see the code in gstdv.c in good/ext/dv.
> ++ *
> ++ * SIPR: decoder should have a higher rank than realaudiodec.
> ++ */
> ++ case CODEC_ID_DVVIDEO:
> ++ case CODEC_ID_SIPR:
> ++ rank = GST_RANK_SECONDARY;
> ++ break;
> ++ case CODEC_ID_MP3:
> ++ rank = GST_RANK_NONE;
> ++ break;
> ++ /* TEMPORARILY DISABLING AC3/EAC3/DTS for 0.10.12 release
> ++ * due to downmixing failure.
> ++ * See Bug #608892 for more details */
> ++ case CODEC_ID_EAC3:
> ++ case CODEC_ID_AC3:
> ++ case CODEC_ID_DTS:
> ++ rank = GST_RANK_NONE;
> ++ break;
> ++ default:
> ++ rank = GST_RANK_MARGINAL;
> ++ break;
> ++ }
> ++ if (!gst_element_register (plugin, type_name, rank, type)) {
> ++ g_warning ("Failed to register %s", type_name);
> ++ g_free (type_name);
> ++ return FALSE;
> ++ }
> ++
> ++ g_free (type_name);
> ++
> ++ next:
> ++ in_plugin = av_codec_next (in_plugin);
> ++ }
> ++
> ++ GST_LOG ("Finished Registering decoders");
> ++
> ++ return TRUE;
> ++}
> +diff -uNr gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegdec.c.rej gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegdec.c.rej
> +--- gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegdec.c.rej 1970-01-01 01:00:00.000000000 +0100
> ++++ gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegdec.c.rej 2014-08-08 15:26:38.471858652 +0200
> +@@ -0,0 +1,11 @@
> ++--- ext/ffmpeg/gstffmpegdec.c
> +++++ ext/ffmpeg/gstffmpegdec.c
> ++@@ -1565,7 +1564,7 @@
> ++ gst_message_new_latency (GST_OBJECT_CAST (ffmpegdec)));
> ++ }
> ++
> ++- is_itype = (ffmpegdec->picture->pict_type == FF_I_TYPE);
> +++ is_itype = (ffmpegdec->picture->pict_type == AV_PICTURE_TYPE_I);
> ++ is_reference = (ffmpegdec->picture->reference == 1);
> ++
> ++ iskeyframe = (is_itype || is_reference || ffmpegdec->picture->key_frame)
> +diff -uNr gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegdemux.c gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegdemux.c
> +--- gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegdemux.c 2011-07-13 11:07:28.000000000 +0200
> ++++ gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegdemux.c 2014-08-08 15:26:07.874857555 +0200
> +@@ -343,8 +343,11 @@
> + demux->audiopads = 0;
> +
> + /* close demuxer context from ffmpeg */
> +- av_close_input_file (demux->context);
> +- demux->context = NULL;
> ++ if (demux->seekable)
> ++ gst_ffmpegdata_close (demux->context->pb);
> ++ else
> ++ gst_ffmpeg_pipe_close (demux->context->pb);
> ++ avformat_close_input (&demux->context);
> +
> + GST_OBJECT_LOCK (demux);
> + demux->opened = FALSE;
> +@@ -1146,9 +1149,9 @@
> + static gboolean
> + gst_ffmpegdemux_open (GstFFMpegDemux * demux)
> + {
> ++ AVIOContext *iocontext = NULL;
> + GstFFMpegDemuxClass *oclass =
> + (GstFFMpegDemuxClass *) G_OBJECT_GET_CLASS (demux);
> +- gchar *location;
> + gint res, n_streams, i;
> + #if 0
> + /* Re-enable once converted to new AVMetaData API
> +@@ -1164,15 +1167,14 @@
> +
> + /* open via our input protocol hack */
> + if (demux->seekable)
> +- location = g_strdup_printf ("gstreamer://%p", demux->sinkpad);
> ++ res = gst_ffmpegdata_open (demux->sinkpad, AVIO_FLAG_READ, &iocontext);
> + else
> +- location = g_strdup_printf ("gstpipe://%p", &demux->ffpipe);
> +- GST_DEBUG_OBJECT (demux, "about to call av_open_input_file %s", location);
> ++ res = gst_ffmpeg_pipe_open (&demux->ffpipe, AVIO_FLAG_READ, &iocontext);
> +
> +- res = av_open_input_file (&demux->context, location,
> +- oclass->in_plugin, 0, NULL);
> ++ demux->context = avformat_alloc_context ();
> ++ demux->context->pb = iocontext;
> ++ res = avformat_open_input (&demux->context, NULL, oclass->in_plugin, NULL);
> +
> +- g_free (location);
> + GST_DEBUG_OBJECT (demux, "av_open_input returned %d", res);
> + if (res < 0)
> + goto open_failed;
> +diff -uNr gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegenc.c gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegenc.c
> +--- gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegenc.c 2011-10-31 11:14:03.000000000 +0100
> ++++ gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegenc.c 2014-08-08 15:32:18.608870847 +0200
> +@@ -770,7 +770,7 @@
> + GST_OBJECT_UNLOCK (ffmpegenc);
> +
> + if (force_keyframe)
> +- ffmpegenc->picture->pict_type = FF_I_TYPE;
> ++ ffmpegenc->picture->pict_type = AV_PICTURE_TYPE_I;
> +
> + frame_size = gst_ffmpeg_avpicture_fill ((AVPicture *) ffmpegenc->picture,
> + GST_BUFFER_DATA (inbuf),
> +@@ -1136,7 +1136,7 @@
> + const GstStructure *s;
> + s = gst_event_get_structure (event);
> + if (gst_structure_has_name (s, "GstForceKeyUnit")) {
> +- ffmpegenc->picture->pict_type = FF_I_TYPE;
> ++ ffmpegenc->picture->pict_type = AV_PICTURE_TYPE_I;
> + }
> + break;
> + }
> +@@ -1339,7 +1339,7 @@
> + }
> +
> + /* only encoders */
> +- if (!in_plugin->encode) {
> ++ if (!av_codec_is_encoder (in_plugin)) {
> + goto next;
> + }
> +
> +diff -uNr gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegmux.c gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegmux.c
> +--- gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegmux.c 2011-07-13 11:07:28.000000000 +0200
> ++++ gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegmux.c 2014-08-08 15:26:07.874857555 +0200
> +@@ -24,8 +24,10 @@
> + #include <string.h>
> + #ifdef HAVE_FFMPEG_UNINSTALLED
> + #include <avformat.h>
> ++#include <opt.h>
> + #else
> + #include <libavformat/avformat.h>
> ++#include <libavutil/opt.h>
> + #endif
> +
> + #include <gst/gst.h>
> +@@ -336,9 +338,6 @@
> + ffmpegmux->context = g_new0 (AVFormatContext, 1);
> + ffmpegmux->context->oformat = oclass->in_plugin;
> + ffmpegmux->context->nb_streams = 0;
> +- g_snprintf (ffmpegmux->context->filename,
> +- sizeof (ffmpegmux->context->filename),
> +- "gstreamer://%p", ffmpegmux->srcpad);
> + ffmpegmux->opened = FALSE;
> +
> + ffmpegmux->videopads = 0;
> +@@ -450,10 +449,10 @@
> + gst_element_add_pad (element, pad);
> +
> + /* AVStream needs to be created */
> +- st = av_new_stream (ffmpegmux->context, collect_pad->padnum);
> ++ st = avformat_new_stream (ffmpegmux->context, NULL);
> ++ st->id = collect_pad->padnum;
> + st->codec->codec_type = type;
> + st->codec->codec_id = CODEC_ID_NONE; /* this is a check afterwards */
> +- st->stream_copy = 1; /* we're not the actual encoder */
> + st->codec->bit_rate = bitrate;
> + st->codec->frame_size = framesize;
> + /* we fill in codec during capsnego */
> +@@ -485,7 +484,7 @@
> + collect_pad = (GstFFMpegMuxPad *) gst_pad_get_element_private (pad);
> +
> + st = ffmpegmux->context->streams[collect_pad->padnum];
> +- ffmpegmux->context->preload = ffmpegmux->preload;
> ++ av_opt_set_int (&ffmpegmux->context, "preload", ffmpegmux->preload, 0);
> + ffmpegmux->context->max_delay = ffmpegmux->max_delay;
> +
> + /* for the format-specific guesses, we'll go to
> +@@ -552,7 +551,7 @@
> +
> + /* open "file" (gstreamer protocol to next element) */
> + if (!ffmpegmux->opened) {
> +- int open_flags = URL_WRONLY;
> ++ int open_flags = AVIO_FLAG_WRITE;
> +
> + /* we do need all streams to have started capsnego,
> + * or things will go horribly wrong */
> +@@ -646,19 +645,13 @@
> + open_flags |= GST_FFMPEG_URL_STREAMHEADER;
> + }
> +
> +- if (url_fopen (&ffmpegmux->context->pb,
> +- ffmpegmux->context->filename, open_flags) < 0) {
> ++ if (gst_ffmpegdata_open (ffmpegmux->srcpad, open_flags,
> ++ &ffmpegmux->context->pb) < 0) {
> + GST_ELEMENT_ERROR (ffmpegmux, LIBRARY, TOO_LAZY, (NULL),
> + ("Failed to open stream context in ffmux"));
> + return GST_FLOW_ERROR;
> + }
> +
> +- if (av_set_parameters (ffmpegmux->context, NULL) < 0) {
> +- GST_ELEMENT_ERROR (ffmpegmux, LIBRARY, INIT, (NULL),
> +- ("Failed to initialize muxer"));
> +- return GST_FLOW_ERROR;
> +- }
> +-
> + /* now open the mux format */
> + if (av_write_header (ffmpegmux->context) < 0) {
> + GST_ELEMENT_ERROR (ffmpegmux, LIBRARY, SETTINGS, (NULL),
> +@@ -670,7 +663,7 @@
> + ffmpegmux->opened = TRUE;
> +
> + /* flush the header so it will be used as streamheader */
> +- put_flush_packet (ffmpegmux->context->pb);
> ++ avio_flush (ffmpegmux->context->pb);
> + }
> +
> + /* take the one with earliest timestamp,
> +@@ -770,8 +763,8 @@
> + /* close down */
> + av_write_trailer (ffmpegmux->context);
> + ffmpegmux->opened = FALSE;
> +- put_flush_packet (ffmpegmux->context->pb);
> +- url_fclose (ffmpegmux->context->pb);
> ++ avio_flush (ffmpegmux->context->pb);
> ++ gst_ffmpegdata_close (ffmpegmux->context->pb);
> + gst_pad_push_event (ffmpegmux->srcpad, gst_event_new_eos ());
> + return GST_FLOW_UNEXPECTED;
> + }
> +@@ -795,6 +788,10 @@
> + break;
> + case GST_STATE_CHANGE_PAUSED_TO_READY:
> + gst_collect_pads_stop (ffmpegmux->collect);
> ++ if (ffmpegmux->opened) {
> ++ ffmpegmux->opened = FALSE;
> ++ gst_ffmpegdata_close (ffmpegmux->context->pb);
> ++ }
> + break;
> + default:
> + break;
> +@@ -809,7 +806,7 @@
> + gst_tag_setter_reset_tags (GST_TAG_SETTER (ffmpegmux));
> + if (ffmpegmux->opened) {
> + ffmpegmux->opened = FALSE;
> +- url_fclose (ffmpegmux->context->pb);
> ++ avio_close (ffmpegmux->context->pb);
> + }
> + break;
> + case GST_STATE_CHANGE_READY_TO_NULL:
> +diff -uNr gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegmux.c.orig gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegmux.c.orig
> +--- gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegmux.c.orig 1970-01-01 01:00:00.000000000 +0100
> ++++ gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegmux.c.orig 2011-07-13 11:07:28.000000000 +0200
> +@@ -0,0 +1,970 @@
> ++/* GStreamer
> ++ * Copyright (C) <1999> Erik Walthinsen <omega at cse.ogi.edu>
> ++ *
> ++ * This library is free software; you can redistribute it and/or
> ++ * modify it under the terms of the GNU Library General Public
> ++ * License as published by the Free Software Foundation; either
> ++ * version 2 of the License, or (at your option) any later version.
> ++ *
> ++ * This library is distributed in the hope that it will be useful,
> ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
> ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> ++ * Library General Public License for more details.
> ++ *
> ++ * You should have received a copy of the GNU Library General Public
> ++ * License along with this library; if not, write to the
> ++ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
> ++ * Boston, MA 02111-1307, USA.
> ++ */
> ++
> ++#ifdef HAVE_CONFIG_H
> ++#include "config.h"
> ++#endif
> ++
> ++#include <string.h>
> ++#ifdef HAVE_FFMPEG_UNINSTALLED
> ++#include <avformat.h>
> ++#else
> ++#include <libavformat/avformat.h>
> ++#endif
> ++
> ++#include <gst/gst.h>
> ++#include <gst/base/gstcollectpads.h>
> ++
> ++#include "gstffmpeg.h"
> ++#include "gstffmpegcodecmap.h"
> ++#include "gstffmpegutils.h"
> ++
> ++typedef struct _GstFFMpegMux GstFFMpegMux;
> ++typedef struct _GstFFMpegMuxPad GstFFMpegMuxPad;
> ++
> ++struct _GstFFMpegMuxPad
> ++{
> ++ GstCollectData collect; /* we extend the CollectData */
> ++
> ++ gint padnum;
> ++};
> ++
> ++struct _GstFFMpegMux
> ++{
> ++ GstElement element;
> ++
> ++ GstCollectPads *collect;
> ++ /* We need to keep track of our pads, so we do so here. */
> ++ GstPad *srcpad;
> ++
> ++ AVFormatContext *context;
> ++ gboolean opened;
> ++
> ++ gint videopads, audiopads;
> ++
> ++ /*< private > */
> ++ /* event_function is the collectpads default eventfunction */
> ++ GstPadEventFunction event_function;
> ++ int preload;
> ++ int max_delay;
> ++};
> ++
> ++typedef struct _GstFFMpegMuxClass GstFFMpegMuxClass;
> ++
> ++struct _GstFFMpegMuxClass
> ++{
> ++ GstElementClass parent_class;
> ++
> ++ AVOutputFormat *in_plugin;
> ++};
> ++
> ++#define GST_TYPE_FFMPEGMUX \
> ++ (gst_ffmpegdec_get_type())
> ++#define GST_FFMPEGMUX(obj) \
> ++ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_FFMPEGMUX,GstFFMpegMux))
> ++#define GST_FFMPEGMUX_CLASS(klass) \
> ++ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_FFMPEGMUX,GstFFMpegMuxClass))
> ++#define GST_IS_FFMPEGMUX(obj) \
> ++ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_FFMPEGMUX))
> ++#define GST_IS_FFMPEGMUX_CLASS(klass) \
> ++ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_FFMPEGMUX))
> ++
> ++enum
> ++{
> ++ /* FILL ME */
> ++ LAST_SIGNAL
> ++};
> ++
> ++enum
> ++{
> ++ ARG_0,
> ++ /* FILL ME */
> ++};
> ++
> ++enum
> ++{
> ++ PROP_0,
> ++ PROP_PRELOAD,
> ++ PROP_MAXDELAY
> ++};
> ++
> ++/* A number of function prototypes are given so we can refer to them later. */
> ++static void gst_ffmpegmux_class_init (GstFFMpegMuxClass * klass);
> ++static void gst_ffmpegmux_base_init (gpointer g_class);
> ++static void gst_ffmpegmux_init (GstFFMpegMux * ffmpegmux,
> ++ GstFFMpegMuxClass * g_class);
> ++static void gst_ffmpegmux_finalize (GObject * object);
> ++
> ++static gboolean gst_ffmpegmux_setcaps (GstPad * pad, GstCaps * caps);
> ++static GstPad *gst_ffmpegmux_request_new_pad (GstElement * element,
> ++ GstPadTemplate * templ, const gchar * name);
> ++static GstFlowReturn gst_ffmpegmux_collected (GstCollectPads * pads,
> ++ gpointer user_data);
> ++
> ++static gboolean gst_ffmpegmux_sink_event (GstPad * pad, GstEvent * event);
> ++
> ++static GstStateChangeReturn gst_ffmpegmux_change_state (GstElement * element,
> ++ GstStateChange transition);
> ++
> ++static void gst_ffmpegmux_set_property (GObject * object, guint prop_id,
> ++ const GValue * value, GParamSpec * pspec);
> ++static void gst_ffmpegmux_get_property (GObject * object, guint prop_id,
> ++ GValue * value, GParamSpec * pspec);
> ++
> ++static GstCaps *gst_ffmpegmux_get_id_caps (enum CodecID *id_list);
> ++static void gst_ffmpeg_mux_simple_caps_set_int_list (GstCaps * caps,
> ++ const gchar * field, guint num, const gint * values);
> ++
> ++#define GST_FFMUX_PARAMS_QDATA g_quark_from_static_string("ffmux-params")
> ++
> ++static GstElementClass *parent_class = NULL;
> ++
> ++/*static guint gst_ffmpegmux_signals[LAST_SIGNAL] = { 0 }; */
> ++
> ++typedef struct
> ++{
> ++ const char *name;
> ++ const char *replacement;
> ++} GstFFMpegMuxReplacement;
> ++
> ++static const char *
> ++gst_ffmpegmux_get_replacement (const char *name)
> ++{
> ++ static const GstFFMpegMuxReplacement blacklist[] = {
> ++ {"avi", "avimux"},
> ++ {"matroska", "matroskamux"},
> ++ {"mov", "qtmux"},
> ++ {"mpegts", "mpegtsmux"},
> ++ {"mp4", "mp4mux"},
> ++ {"mpjpeg", "multipartmux"},
> ++ {"ogg", "oggmux"},
> ++ {"wav", "wavenc"},
> ++ {"webm", "webmmux"},
> ++ {"mxf", "mxfmux"},
> ++ {"3gp", "gppmux"},
> ++ {"yuv4mpegpipe", "y4menc"},
> ++ {"aiff", "aiffmux"},
> ++ {"adts", "aacparse"},
> ++ {"asf", "asfmux"},
> ++ {"asf_stream", "asfmux"},
> ++ {"flv", "flvmux"},
> ++ {"mp3", "id3v2mux"},
> ++ {"mp2", "id3v2mux"}
> ++ };
> ++ int i;
> ++
> ++ for (i = 0; i < sizeof (blacklist) / sizeof (blacklist[0]); i++) {
> ++ if (strcmp (blacklist[i].name, name) == 0) {
> ++ return blacklist[i].replacement;
> ++ }
> ++ }
> ++
> ++ return NULL;
> ++}
> ++
> ++static gboolean
> ++gst_ffmpegmux_is_formatter (const char *name)
> ++{
> ++ static const char *replace[] = {
> ++ "mp2", "mp3", NULL
> ++ };
> ++ int i;
> ++
> ++ for (i = 0; replace[i]; i++)
> ++ if (strcmp (replace[i], name) == 0)
> ++ return TRUE;
> ++ return FALSE;
> ++}
> ++
> ++static void
> ++gst_ffmpegmux_base_init (gpointer g_class)
> ++{
> ++ GstFFMpegMuxClass *klass = (GstFFMpegMuxClass *) g_class;
> ++ GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
> ++ GstPadTemplate *videosinktempl, *audiosinktempl, *srctempl;
> ++ AVOutputFormat *in_plugin;
> ++ GstCaps *srccaps, *audiosinkcaps, *videosinkcaps;
> ++ enum CodecID *video_ids = NULL, *audio_ids = NULL;
> ++ gchar *longname, *description;
> ++ const char *replacement;
> ++ gboolean is_formatter;
> ++
> ++ in_plugin =
> ++ (AVOutputFormat *) g_type_get_qdata (G_OBJECT_CLASS_TYPE (klass),
> ++ GST_FFMUX_PARAMS_QDATA);
> ++ g_assert (in_plugin != NULL);
> ++
> ++ /* construct the element details struct */
> ++ replacement = gst_ffmpegmux_get_replacement (in_plugin->name);
> ++ is_formatter = gst_ffmpegmux_is_formatter (in_plugin->name);
> ++ if (replacement != NULL) {
> ++ longname =
> ++ g_strdup_printf ("FFmpeg %s %s (not recommended, use %s instead)",
> ++ in_plugin->long_name, is_formatter ? "formatter" : "muxer",
> ++ replacement);
> ++ description =
> ++ g_strdup_printf ("FFmpeg %s %s (not recommended, use %s instead)",
> ++ in_plugin->long_name, is_formatter ? "formatter" : "muxer",
> ++ replacement);
> ++ } else {
> ++ longname = g_strdup_printf ("FFmpeg %s %s", in_plugin->long_name,
> ++ is_formatter ? "formatter" : "muxer");
> ++ description = g_strdup_printf ("FFmpeg %s %s", in_plugin->long_name,
> ++ is_formatter ? "formatter" : "muxer");
> ++ }
> ++ gst_element_class_set_details_simple (element_class, longname,
> ++ is_formatter ? "Formatter/Metadata" : "Codec/Muxer", description,
> ++ "Wim Taymans <wim.taymans at chello.be>, "
> ++ "Ronald Bultje <rbultje at ronald.bitfreak.net>");
> ++ g_free (longname);
> ++ g_free (description);
> ++
> ++ /* Try to find the caps that belongs here */
> ++ srccaps = gst_ffmpeg_formatid_to_caps (in_plugin->name);
> ++ if (!srccaps) {
> ++ GST_DEBUG ("Couldn't get source caps for muxer '%s', skipping format",
> ++ in_plugin->name);
> ++ goto beach;
> ++ }
> ++
> ++ if (!gst_ffmpeg_formatid_get_codecids (in_plugin->name,
> ++ &video_ids, &audio_ids, in_plugin)) {
> ++ gst_caps_unref (srccaps);
> ++ GST_DEBUG
> ++ ("Couldn't get sink caps for muxer '%s'. Most likely because no input format mapping exists.",
> ++ in_plugin->name);
> ++ goto beach;
> ++ }
> ++
> ++ videosinkcaps = video_ids ? gst_ffmpegmux_get_id_caps (video_ids) : NULL;
> ++ audiosinkcaps = audio_ids ? gst_ffmpegmux_get_id_caps (audio_ids) : NULL;
> ++
> ++ /* fix up allowed caps for some muxers */
> ++ /* FIXME : This should be in gstffmpegcodecmap.c ! */
> ++ if (strcmp (in_plugin->name, "flv") == 0) {
> ++ const gint rates[] = { 44100, 22050, 11025 };
> ++
> ++ gst_ffmpeg_mux_simple_caps_set_int_list (audiosinkcaps, "rate", 3, rates);
> ++ } else if (strcmp (in_plugin->name, "gif") == 0) {
> ++ if (videosinkcaps)
> ++ gst_caps_unref (videosinkcaps);
> ++
> ++ videosinkcaps =
> ++ gst_caps_from_string ("video/x-raw-rgb, bpp=(int)24, depth=(int)24");
> ++ }
> ++
> ++ /* pad templates */
> ++ srctempl = gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS, srccaps);
> ++ gst_element_class_add_pad_template (element_class, srctempl);
> ++
> ++ if (audiosinkcaps) {
> ++ audiosinktempl = gst_pad_template_new ("audio_%d",
> ++ GST_PAD_SINK, GST_PAD_REQUEST, audiosinkcaps);
> ++ gst_element_class_add_pad_template (element_class, audiosinktempl);
> ++ }
> ++
> ++ if (videosinkcaps) {
> ++ videosinktempl = gst_pad_template_new ("video_%d",
> ++ GST_PAD_SINK, GST_PAD_REQUEST, videosinkcaps);
> ++ gst_element_class_add_pad_template (element_class, videosinktempl);
> ++ }
> ++
> ++beach:
> ++ klass->in_plugin = in_plugin;
> ++}
> ++
> ++static void
> ++gst_ffmpegmux_class_init (GstFFMpegMuxClass * klass)
> ++{
> ++ GObjectClass *gobject_class;
> ++ GstElementClass *gstelement_class;
> ++
> ++ gobject_class = (GObjectClass *) klass;
> ++ gstelement_class = (GstElementClass *) klass;
> ++
> ++ parent_class = g_type_class_peek_parent (klass);
> ++
> ++ gobject_class->set_property = GST_DEBUG_FUNCPTR (gst_ffmpegmux_set_property);
> ++ gobject_class->get_property = GST_DEBUG_FUNCPTR (gst_ffmpegmux_get_property);
> ++
> ++ g_object_class_install_property (gobject_class, PROP_PRELOAD,
> ++ g_param_spec_int ("preload", "preload",
> ++ "Set the initial demux-decode delay (in microseconds)", 0, G_MAXINT,
> ++ 0, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
> ++
> ++ g_object_class_install_property (gobject_class, PROP_MAXDELAY,
> ++ g_param_spec_int ("maxdelay", "maxdelay",
> ++ "Set the maximum demux-decode delay (in microseconds)", 0, G_MAXINT,
> ++ 0, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
> ++
> ++ gstelement_class->request_new_pad = gst_ffmpegmux_request_new_pad;
> ++ gstelement_class->change_state = gst_ffmpegmux_change_state;
> ++ gobject_class->finalize = gst_ffmpegmux_finalize;
> ++}
> ++
> ++static void
> ++gst_ffmpegmux_init (GstFFMpegMux * ffmpegmux, GstFFMpegMuxClass * g_class)
> ++{
> ++ GstElementClass *klass = GST_ELEMENT_CLASS (g_class);
> ++ GstFFMpegMuxClass *oclass = (GstFFMpegMuxClass *) klass;
> ++ GstPadTemplate *templ = gst_element_class_get_pad_template (klass, "src");
> ++
> ++ ffmpegmux->srcpad = gst_pad_new_from_template (templ, "src");
> ++ gst_pad_set_caps (ffmpegmux->srcpad, gst_pad_template_get_caps (templ));
> ++ gst_element_add_pad (GST_ELEMENT (ffmpegmux), ffmpegmux->srcpad);
> ++
> ++ ffmpegmux->collect = gst_collect_pads_new ();
> ++ gst_collect_pads_set_function (ffmpegmux->collect,
> ++ (GstCollectPadsFunction) gst_ffmpegmux_collected, ffmpegmux);
> ++
> ++ ffmpegmux->context = g_new0 (AVFormatContext, 1);
> ++ ffmpegmux->context->oformat = oclass->in_plugin;
> ++ ffmpegmux->context->nb_streams = 0;
> ++ g_snprintf (ffmpegmux->context->filename,
> ++ sizeof (ffmpegmux->context->filename),
> ++ "gstreamer://%p", ffmpegmux->srcpad);
> ++ ffmpegmux->opened = FALSE;
> ++
> ++ ffmpegmux->videopads = 0;
> ++ ffmpegmux->audiopads = 0;
> ++ ffmpegmux->preload = 0;
> ++ ffmpegmux->max_delay = 0;
> ++}
> ++
> ++static void
> ++gst_ffmpegmux_set_property (GObject * object, guint prop_id,
> ++ const GValue * value, GParamSpec * pspec)
> ++{
> ++ GstFFMpegMux *src;
> ++
> ++ src = (GstFFMpegMux *) object;
> ++
> ++ switch (prop_id) {
> ++ case PROP_PRELOAD:
> ++ src->preload = g_value_get_int (value);
> ++ break;
> ++ case PROP_MAXDELAY:
> ++ src->max_delay = g_value_get_int (value);
> ++ break;
> ++ default:
> ++ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
> ++ break;
> ++ }
> ++}
> ++
> ++static void
> ++gst_ffmpegmux_get_property (GObject * object, guint prop_id, GValue * value,
> ++ GParamSpec * pspec)
> ++{
> ++ GstFFMpegMux *src;
> ++
> ++ src = (GstFFMpegMux *) object;
> ++
> ++ switch (prop_id) {
> ++ case PROP_PRELOAD:
> ++ g_value_set_int (value, src->preload);
> ++ break;
> ++ case PROP_MAXDELAY:
> ++ g_value_set_int (value, src->max_delay);
> ++ break;
> ++ default:
> ++ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
> ++ break;
> ++ }
> ++}
> ++
> ++
> ++static void
> ++gst_ffmpegmux_finalize (GObject * object)
> ++{
> ++ GstFFMpegMux *ffmpegmux = (GstFFMpegMux *) object;
> ++
> ++ g_free (ffmpegmux->context);
> ++ gst_object_unref (ffmpegmux->collect);
> ++
> ++ if (G_OBJECT_CLASS (parent_class)->finalize)
> ++ G_OBJECT_CLASS (parent_class)->finalize (object);
> ++}
> ++
> ++static GstPad *
> ++gst_ffmpegmux_request_new_pad (GstElement * element,
> ++ GstPadTemplate * templ, const gchar * name)
> ++{
> ++ GstFFMpegMux *ffmpegmux = (GstFFMpegMux *) element;
> ++ GstElementClass *klass = GST_ELEMENT_GET_CLASS (element);
> ++ GstFFMpegMuxPad *collect_pad;
> ++ gchar *padname;
> ++ GstPad *pad;
> ++ AVStream *st;
> ++ enum AVMediaType type;
> ++ gint bitrate = 0, framesize = 0;
> ++
> ++ g_return_val_if_fail (templ != NULL, NULL);
> ++ g_return_val_if_fail (templ->direction == GST_PAD_SINK, NULL);
> ++ g_return_val_if_fail (ffmpegmux->opened == FALSE, NULL);
> ++
> ++ /* figure out a name that *we* like */
> ++ if (templ == gst_element_class_get_pad_template (klass, "video_%d")) {
> ++ padname = g_strdup_printf ("video_%d", ffmpegmux->videopads++);
> ++ type = AVMEDIA_TYPE_VIDEO;
> ++ bitrate = 64 * 1024;
> ++ framesize = 1152;
> ++ } else if (templ == gst_element_class_get_pad_template (klass, "audio_%d")) {
> ++ padname = g_strdup_printf ("audio_%d", ffmpegmux->audiopads++);
> ++ type = AVMEDIA_TYPE_AUDIO;
> ++ bitrate = 285 * 1024;
> ++ } else {
> ++ g_warning ("ffmux: unknown pad template!");
> ++ return NULL;
> ++ }
> ++
> ++ /* create pad */
> ++ pad = gst_pad_new_from_template (templ, padname);
> ++ collect_pad = (GstFFMpegMuxPad *)
> ++ gst_collect_pads_add_pad (ffmpegmux->collect, pad,
> ++ sizeof (GstFFMpegMuxPad));
> ++ collect_pad->padnum = ffmpegmux->context->nb_streams;
> ++
> ++ /* small hack to put our own event pad function and chain up to collect pad */
> ++ ffmpegmux->event_function = GST_PAD_EVENTFUNC (pad);
> ++ gst_pad_set_event_function (pad,
> ++ GST_DEBUG_FUNCPTR (gst_ffmpegmux_sink_event));
> ++
> ++ gst_pad_set_setcaps_function (pad, GST_DEBUG_FUNCPTR (gst_ffmpegmux_setcaps));
> ++ gst_element_add_pad (element, pad);
> ++
> ++ /* AVStream needs to be created */
> ++ st = av_new_stream (ffmpegmux->context, collect_pad->padnum);
> ++ st->codec->codec_type = type;
> ++ st->codec->codec_id = CODEC_ID_NONE; /* this is a check afterwards */
> ++ st->stream_copy = 1; /* we're not the actual encoder */
> ++ st->codec->bit_rate = bitrate;
> ++ st->codec->frame_size = framesize;
> ++ /* we fill in codec during capsnego */
> ++
> ++ /* we love debug output (c) (tm) (r) */
> ++ GST_DEBUG ("Created %s pad for ffmux_%s element",
> ++ padname, ((GstFFMpegMuxClass *) klass)->in_plugin->name);
> ++ g_free (padname);
> ++
> ++ return pad;
> ++}
> ++
> ++/**
> ++ * gst_ffmpegmux_setcaps
> ++ * @pad: #GstPad
> ++ * @caps: New caps.
> ++ *
> ++ * Set caps to pad.
> ++ *
> ++ * Returns: #TRUE on success.
> ++ */
> ++static gboolean
> ++gst_ffmpegmux_setcaps (GstPad * pad, GstCaps * caps)
> ++{
> ++ GstFFMpegMux *ffmpegmux = (GstFFMpegMux *) (gst_pad_get_parent (pad));
> ++ GstFFMpegMuxPad *collect_pad;
> ++ AVStream *st;
> ++
> ++ collect_pad = (GstFFMpegMuxPad *) gst_pad_get_element_private (pad);
> ++
> ++ st = ffmpegmux->context->streams[collect_pad->padnum];
> ++ ffmpegmux->context->preload = ffmpegmux->preload;
> ++ ffmpegmux->context->max_delay = ffmpegmux->max_delay;
> ++
> ++ /* for the format-specific guesses, we'll go to
> ++ * our famous codec mapper */
> ++ if (gst_ffmpeg_caps_to_codecid (caps, st->codec) == CODEC_ID_NONE)
> ++ goto not_accepted;
> ++
> ++ /* copy over the aspect ratios, ffmpeg expects the stream aspect to match the
> ++ * codec aspect. */
> ++ st->sample_aspect_ratio = st->codec->sample_aspect_ratio;
> ++
> ++ GST_LOG_OBJECT (pad, "accepted caps %" GST_PTR_FORMAT, caps);
> ++ return TRUE;
> ++
> ++ /* ERRORS */
> ++not_accepted:
> ++ {
> ++ GST_LOG_OBJECT (pad, "rejecting caps %" GST_PTR_FORMAT, caps);
> ++ return FALSE;
> ++ }
> ++}
> ++
> ++
> ++static gboolean
> ++gst_ffmpegmux_sink_event (GstPad * pad, GstEvent * event)
> ++{
> ++ GstFFMpegMux *ffmpegmux = (GstFFMpegMux *) gst_pad_get_parent (pad);
> ++ gboolean res = TRUE;
> ++
> ++ switch (GST_EVENT_TYPE (event)) {
> ++ case GST_EVENT_TAG:{
> ++ GstTagList *taglist;
> ++ GstTagSetter *setter = GST_TAG_SETTER (ffmpegmux);
> ++ const GstTagMergeMode mode = gst_tag_setter_get_tag_merge_mode (setter);
> ++
> ++ gst_event_parse_tag (event, &taglist);
> ++ gst_tag_setter_merge_tags (setter, taglist, mode);
> ++ break;
> ++ }
> ++ default:
> ++ break;
> ++ }
> ++
> ++ /* chaining up to collectpads default event function */
> ++ res = ffmpegmux->event_function (pad, event);
> ++
> ++ gst_object_unref (ffmpegmux);
> ++ return res;
> ++}
> ++
> ++static GstFlowReturn
> ++gst_ffmpegmux_collected (GstCollectPads * pads, gpointer user_data)
> ++{
> ++ GstFFMpegMux *ffmpegmux = (GstFFMpegMux *) user_data;
> ++ GSList *collected;
> ++ GstFFMpegMuxPad *best_pad;
> ++ GstClockTime best_time;
> ++#if 0
> ++ /* Re-enable once converted to new AVMetaData API
> ++ * See #566605
> ++ */
> ++ const GstTagList *tags;
> ++#endif
> ++
> ++ /* open "file" (gstreamer protocol to next element) */
> ++ if (!ffmpegmux->opened) {
> ++ int open_flags = URL_WRONLY;
> ++
> ++ /* we do need all streams to have started capsnego,
> ++ * or things will go horribly wrong */
> ++ for (collected = ffmpegmux->collect->data; collected;
> ++ collected = g_slist_next (collected)) {
> ++ GstFFMpegMuxPad *collect_pad = (GstFFMpegMuxPad *) collected->data;
> ++ AVStream *st = ffmpegmux->context->streams[collect_pad->padnum];
> ++
> ++ /* check whether the pad has successfully completed capsnego */
> ++ if (st->codec->codec_id == CODEC_ID_NONE) {
> ++ GST_ELEMENT_ERROR (ffmpegmux, CORE, NEGOTIATION, (NULL),
> ++ ("no caps set on stream %d (%s)", collect_pad->padnum,
> ++ (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) ?
> ++ "video" : "audio"));
> ++ return GST_FLOW_ERROR;
> ++ }
> ++ /* set framerate for audio */
> ++ if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
> ++ switch (st->codec->codec_id) {
> ++ case CODEC_ID_PCM_S16LE:
> ++ case CODEC_ID_PCM_S16BE:
> ++ case CODEC_ID_PCM_U16LE:
> ++ case CODEC_ID_PCM_U16BE:
> ++ case CODEC_ID_PCM_S8:
> ++ case CODEC_ID_PCM_U8:
> ++ st->codec->frame_size = 1;
> ++ break;
> ++ default:
> ++ {
> ++ GstBuffer *buffer;
> ++
> ++ /* FIXME : This doesn't work for RAW AUDIO...
> ++ * in fact I'm wondering if it even works for any kind of audio... */
> ++ buffer = gst_collect_pads_peek (ffmpegmux->collect,
> ++ (GstCollectData *) collect_pad);
> ++ if (buffer) {
> ++ st->codec->frame_size =
> ++ st->codec->sample_rate *
> ++ GST_BUFFER_DURATION (buffer) / GST_SECOND;
> ++ gst_buffer_unref (buffer);
> ++ }
> ++ }
> ++ }
> ++ }
> ++ }
> ++
> ++#if 0
> ++ /* Re-enable once converted to new AVMetaData API
> ++ * See #566605
> ++ */
> ++
> ++ /* tags */
> ++ tags = gst_tag_setter_get_tag_list (GST_TAG_SETTER (ffmpegmux));
> ++ if (tags) {
> ++ gint i;
> ++ gchar *s;
> ++
> ++ /* get the interesting ones */
> ++ if (gst_tag_list_get_string (tags, GST_TAG_TITLE, &s)) {
> ++ strncpy (ffmpegmux->context->title, s,
> ++ sizeof (ffmpegmux->context->title));
> ++ }
> ++ if (gst_tag_list_get_string (tags, GST_TAG_ARTIST, &s)) {
> ++ strncpy (ffmpegmux->context->author, s,
> ++ sizeof (ffmpegmux->context->author));
> ++ }
> ++ if (gst_tag_list_get_string (tags, GST_TAG_COPYRIGHT, &s)) {
> ++ strncpy (ffmpegmux->context->copyright, s,
> ++ sizeof (ffmpegmux->context->copyright));
> ++ }
> ++ if (gst_tag_list_get_string (tags, GST_TAG_COMMENT, &s)) {
> ++ strncpy (ffmpegmux->context->comment, s,
> ++ sizeof (ffmpegmux->context->comment));
> ++ }
> ++ if (gst_tag_list_get_string (tags, GST_TAG_ALBUM, &s)) {
> ++ strncpy (ffmpegmux->context->album, s,
> ++ sizeof (ffmpegmux->context->album));
> ++ }
> ++ if (gst_tag_list_get_string (tags, GST_TAG_GENRE, &s)) {
> ++ strncpy (ffmpegmux->context->genre, s,
> ++ sizeof (ffmpegmux->context->genre));
> ++ }
> ++ if (gst_tag_list_get_int (tags, GST_TAG_TRACK_NUMBER, &i)) {
> ++ ffmpegmux->context->track = i;
> ++ }
> ++ }
> ++#endif
> ++
> ++ /* set the streamheader flag for gstffmpegprotocol if codec supports it */
> ++ if (!strcmp (ffmpegmux->context->oformat->name, "flv")) {
> ++ open_flags |= GST_FFMPEG_URL_STREAMHEADER;
> ++ }
> ++
> ++ if (url_fopen (&ffmpegmux->context->pb,
> ++ ffmpegmux->context->filename, open_flags) < 0) {
> ++ GST_ELEMENT_ERROR (ffmpegmux, LIBRARY, TOO_LAZY, (NULL),
> ++ ("Failed to open stream context in ffmux"));
> ++ return GST_FLOW_ERROR;
> ++ }
> ++
> ++ if (av_set_parameters (ffmpegmux->context, NULL) < 0) {
> ++ GST_ELEMENT_ERROR (ffmpegmux, LIBRARY, INIT, (NULL),
> ++ ("Failed to initialize muxer"));
> ++ return GST_FLOW_ERROR;
> ++ }
> ++
> ++ /* now open the mux format */
> ++ if (av_write_header (ffmpegmux->context) < 0) {
> ++ GST_ELEMENT_ERROR (ffmpegmux, LIBRARY, SETTINGS, (NULL),
> ++ ("Failed to write file header - check codec settings"));
> ++ return GST_FLOW_ERROR;
> ++ }
> ++
> ++ /* we're now opened */
> ++ ffmpegmux->opened = TRUE;
> ++
> ++ /* flush the header so it will be used as streamheader */
> ++ put_flush_packet (ffmpegmux->context->pb);
> ++ }
> ++
> ++ /* take the one with earliest timestamp,
> ++ * and push it forward */
> ++ best_pad = NULL;
> ++ best_time = GST_CLOCK_TIME_NONE;
> ++ for (collected = ffmpegmux->collect->data; collected;
> ++ collected = g_slist_next (collected)) {
> ++ GstFFMpegMuxPad *collect_pad = (GstFFMpegMuxPad *) collected->data;
> ++ GstBuffer *buffer = gst_collect_pads_peek (ffmpegmux->collect,
> ++ (GstCollectData *) collect_pad);
> ++
> ++ /* if there's no buffer, just continue */
> ++ if (buffer == NULL) {
> ++ continue;
> ++ }
> ++
> ++ /* if we have no buffer yet, just use the first one */
> ++ if (best_pad == NULL) {
> ++ best_pad = collect_pad;
> ++ best_time = GST_BUFFER_TIMESTAMP (buffer);
> ++ goto next_pad;
> ++ }
> ++
> ++ /* if we do have one, only use this one if it's older */
> ++ if (GST_BUFFER_TIMESTAMP (buffer) < best_time) {
> ++ best_time = GST_BUFFER_TIMESTAMP (buffer);
> ++ best_pad = collect_pad;
> ++ }
> ++
> ++ next_pad:
> ++ gst_buffer_unref (buffer);
> ++
> ++ /* Mux buffers with invalid timestamp first */
> ++ if (!GST_CLOCK_TIME_IS_VALID (best_time))
> ++ break;
> ++ }
> ++
> ++ /* now handle the buffer, or signal EOS if we have
> ++ * no buffers left */
> ++ if (best_pad != NULL) {
> ++ GstBuffer *buf;
> ++ AVPacket pkt;
> ++ gboolean need_free = FALSE;
> ++
> ++ /* push out current buffer */
> ++ buf = gst_collect_pads_pop (ffmpegmux->collect,
> ++ (GstCollectData *) best_pad);
> ++
> ++ ffmpegmux->context->streams[best_pad->padnum]->codec->frame_number++;
> ++
> ++ /* set time */
> ++ pkt.pts = gst_ffmpeg_time_gst_to_ff (GST_BUFFER_TIMESTAMP (buf),
> ++ ffmpegmux->context->streams[best_pad->padnum]->time_base);
> ++ pkt.dts = pkt.pts;
> ++
> ++ if (strcmp (ffmpegmux->context->oformat->name, "gif") == 0) {
> ++ AVStream *st = ffmpegmux->context->streams[best_pad->padnum];
> ++ AVPicture src, dst;
> ++
> ++ need_free = TRUE;
> ++ pkt.size = st->codec->width * st->codec->height * 3;
> ++ pkt.data = g_malloc (pkt.size);
> ++
> ++ dst.data[0] = pkt.data;
> ++ dst.data[1] = NULL;
> ++ dst.data[2] = NULL;
> ++ dst.linesize[0] = st->codec->width * 3;
> ++
> ++ gst_ffmpeg_avpicture_fill (&src, GST_BUFFER_DATA (buf),
> ++ PIX_FMT_RGB24, st->codec->width, st->codec->height);
> ++
> ++ av_picture_copy (&dst, &src, PIX_FMT_RGB24,
> ++ st->codec->width, st->codec->height);
> ++ } else {
> ++ pkt.data = GST_BUFFER_DATA (buf);
> ++ pkt.size = GST_BUFFER_SIZE (buf);
> ++ }
> ++
> ++ pkt.stream_index = best_pad->padnum;
> ++ pkt.flags = 0;
> ++
> ++ if (!GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT))
> ++ pkt.flags |= AV_PKT_FLAG_KEY;
> ++
> ++ if (GST_BUFFER_DURATION_IS_VALID (buf))
> ++ pkt.duration =
> ++ gst_ffmpeg_time_gst_to_ff (GST_BUFFER_DURATION (buf),
> ++ ffmpegmux->context->streams[best_pad->padnum]->time_base);
> ++ else
> ++ pkt.duration = 0;
> ++ av_write_frame (ffmpegmux->context, &pkt);
> ++ gst_buffer_unref (buf);
> ++ if (need_free)
> ++ g_free (pkt.data);
> ++ } else {
> ++ /* close down */
> ++ av_write_trailer (ffmpegmux->context);
> ++ ffmpegmux->opened = FALSE;
> ++ put_flush_packet (ffmpegmux->context->pb);
> ++ url_fclose (ffmpegmux->context->pb);
> ++ gst_pad_push_event (ffmpegmux->srcpad, gst_event_new_eos ());
> ++ return GST_FLOW_UNEXPECTED;
> ++ }
> ++
> ++ return GST_FLOW_OK;
> ++}
> ++
> ++static GstStateChangeReturn
> ++gst_ffmpegmux_change_state (GstElement * element, GstStateChange transition)
> ++{
> ++ GstFlowReturn ret;
> ++ GstFFMpegMux *ffmpegmux = (GstFFMpegMux *) (element);
> ++
> ++ switch (transition) {
> ++ case GST_STATE_CHANGE_NULL_TO_READY:
> ++ break;
> ++ case GST_STATE_CHANGE_READY_TO_PAUSED:
> ++ gst_collect_pads_start (ffmpegmux->collect);
> ++ break;
> ++ case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
> ++ break;
> ++ case GST_STATE_CHANGE_PAUSED_TO_READY:
> ++ gst_collect_pads_stop (ffmpegmux->collect);
> ++ break;
> ++ default:
> ++ break;
> ++ }
> ++
> ++ ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
> ++
> ++ switch (transition) {
> ++ case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
> ++ break;
> ++ case GST_STATE_CHANGE_PAUSED_TO_READY:
> ++ gst_tag_setter_reset_tags (GST_TAG_SETTER (ffmpegmux));
> ++ if (ffmpegmux->opened) {
> ++ ffmpegmux->opened = FALSE;
> ++ url_fclose (ffmpegmux->context->pb);
> ++ }
> ++ break;
> ++ case GST_STATE_CHANGE_READY_TO_NULL:
> ++ break;
> ++ default:
> ++ break;
> ++ }
> ++
> ++ return ret;
> ++}
> ++
> ++static GstCaps *
> ++gst_ffmpegmux_get_id_caps (enum CodecID *id_list)
> ++{
> ++ GstCaps *caps, *t;
> ++ gint i;
> ++
> ++ caps = gst_caps_new_empty ();
> ++ for (i = 0; id_list[i] != CODEC_ID_NONE; i++) {
> ++ if ((t = gst_ffmpeg_codecid_to_caps (id_list[i], NULL, TRUE)))
> ++ gst_caps_append (caps, t);
> ++ }
> ++ if (gst_caps_is_empty (caps)) {
> ++ gst_caps_unref (caps);
> ++ return NULL;
> ++ }
> ++
> ++ return caps;
> ++}
> ++
> ++/* set a list of integer values on the caps, e.g. for sample rates */
> ++static void
> ++gst_ffmpeg_mux_simple_caps_set_int_list (GstCaps * caps, const gchar * field,
> ++ guint num, const gint * values)
> ++{
> ++ GValue list = { 0, };
> ++ GValue val = { 0, };
> ++ gint i;
> ++
> ++ g_return_if_fail (GST_CAPS_IS_SIMPLE (caps));
> ++
> ++ g_value_init (&list, GST_TYPE_LIST);
> ++ g_value_init (&val, G_TYPE_INT);
> ++
> ++ for (i = 0; i < num; ++i) {
> ++ g_value_set_int (&val, values[i]);
> ++ gst_value_list_append_value (&list, &val);
> ++ }
> ++
> ++ gst_structure_set_value (gst_caps_get_structure (caps, 0), field, &list);
> ++
> ++ g_value_unset (&val);
> ++ g_value_unset (&list);
> ++}
> ++
> ++gboolean
> ++gst_ffmpegmux_register (GstPlugin * plugin)
> ++{
> ++ GTypeInfo typeinfo = {
> ++ sizeof (GstFFMpegMuxClass),
> ++ (GBaseInitFunc) gst_ffmpegmux_base_init,
> ++ NULL,
> ++ (GClassInitFunc) gst_ffmpegmux_class_init,
> ++ NULL,
> ++ NULL,
> ++ sizeof (GstFFMpegMux),
> ++ 0,
> ++ (GInstanceInitFunc) gst_ffmpegmux_init,
> ++ };
> ++ static const GInterfaceInfo tag_setter_info = {
> ++ NULL, NULL, NULL
> ++ };
> ++ GType type;
> ++ AVOutputFormat *in_plugin;
> ++
> ++ in_plugin = av_oformat_next (NULL);
> ++
> ++ GST_LOG ("Registering muxers");
> ++
> ++ while (in_plugin) {
> ++ gchar *type_name;
> ++ gchar *p;
> ++ GstRank rank = GST_RANK_MARGINAL;
> ++
> ++ if ((!strncmp (in_plugin->name, "u16", 3)) ||
> ++ (!strncmp (in_plugin->name, "s16", 3)) ||
> ++ (!strncmp (in_plugin->name, "u24", 3)) ||
> ++ (!strncmp (in_plugin->name, "s24", 3)) ||
> ++ (!strncmp (in_plugin->name, "u8", 2)) ||
> ++ (!strncmp (in_plugin->name, "s8", 2)) ||
> ++ (!strncmp (in_plugin->name, "u32", 3)) ||
> ++ (!strncmp (in_plugin->name, "s32", 3)) ||
> ++ (!strncmp (in_plugin->name, "f32", 3)) ||
> ++ (!strncmp (in_plugin->name, "f64", 3)) ||
> ++ (!strncmp (in_plugin->name, "raw", 3)) ||
> ++ (!strncmp (in_plugin->name, "crc", 3)) ||
> ++ (!strncmp (in_plugin->name, "null", 4)) ||
> ++ (!strncmp (in_plugin->name, "gif", 3)) ||
> ++ (!strncmp (in_plugin->name, "frame", 5)) ||
> ++ (!strncmp (in_plugin->name, "image", 5)) ||
> ++ (!strncmp (in_plugin->name, "mulaw", 5)) ||
> ++ (!strncmp (in_plugin->name, "alaw", 4)) ||
> ++ (!strncmp (in_plugin->name, "h26", 3)) ||
> ++ (!strncmp (in_plugin->name, "rtp", 3)) ||
> ++ (!strncmp (in_plugin->name, "ass", 3)) ||
> ++ (!strncmp (in_plugin->name, "ffmetadata", 10)) ||
> ++ (!strncmp (in_plugin->name, "srt", 3))
> ++ ) {
> ++ GST_LOG ("Ignoring muxer %s", in_plugin->name);
> ++ goto next;
> ++ }
> ++
> ++ if ((!strncmp (in_plugin->long_name, "raw ", 4))) {
> ++ GST_LOG ("Ignoring raw muxer %s", in_plugin->name);
> ++ goto next;
> ++ }
> ++
> ++ if (gst_ffmpegmux_get_replacement (in_plugin->name))
> ++ rank = GST_RANK_NONE;
> ++
> ++ /* FIXME : We need a fast way to know whether we have mappings for this
> ++ * muxer type. */
> ++
> ++ /* construct the type */
> ++ type_name = g_strdup_printf ("ffmux_%s", in_plugin->name);
> ++
> ++ p = type_name;
> ++
> ++ while (*p) {
> ++ if (*p == '.')
> ++ *p = '_';
> ++ p++;
> ++ }
> ++
> ++ type = g_type_from_name (type_name);
> ++
> ++ if (!type) {
> ++ /* create the type now */
> ++ type = g_type_register_static (GST_TYPE_ELEMENT, type_name, &typeinfo, 0);
> ++ g_type_set_qdata (type, GST_FFMUX_PARAMS_QDATA, (gpointer) in_plugin);
> ++ g_type_add_interface_static (type, GST_TYPE_TAG_SETTER, &tag_setter_info);
> ++ }
> ++
> ++ if (!gst_element_register (plugin, type_name, rank, type)) {
> ++ g_free (type_name);
> ++ return FALSE;
> ++ }
> ++
> ++ g_free (type_name);
> ++
> ++ next:
> ++ in_plugin = av_oformat_next (in_plugin);
> ++ }
> ++
> ++ GST_LOG ("Finished registering muxers");
> ++
> ++ return TRUE;
> ++}
> +diff -uNr gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegprotocol.c gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegprotocol.c
> +--- gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegprotocol.c 2011-07-12 16:35:28.000000000 +0200
> ++++ gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegprotocol.c 2014-08-08 15:26:07.875857555 +0200
> +@@ -46,63 +46,14 @@
> + };
> +
> + static int
> +-gst_ffmpegdata_open (URLContext * h, const char *filename, int flags)
> +-{
> +- GstProtocolInfo *info;
> +- GstPad *pad;
> +-
> +- GST_LOG ("Opening %s", filename);
> +-
> +- info = g_new0 (GstProtocolInfo, 1);
> +-
> +- info->set_streamheader = flags & GST_FFMPEG_URL_STREAMHEADER;
> +- flags &= ~GST_FFMPEG_URL_STREAMHEADER;
> +- h->flags &= ~GST_FFMPEG_URL_STREAMHEADER;
> +-
> +- /* we don't support R/W together */
> +- if (flags != URL_RDONLY && flags != URL_WRONLY) {
> +- GST_WARNING ("Only read-only or write-only are supported");
> +- return -EINVAL;
> +- }
> +-
> +- if (sscanf (&filename[12], "%p", &pad) != 1) {
> +- GST_WARNING ("could not decode pad from %s", filename);
> +- return -EIO;
> +- }
> +-
> +- /* make sure we're a pad and that we're of the right type */
> +- g_return_val_if_fail (GST_IS_PAD (pad), -EINVAL);
> +-
> +- switch (flags) {
> +- case URL_RDONLY:
> +- g_return_val_if_fail (GST_PAD_IS_SINK (pad), -EINVAL);
> +- break;
> +- case URL_WRONLY:
> +- g_return_val_if_fail (GST_PAD_IS_SRC (pad), -EINVAL);
> +- break;
> +- }
> +-
> +- info->eos = FALSE;
> +- info->pad = pad;
> +- info->offset = 0;
> +-
> +- h->priv_data = (void *) info;
> +- h->is_streamed = FALSE;
> +- h->max_packet_size = 0;
> +-
> +- return 0;
> +-}
> +-
> +-static int
> +-gst_ffmpegdata_peek (URLContext * h, unsigned char *buf, int size)
> ++gst_ffmpegdata_peek (void *priv_data, unsigned char *buf, int size)
> + {
> + GstProtocolInfo *info;
> + GstBuffer *inbuf = NULL;
> + GstFlowReturn ret;
> + int total = 0;
> +
> +- g_return_val_if_fail (h->flags == URL_RDONLY, AVERROR (EIO));
> +- info = (GstProtocolInfo *) h->priv_data;
> ++ info = (GstProtocolInfo *) priv_data;
> +
> + GST_DEBUG ("Pulling %d bytes at position %" G_GUINT64_FORMAT, size,
> + info->offset);
> +@@ -134,17 +85,17 @@
> + }
> +
> + static int
> +-gst_ffmpegdata_read (URLContext * h, unsigned char *buf, int size)
> ++gst_ffmpegdata_read (void *priv_data, unsigned char *buf, int size)
> + {
> + gint res;
> + GstProtocolInfo *info;
> +
> +- info = (GstProtocolInfo *) h->priv_data;
> ++ info = (GstProtocolInfo *) priv_data;
> +
> + GST_DEBUG ("Reading %d bytes of data at position %" G_GUINT64_FORMAT, size,
> + info->offset);
> +
> +- res = gst_ffmpegdata_peek (h, buf, size);
> ++ res = gst_ffmpegdata_peek (priv_data, buf, size);
> + if (res >= 0)
> + info->offset += res;
> +
> +@@ -154,15 +105,13 @@
> + }
> +
> + static int
> +-gst_ffmpegdata_write (URLContext * h, const unsigned char *buf, int size)
> ++gst_ffmpegdata_write (void *priv_data, const unsigned char *buf, int size)
> + {
> + GstProtocolInfo *info;
> + GstBuffer *outbuf;
> +
> + GST_DEBUG ("Writing %d bytes", size);
> +- info = (GstProtocolInfo *) h->priv_data;
> +-
> +- g_return_val_if_fail (h->flags != URL_RDONLY, -EIO);
> ++ info = (GstProtocolInfo *) priv_data;
> +
> + /* create buffer and push data further */
> + if (gst_pad_alloc_buffer_and_set_caps (info->pad,
> +@@ -179,7 +128,7 @@
> + }
> +
> + static int64_t
> +-gst_ffmpegdata_seek (URLContext * h, int64_t pos, int whence)
> ++gst_ffmpegdata_seek (void *priv_data, int64_t pos, int whence)
> + {
> + GstProtocolInfo *info;
> + guint64 newpos = 0;
> +@@ -187,70 +136,62 @@
> + GST_DEBUG ("Seeking to %" G_GINT64_FORMAT ", whence=%d",
> + (gint64) pos, whence);
> +
> +- info = (GstProtocolInfo *) h->priv_data;
> ++ info = (GstProtocolInfo *) priv_data;
> +
> + /* TODO : if we are push-based, we need to return sensible info */
> +
> +- switch (h->flags) {
> +- case URL_RDONLY:
> +- {
> +- /* sinkpad */
> +- switch (whence) {
> +- case SEEK_SET:
> +- newpos = (guint64) pos;
> +- break;
> +- case SEEK_CUR:
> +- newpos = info->offset + pos;
> +- break;
> +- case SEEK_END:
> +- case AVSEEK_SIZE:
> +- /* ffmpeg wants to know the current end position in bytes ! */
> +- {
> +- GstFormat format = GST_FORMAT_BYTES;
> +- gint64 duration;
> +-
> +- GST_DEBUG ("Seek end");
> +-
> +- if (gst_pad_is_linked (info->pad))
> +- if (gst_pad_query_duration (GST_PAD_PEER (info->pad), &format,
> +- &duration))
> +- newpos = ((guint64) duration) + pos;
> +- }
> +- break;
> +- default:
> +- g_assert (0);
> +- break;
> ++ if (GST_PAD_IS_SINK (info->pad)) {
> ++ /* sinkpad */
> ++ switch (whence) {
> ++ case SEEK_SET:
> ++ newpos = (guint64) pos;
> ++ break;
> ++ case SEEK_CUR:
> ++ newpos = info->offset + pos;
> ++ break;
> ++ case SEEK_END:
> ++ case AVSEEK_SIZE:
> ++ /* ffmpeg wants to know the current end position in bytes ! */
> ++ {
> ++ GstFormat format = GST_FORMAT_BYTES;
> ++ gint64 duration;
> ++
> ++ GST_DEBUG ("Seek end");
> ++
> ++ if (gst_pad_is_linked (info->pad))
> ++ if (gst_pad_query_duration (GST_PAD_PEER (info->pad), &format,
> ++ &duration))
> ++ newpos = ((guint64) duration) + pos;
> + }
> +- /* FIXME : implement case for push-based behaviour */
> +- if (whence != AVSEEK_SIZE)
> +- info->offset = newpos;
> ++ break;
> ++ default:
> ++ g_assert (0);
> ++ break;
> + }
> +- break;
> +- case URL_WRONLY:
> +- {
> +- /* srcpad */
> +- switch (whence) {
> +- case SEEK_SET:
> +- info->offset = (guint64) pos;
> +- gst_pad_push_event (info->pad, gst_event_new_new_segment
> +- (TRUE, 1.0, GST_FORMAT_BYTES, info->offset,
> +- GST_CLOCK_TIME_NONE, info->offset));
> +- break;
> +- case SEEK_CUR:
> +- info->offset += pos;
> +- gst_pad_push_event (info->pad, gst_event_new_new_segment
> +- (TRUE, 1.0, GST_FORMAT_BYTES, info->offset,
> +- GST_CLOCK_TIME_NONE, info->offset));
> +- break;
> +- default:
> +- break;
> +- }
> +- newpos = info->offset;
> ++ /* FIXME : implement case for push-based behaviour */
> ++ if (whence != AVSEEK_SIZE)
> ++ info->offset = newpos;
> ++ } else if (GST_PAD_IS_SRC (info->pad)) {
> ++ /* srcpad */
> ++ switch (whence) {
> ++ case SEEK_SET:
> ++ info->offset = (guint64) pos;
> ++ gst_pad_push_event (info->pad, gst_event_new_new_segment
> ++ (TRUE, 1.0, GST_FORMAT_BYTES, info->offset,
> ++ GST_CLOCK_TIME_NONE, info->offset));
> ++ break;
> ++ case SEEK_CUR:
> ++ info->offset += pos;
> ++ gst_pad_push_event (info->pad, gst_event_new_new_segment
> ++ (TRUE, 1.0, GST_FORMAT_BYTES, info->offset,
> ++ GST_CLOCK_TIME_NONE, info->offset));
> ++ break;
> ++ default:
> ++ break;
> + }
> +- break;
> +- default:
> +- g_assert (0);
> +- break;
> ++ newpos = info->offset;
> ++ } else {
> ++ g_assert_not_reached ();
> + }
> +
> + GST_DEBUG ("Now at offset %" G_GUINT64_FORMAT " (returning %" G_GUINT64_FORMAT
> +@@ -258,85 +199,91 @@
> + return newpos;
> + }
> +
> +-static int
> +-gst_ffmpegdata_close (URLContext * h)
> ++int
> ++gst_ffmpegdata_close (AVIOContext * h)
> + {
> + GstProtocolInfo *info;
> +
> +- info = (GstProtocolInfo *) h->priv_data;
> ++ info = (GstProtocolInfo *) h->opaque;
> + if (info == NULL)
> + return 0;
> +
> + GST_LOG ("Closing file");
> +
> +- switch (h->flags) {
> +- case URL_WRONLY:
> +- {
> +- /* send EOS - that closes down the stream */
> +- gst_pad_push_event (info->pad, gst_event_new_eos ());
> +- break;
> +- }
> +- default:
> +- break;
> ++ if (GST_PAD_IS_SRC (info->pad)) {
> ++ /* send EOS - that closes down the stream */
> ++ gst_pad_push_event (info->pad, gst_event_new_eos ());
> + }
> +
> + /* clean up data */
> + g_free (info);
> +- h->priv_data = NULL;
> ++ h->opaque = NULL;
> ++
> ++ av_freep (&h->buffer);
> ++ av_free (h);
> +
> + return 0;
> + }
> +
> ++int
> ++gst_ffmpegdata_open (GstPad * pad, int flags, AVIOContext ** context)
> ++{
> ++ GstProtocolInfo *info;
> ++ static const int buffer_size = 4096;
> ++ unsigned char *buffer = NULL;
> +
> +-URLProtocol gstreamer_protocol = {
> +- /*.name = */ "gstreamer",
> +- /*.url_open = */ gst_ffmpegdata_open,
> +- /*.url_read = */ gst_ffmpegdata_read,
> +- /*.url_write = */ gst_ffmpegdata_write,
> +- /*.url_seek = */ gst_ffmpegdata_seek,
> +- /*.url_close = */ gst_ffmpegdata_close,
> +-};
> ++ info = g_new0 (GstProtocolInfo, 1);
> +
> ++ info->set_streamheader = flags & GST_FFMPEG_URL_STREAMHEADER;
> ++ flags &= ~GST_FFMPEG_URL_STREAMHEADER;
> +
> +-/* specialized protocol for cross-thread pushing,
> +- * based on ffmpeg's pipe protocol */
> ++ /* we don't support R/W together */
> ++ if ((flags & AVIO_FLAG_WRITE) && (flags & AVIO_FLAG_READ)) {
> ++ GST_WARNING ("Only read-only or write-only are supported");
> ++ return -EINVAL;
> ++ }
> +
> +-static int
> +-gst_ffmpeg_pipe_open (URLContext * h, const char *filename, int flags)
> +-{
> +- GstFFMpegPipe *ffpipe;
> ++ /* make sure we're a pad and that we're of the right type */
> ++ g_return_val_if_fail (GST_IS_PAD (pad), -EINVAL);
> +
> +- GST_LOG ("Opening %s", filename);
> ++ if ((flags & AVIO_FLAG_READ))
> ++ g_return_val_if_fail (GST_PAD_IS_SINK (pad), -EINVAL);
> ++ if ((flags & AVIO_FLAG_WRITE))
> ++ g_return_val_if_fail (GST_PAD_IS_SRC (pad), -EINVAL);
> +
> +- /* we don't support W together */
> +- if (flags != URL_RDONLY) {
> +- GST_WARNING ("Only read-only is supported");
> +- return -EINVAL;
> +- }
> ++ info->eos = FALSE;
> ++ info->pad = pad;
> ++ info->offset = 0;
> +
> +- if (sscanf (&filename[10], "%p", &ffpipe) != 1) {
> +- GST_WARNING ("could not decode pipe info from %s", filename);
> +- return -EIO;
> ++ buffer = av_malloc (buffer_size);
> ++ if (buffer == NULL) {
> ++ GST_WARNING ("Failed to allocate buffer");
> ++ return -ENOMEM;
> + }
> +
> +- /* sanity check */
> +- g_return_val_if_fail (GST_IS_ADAPTER (ffpipe->adapter), -EINVAL);
> +-
> +- h->priv_data = (void *) ffpipe;
> +- h->is_streamed = TRUE;
> +- h->max_packet_size = 0;
> ++ *context =
> ++ avio_alloc_context (buffer, buffer_size, flags, (void *) info,
> ++ gst_ffmpegdata_read, gst_ffmpegdata_write, gst_ffmpegdata_seek);
> ++ (*context)->seekable = AVIO_SEEKABLE_NORMAL;
> ++ if (!(flags & AVIO_FLAG_WRITE)) {
> ++ (*context)->buf_ptr = (*context)->buf_end;
> ++ (*context)->write_flag = 0;
> ++ }
> +
> + return 0;
> + }
> +
> ++/* specialized protocol for cross-thread pushing,
> ++ * based on ffmpeg's pipe protocol */
> ++
> + static int
> +-gst_ffmpeg_pipe_read (URLContext * h, unsigned char *buf, int size)
> ++gst_ffmpeg_pipe_read (void *priv_data, unsigned char *buf, int size)
> + {
> + GstFFMpegPipe *ffpipe;
> + const guint8 *data;
> + guint available;
> +
> +- ffpipe = (GstFFMpegPipe *) h->priv_data;
> ++ ffpipe = (GstFFMpegPipe *) priv_data;
> +
> + GST_LOG ("requested size %d", size);
> +
> +@@ -367,21 +314,38 @@
> + return size;
> + }
> +
> +-static int
> +-gst_ffmpeg_pipe_close (URLContext * h)
> ++int
> ++gst_ffmpeg_pipe_close (AVIOContext * h)
> + {
> + GST_LOG ("Closing pipe");
> +
> +- h->priv_data = NULL;
> ++ h->opaque = NULL;
> ++ av_freep (&h->buffer);
> ++ av_free (h);
> +
> + return 0;
> + }
> +
> +-URLProtocol gstpipe_protocol = {
> +- "gstpipe",
> +- gst_ffmpeg_pipe_open,
> +- gst_ffmpeg_pipe_read,
> +- NULL,
> +- NULL,
> +- gst_ffmpeg_pipe_close,
> +-};
> ++int
> ++gst_ffmpeg_pipe_open (GstFFMpegPipe * ffpipe, int flags, AVIOContext ** context)
> ++{
> ++ static const int buffer_size = 4096;
> ++ unsigned char *buffer = NULL;
> ++
> ++ /* sanity check */
> ++ g_return_val_if_fail (GST_IS_ADAPTER (ffpipe->adapter), -EINVAL);
> ++
> ++ buffer = av_malloc (buffer_size);
> ++ if (buffer == NULL) {
> ++ GST_WARNING ("Failed to allocate buffer");
> ++ return -ENOMEM;
> ++ }
> ++
> ++ *context =
> ++ avio_alloc_context (buffer, buffer_size, 0, (void *) ffpipe,
> ++ gst_ffmpeg_pipe_read, NULL, NULL);
> ++ (*context)->seekable = 0;
> ++ (*context)->buf_ptr = (*context)->buf_end;
> ++
> ++ return 0;
> ++}
> +diff -uNr gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegutils.c gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegutils.c
> +--- gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegutils.c 2011-07-13 11:07:28.000000000 +0200
> ++++ gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegutils.c 2014-08-08 15:34:04.007874626 +0200
> +@@ -25,6 +25,11 @@
> + #ifdef __APPLE__
> + #include <sys/sysctl.h>
> + #endif
> ++#ifdef HAVE_FFMPEG_UNINSTALLED
> ++#include <avformat.h>
> ++#else
> ++#include <libavformat/avformat.h>
> ++#endif
> +
> + G_CONST_RETURN gchar *
> + gst_ffmpeg_get_codecid_longname (enum CodecID codec_id)
> +@@ -39,21 +44,21 @@
> + }
> +
> + gint
> +-av_smp_format_depth (enum SampleFormat smp_fmt)
> ++av_smp_format_depth (enum AVSampleFormat smp_fmt)
> + {
> + gint depth = -1;
> + switch (smp_fmt) {
> +- case SAMPLE_FMT_U8:
> ++ case AV_SAMPLE_FMT_U8:
> + depth = 1;
> + break;
> +- case SAMPLE_FMT_S16:
> ++ case AV_SAMPLE_FMT_S16:
> + depth = 2;
> + break;
> +- case SAMPLE_FMT_S32:
> +- case SAMPLE_FMT_FLT:
> ++ case AV_SAMPLE_FMT_S32:
> ++ case AV_SAMPLE_FMT_FLT:
> + depth = 4;
> + break;
> +- case SAMPLE_FMT_DBL:
> ++ case AV_SAMPLE_FMT_DBL:
> + depth = 8;
> + break;
> + default:
> +diff -uNr gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegutils.c.orig gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegutils.c.orig
> +--- gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegutils.c.orig 1970-01-01 01:00:00.000000000 +0100
> ++++ gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegutils.c.orig 2011-07-13 11:07:28.000000000 +0200
> +@@ -0,0 +1,483 @@
> ++/* GStreamer
> ++ * Copyright (c) 2009 Edward Hervey <bilboed at bilboed.com>
> ++ *
> ++ * This library is free software; you can redistribute it and/or
> ++ * modify it under the terms of the GNU Library General Public
> ++ * License as published by the Free Software Foundation; either
> ++ * version 2 of the License, or (at your option) any later version.
> ++ *
> ++ * This library is distributed in the hope that it will be useful,
> ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
> ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> ++ * Library General Public License for more details.
> ++ *
> ++ * You should have received a copy of the GNU Library General Public
> ++ * License along with this library; if not, write to the
> ++ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
> ++ * Boston, MA 02111-1307, USA.
> ++ */
> ++
> ++#ifdef HAVE_CONFIG_H
> ++#include "config.h"
> ++#endif
> ++#include "gstffmpegutils.h"
> ++#include <unistd.h>
> ++#ifdef __APPLE__
> ++#include <sys/sysctl.h>
> ++#endif
> ++
> ++G_CONST_RETURN gchar *
> ++gst_ffmpeg_get_codecid_longname (enum CodecID codec_id)
> ++{
> ++ AVCodec *codec;
> ++ /* Let's use what ffmpeg can provide us */
> ++
> ++ if ((codec = avcodec_find_decoder (codec_id)) ||
> ++ (codec = avcodec_find_encoder (codec_id)))
> ++ return codec->long_name;
> ++ return NULL;
> ++}
> ++
> ++gint
> ++av_smp_format_depth (enum SampleFormat smp_fmt)
> ++{
> ++ gint depth = -1;
> ++ switch (smp_fmt) {
> ++ case SAMPLE_FMT_U8:
> ++ depth = 1;
> ++ break;
> ++ case SAMPLE_FMT_S16:
> ++ depth = 2;
> ++ break;
> ++ case SAMPLE_FMT_S32:
> ++ case SAMPLE_FMT_FLT:
> ++ depth = 4;
> ++ break;
> ++ case SAMPLE_FMT_DBL:
> ++ depth = 8;
> ++ break;
> ++ default:
> ++ GST_ERROR ("UNHANDLED SAMPLE FORMAT !");
> ++ break;
> ++ }
> ++ return depth;
> ++}
> ++
> ++
> ++/*
> ++ * Fill in pointers to memory in a AVPicture, where
> ++ * everything is aligned by 4 (as required by X).
> ++ * This is mostly a copy from imgconvert.c with some
> ++ * small changes.
> ++ */
> ++
> ++#define FF_COLOR_RGB 0 /* RGB color space */
> ++#define FF_COLOR_GRAY 1 /* gray color space */
> ++#define FF_COLOR_YUV 2 /* YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
> ++#define FF_COLOR_YUV_JPEG 3 /* YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
> ++
> ++#define FF_PIXEL_PLANAR 0 /* each channel has one component in AVPicture */
> ++#define FF_PIXEL_PACKED 1 /* only one components containing all the channels */
> ++#define FF_PIXEL_PALETTE 2 /* one components containing indexes for a palette */
> ++
> ++typedef struct PixFmtInfo
> ++{
> ++ const char *name;
> ++ uint8_t nb_channels; /* number of channels (including alpha) */
> ++ uint8_t color_type; /* color type (see FF_COLOR_xxx constants) */
> ++ uint8_t pixel_type; /* pixel storage type (see FF_PIXEL_xxx constants) */
> ++ uint8_t is_alpha:1; /* true if alpha can be specified */
> ++ uint8_t x_chroma_shift; /* X chroma subsampling factor is 2 ^ shift */
> ++ uint8_t y_chroma_shift; /* Y chroma subsampling factor is 2 ^ shift */
> ++ uint8_t depth; /* bit depth of the color components */
> ++} PixFmtInfo;
> ++
> ++
> ++/* this table gives more information about formats */
> ++static PixFmtInfo pix_fmt_info[PIX_FMT_NB];
> ++void
> ++gst_ffmpeg_init_pix_fmt_info (void)
> ++{
> ++ /* YUV formats */
> ++ pix_fmt_info[PIX_FMT_YUV420P].name = g_strdup ("yuv420p");
> ++ pix_fmt_info[PIX_FMT_YUV420P].nb_channels = 3;
> ++ pix_fmt_info[PIX_FMT_YUV420P].color_type = FF_COLOR_YUV;
> ++ pix_fmt_info[PIX_FMT_YUV420P].pixel_type = FF_PIXEL_PLANAR;
> ++ pix_fmt_info[PIX_FMT_YUV420P].depth = 8,
> ++ pix_fmt_info[PIX_FMT_YUV420P].x_chroma_shift = 1,
> ++ pix_fmt_info[PIX_FMT_YUV420P].y_chroma_shift = 1;
> ++
> ++ pix_fmt_info[PIX_FMT_YUV422P].name = g_strdup ("yuv422p");
> ++ pix_fmt_info[PIX_FMT_YUV422P].nb_channels = 3;
> ++ pix_fmt_info[PIX_FMT_YUV422P].color_type = FF_COLOR_YUV;
> ++ pix_fmt_info[PIX_FMT_YUV422P].pixel_type = FF_PIXEL_PLANAR;
> ++ pix_fmt_info[PIX_FMT_YUV422P].depth = 8;
> ++ pix_fmt_info[PIX_FMT_YUV422P].x_chroma_shift = 1;
> ++ pix_fmt_info[PIX_FMT_YUV422P].y_chroma_shift = 0;
> ++
> ++ pix_fmt_info[PIX_FMT_YUV444P].name = g_strdup ("yuv444p");
> ++ pix_fmt_info[PIX_FMT_YUV444P].nb_channels = 3;
> ++ pix_fmt_info[PIX_FMT_YUV444P].color_type = FF_COLOR_YUV;
> ++ pix_fmt_info[PIX_FMT_YUV444P].pixel_type = FF_PIXEL_PLANAR;
> ++ pix_fmt_info[PIX_FMT_YUV444P].depth = 8;
> ++ pix_fmt_info[PIX_FMT_YUV444P].x_chroma_shift = 0;
> ++ pix_fmt_info[PIX_FMT_YUV444P].y_chroma_shift = 0;
> ++
> ++ pix_fmt_info[PIX_FMT_YUYV422].name = g_strdup ("yuv422");
> ++ pix_fmt_info[PIX_FMT_YUYV422].nb_channels = 1;
> ++ pix_fmt_info[PIX_FMT_YUYV422].color_type = FF_COLOR_YUV;
> ++ pix_fmt_info[PIX_FMT_YUYV422].pixel_type = FF_PIXEL_PACKED;
> ++ pix_fmt_info[PIX_FMT_YUYV422].depth = 8;
> ++ pix_fmt_info[PIX_FMT_YUYV422].x_chroma_shift = 1;
> ++ pix_fmt_info[PIX_FMT_YUYV422].y_chroma_shift = 0;
> ++
> ++ pix_fmt_info[PIX_FMT_YUV410P].name = g_strdup ("yuv410p");
> ++ pix_fmt_info[PIX_FMT_YUV410P].nb_channels = 3;
> ++ pix_fmt_info[PIX_FMT_YUV410P].color_type = FF_COLOR_YUV;
> ++ pix_fmt_info[PIX_FMT_YUV410P].pixel_type = FF_PIXEL_PLANAR;
> ++ pix_fmt_info[PIX_FMT_YUV410P].depth = 8;
> ++ pix_fmt_info[PIX_FMT_YUV410P].x_chroma_shift = 2;
> ++ pix_fmt_info[PIX_FMT_YUV410P].y_chroma_shift = 2;
> ++
> ++ pix_fmt_info[PIX_FMT_YUV411P].name = g_strdup ("yuv411p");
> ++ pix_fmt_info[PIX_FMT_YUV411P].nb_channels = 3;
> ++ pix_fmt_info[PIX_FMT_YUV411P].color_type = FF_COLOR_YUV;
> ++ pix_fmt_info[PIX_FMT_YUV411P].pixel_type = FF_PIXEL_PLANAR;
> ++ pix_fmt_info[PIX_FMT_YUV411P].depth = 8;
> ++ pix_fmt_info[PIX_FMT_YUV411P].x_chroma_shift = 2;
> ++ pix_fmt_info[PIX_FMT_YUV411P].y_chroma_shift = 0;
> ++
> ++ /* JPEG YUV */
> ++ pix_fmt_info[PIX_FMT_YUVJ420P].name = g_strdup ("yuvj420p");
> ++ pix_fmt_info[PIX_FMT_YUVJ420P].nb_channels = 3;
> ++ pix_fmt_info[PIX_FMT_YUVJ420P].color_type = FF_COLOR_YUV_JPEG;
> ++ pix_fmt_info[PIX_FMT_YUVJ420P].pixel_type = FF_PIXEL_PLANAR;
> ++ pix_fmt_info[PIX_FMT_YUVJ420P].depth = 8;
> ++ pix_fmt_info[PIX_FMT_YUVJ420P].x_chroma_shift = 1;
> ++ pix_fmt_info[PIX_FMT_YUVJ420P].y_chroma_shift = 1;
> ++
> ++ pix_fmt_info[PIX_FMT_YUVJ422P].name = g_strdup ("yuvj422p");
> ++ pix_fmt_info[PIX_FMT_YUVJ422P].nb_channels = 3;
> ++ pix_fmt_info[PIX_FMT_YUVJ422P].color_type = FF_COLOR_YUV_JPEG;
> ++ pix_fmt_info[PIX_FMT_YUVJ422P].pixel_type = FF_PIXEL_PLANAR;
> ++ pix_fmt_info[PIX_FMT_YUVJ422P].depth = 8;
> ++ pix_fmt_info[PIX_FMT_YUVJ422P].x_chroma_shift = 1;
> ++ pix_fmt_info[PIX_FMT_YUVJ422P].y_chroma_shift = 0;
> ++
> ++ pix_fmt_info[PIX_FMT_YUVJ444P].name = g_strdup ("yuvj444p");
> ++ pix_fmt_info[PIX_FMT_YUVJ444P].nb_channels = 3;
> ++ pix_fmt_info[PIX_FMT_YUVJ444P].color_type = FF_COLOR_YUV_JPEG;
> ++ pix_fmt_info[PIX_FMT_YUVJ444P].pixel_type = FF_PIXEL_PLANAR;
> ++ pix_fmt_info[PIX_FMT_YUVJ444P].depth = 8;
> ++ pix_fmt_info[PIX_FMT_YUVJ444P].x_chroma_shift = 0;
> ++ pix_fmt_info[PIX_FMT_YUVJ444P].y_chroma_shift = 0;
> ++
> ++ /* RGB formats */
> ++ pix_fmt_info[PIX_FMT_RGB24].name = g_strdup ("rgb24");
> ++ pix_fmt_info[PIX_FMT_RGB24].nb_channels = 3;
> ++ pix_fmt_info[PIX_FMT_RGB24].color_type = FF_COLOR_RGB;
> ++ pix_fmt_info[PIX_FMT_RGB24].pixel_type = FF_PIXEL_PACKED;
> ++ pix_fmt_info[PIX_FMT_RGB24].depth = 8;
> ++ pix_fmt_info[PIX_FMT_RGB24].x_chroma_shift = 0;
> ++ pix_fmt_info[PIX_FMT_RGB24].y_chroma_shift = 0;
> ++
> ++ pix_fmt_info[PIX_FMT_BGR24].name = g_strdup ("bgr24");
> ++ pix_fmt_info[PIX_FMT_BGR24].nb_channels = 3;
> ++ pix_fmt_info[PIX_FMT_BGR24].color_type = FF_COLOR_RGB;
> ++ pix_fmt_info[PIX_FMT_BGR24].pixel_type = FF_PIXEL_PACKED;
> ++ pix_fmt_info[PIX_FMT_BGR24].depth = 8;
> ++ pix_fmt_info[PIX_FMT_BGR24].x_chroma_shift = 0;
> ++ pix_fmt_info[PIX_FMT_BGR24].y_chroma_shift = 0;
> ++
> ++ pix_fmt_info[PIX_FMT_RGB32].name = g_strdup ("rgba32");
> ++ pix_fmt_info[PIX_FMT_RGB32].nb_channels = 4;
> ++ pix_fmt_info[PIX_FMT_RGB32].is_alpha = 1;
> ++ pix_fmt_info[PIX_FMT_RGB32].color_type = FF_COLOR_RGB;
> ++ pix_fmt_info[PIX_FMT_RGB32].pixel_type = FF_PIXEL_PACKED;
> ++ pix_fmt_info[PIX_FMT_RGB32].depth = 8;
> ++ pix_fmt_info[PIX_FMT_RGB32].x_chroma_shift = 0;
> ++ pix_fmt_info[PIX_FMT_RGB32].y_chroma_shift = 0;
> ++
> ++ pix_fmt_info[PIX_FMT_RGB565].name = g_strdup ("rgb565");
> ++ pix_fmt_info[PIX_FMT_RGB565].nb_channels = 3;
> ++ pix_fmt_info[PIX_FMT_RGB565].color_type = FF_COLOR_RGB;
> ++ pix_fmt_info[PIX_FMT_RGB565].pixel_type = FF_PIXEL_PACKED;
> ++ pix_fmt_info[PIX_FMT_RGB565].depth = 5;
> ++ pix_fmt_info[PIX_FMT_RGB565].x_chroma_shift = 0;
> ++ pix_fmt_info[PIX_FMT_RGB565].y_chroma_shift = 0;
> ++
> ++ pix_fmt_info[PIX_FMT_RGB555].name = g_strdup ("rgb555");
> ++ pix_fmt_info[PIX_FMT_RGB555].nb_channels = 4;
> ++ pix_fmt_info[PIX_FMT_RGB555].is_alpha = 1;
> ++ pix_fmt_info[PIX_FMT_RGB555].color_type = FF_COLOR_RGB;
> ++ pix_fmt_info[PIX_FMT_RGB555].pixel_type = FF_PIXEL_PACKED;
> ++ pix_fmt_info[PIX_FMT_RGB555].depth = 5;
> ++ pix_fmt_info[PIX_FMT_RGB555].x_chroma_shift = 0;
> ++ pix_fmt_info[PIX_FMT_RGB555].y_chroma_shift = 0;
> ++
> ++ /* gray / mono formats */
> ++ pix_fmt_info[PIX_FMT_GRAY8].name = g_strdup ("gray");
> ++ pix_fmt_info[PIX_FMT_GRAY8].nb_channels = 1;
> ++ pix_fmt_info[PIX_FMT_GRAY8].color_type = FF_COLOR_GRAY;
> ++ pix_fmt_info[PIX_FMT_GRAY8].pixel_type = FF_PIXEL_PLANAR;
> ++ pix_fmt_info[PIX_FMT_GRAY8].depth = 8;
> ++
> ++ pix_fmt_info[PIX_FMT_MONOWHITE].name = g_strdup ("monow");
> ++ pix_fmt_info[PIX_FMT_MONOWHITE].nb_channels = 1;
> ++ pix_fmt_info[PIX_FMT_MONOWHITE].color_type = FF_COLOR_GRAY;
> ++ pix_fmt_info[PIX_FMT_MONOWHITE].pixel_type = FF_PIXEL_PLANAR;
> ++ pix_fmt_info[PIX_FMT_MONOWHITE].depth = 1;
> ++
> ++ pix_fmt_info[PIX_FMT_MONOBLACK].name = g_strdup ("monob");
> ++ pix_fmt_info[PIX_FMT_MONOBLACK].nb_channels = 1;
> ++ pix_fmt_info[PIX_FMT_MONOBLACK].color_type = FF_COLOR_GRAY;
> ++ pix_fmt_info[PIX_FMT_MONOBLACK].pixel_type = FF_PIXEL_PLANAR;
> ++ pix_fmt_info[PIX_FMT_MONOBLACK].depth = 1;
> ++
> ++ /* paletted formats */
> ++ pix_fmt_info[PIX_FMT_PAL8].name = g_strdup ("pal8");
> ++ pix_fmt_info[PIX_FMT_PAL8].nb_channels = 4;
> ++ pix_fmt_info[PIX_FMT_PAL8].is_alpha = 1;
> ++ pix_fmt_info[PIX_FMT_PAL8].color_type = FF_COLOR_RGB;
> ++ pix_fmt_info[PIX_FMT_PAL8].pixel_type = FF_PIXEL_PALETTE;
> ++ pix_fmt_info[PIX_FMT_PAL8].depth = 8;
> ++
> ++ pix_fmt_info[PIX_FMT_YUVA420P].name = g_strdup ("yuva420p");
> ++ pix_fmt_info[PIX_FMT_YUVA420P].nb_channels = 4;
> ++ pix_fmt_info[PIX_FMT_YUVA420P].is_alpha = 1;
> ++ pix_fmt_info[PIX_FMT_YUVA420P].color_type = FF_COLOR_YUV;
> ++ pix_fmt_info[PIX_FMT_YUVA420P].pixel_type = FF_PIXEL_PLANAR;
> ++ pix_fmt_info[PIX_FMT_YUVA420P].depth = 8,
> ++ pix_fmt_info[PIX_FMT_YUVA420P].x_chroma_shift = 1,
> ++ pix_fmt_info[PIX_FMT_YUVA420P].y_chroma_shift = 1;
> ++};
> ++
> ++int
> ++gst_ffmpeg_avpicture_get_size (int pix_fmt, int width, int height)
> ++{
> ++ AVPicture dummy_pict;
> ++
> ++ return gst_ffmpeg_avpicture_fill (&dummy_pict, NULL, pix_fmt, width, height);
> ++}
> ++
> ++#define GEN_MASK(x) ((1<<(x))-1)
> ++#define ROUND_UP_X(v,x) (((v) + GEN_MASK(x)) & ~GEN_MASK(x))
> ++#define ROUND_UP_2(x) ROUND_UP_X (x, 1)
> ++#define ROUND_UP_4(x) ROUND_UP_X (x, 2)
> ++#define ROUND_UP_8(x) ROUND_UP_X (x, 3)
> ++#define DIV_ROUND_UP_X(v,x) (((v) + GEN_MASK(x)) >> (x))
> ++
> ++int
> ++gst_ffmpeg_avpicture_fill (AVPicture * picture,
> ++ uint8_t * ptr, enum PixelFormat pix_fmt, int width, int height)
> ++{
> ++ int size, w2, h2, size2;
> ++ int stride, stride2;
> ++ PixFmtInfo *pinfo;
> ++
> ++ pinfo = &pix_fmt_info[pix_fmt];
> ++
> ++ switch (pix_fmt) {
> ++ case PIX_FMT_YUV420P:
> ++ case PIX_FMT_YUV422P:
> ++ case PIX_FMT_YUV444P:
> ++ case PIX_FMT_YUV410P:
> ++ case PIX_FMT_YUV411P:
> ++ case PIX_FMT_YUVJ420P:
> ++ case PIX_FMT_YUVJ422P:
> ++ case PIX_FMT_YUVJ444P:
> ++ stride = ROUND_UP_4 (width);
> ++ h2 = ROUND_UP_X (height, pinfo->y_chroma_shift);
> ++ size = stride * h2;
> ++ w2 = DIV_ROUND_UP_X (width, pinfo->x_chroma_shift);
> ++ stride2 = ROUND_UP_4 (w2);
> ++ h2 = DIV_ROUND_UP_X (height, pinfo->y_chroma_shift);
> ++ size2 = stride2 * h2;
> ++ picture->data[0] = ptr;
> ++ picture->data[1] = picture->data[0] + size;
> ++ picture->data[2] = picture->data[1] + size2;
> ++ picture->data[3] = NULL;
> ++ picture->linesize[0] = stride;
> ++ picture->linesize[1] = stride2;
> ++ picture->linesize[2] = stride2;
> ++ picture->linesize[3] = 0;
> ++ GST_DEBUG ("planes %d %d %d", 0, size, size + size2);
> ++ GST_DEBUG ("strides %d %d %d", stride, stride2, stride2);
> ++ return size + 2 * size2;
> ++ case PIX_FMT_YUVA420P:
> ++ stride = ROUND_UP_4 (width);
> ++ h2 = ROUND_UP_X (height, pinfo->y_chroma_shift);
> ++ size = stride * h2;
> ++ w2 = DIV_ROUND_UP_X (width, pinfo->x_chroma_shift);
> ++ stride2 = ROUND_UP_4 (w2);
> ++ h2 = DIV_ROUND_UP_X (height, pinfo->y_chroma_shift);
> ++ size2 = stride2 * h2;
> ++ picture->data[0] = ptr;
> ++ picture->data[1] = picture->data[0] + size;
> ++ picture->data[2] = picture->data[1] + size2;
> ++ picture->data[3] = picture->data[2] + size2;
> ++ picture->linesize[0] = stride;
> ++ picture->linesize[1] = stride2;
> ++ picture->linesize[2] = stride2;
> ++ picture->linesize[3] = stride;
> ++ GST_DEBUG ("planes %d %d %d %d", 0, size, size + size2, size + 2 * size2);
> ++ GST_DEBUG ("strides %d %d %d %d", stride, stride2, stride2, stride);
> ++ return 2 * size + 2 * size2;
> ++ case PIX_FMT_RGB24:
> ++ case PIX_FMT_BGR24:
> ++ stride = ROUND_UP_4 (width * 3);
> ++ size = stride * height;
> ++ picture->data[0] = ptr;
> ++ picture->data[1] = NULL;
> ++ picture->data[2] = NULL;
> ++ picture->data[3] = NULL;
> ++ picture->linesize[0] = stride;
> ++ picture->linesize[1] = 0;
> ++ picture->linesize[2] = 0;
> ++ picture->linesize[3] = 0;
> ++ return size;
> ++ /*case PIX_FMT_AYUV4444:
> ++ case PIX_FMT_BGR32:
> ++ case PIX_FMT_BGRA32:
> ++ case PIX_FMT_RGB32: */
> ++ case PIX_FMT_RGB32:
> ++ stride = width * 4;
> ++ size = stride * height;
> ++ picture->data[0] = ptr;
> ++ picture->data[1] = NULL;
> ++ picture->data[2] = NULL;
> ++ picture->data[3] = NULL;
> ++ picture->linesize[0] = stride;
> ++ picture->linesize[1] = 0;
> ++ picture->linesize[2] = 0;
> ++ picture->linesize[3] = 0;
> ++ return size;
> ++ case PIX_FMT_RGB555:
> ++ case PIX_FMT_RGB565:
> ++ case PIX_FMT_YUYV422:
> ++ case PIX_FMT_UYVY422:
> ++ stride = ROUND_UP_4 (width * 2);
> ++ size = stride * height;
> ++ picture->data[0] = ptr;
> ++ picture->data[1] = NULL;
> ++ picture->data[2] = NULL;
> ++ picture->data[3] = NULL;
> ++ picture->linesize[0] = stride;
> ++ picture->linesize[1] = 0;
> ++ picture->linesize[2] = 0;
> ++ picture->linesize[3] = 0;
> ++ return size;
> ++ case PIX_FMT_UYYVYY411:
> ++ /* FIXME, probably not the right stride */
> ++ stride = ROUND_UP_4 (width);
> ++ size = stride * height;
> ++ picture->data[0] = ptr;
> ++ picture->data[1] = NULL;
> ++ picture->data[2] = NULL;
> ++ picture->data[3] = NULL;
> ++ picture->linesize[0] = width + width / 2;
> ++ picture->linesize[1] = 0;
> ++ picture->linesize[2] = 0;
> ++ picture->linesize[3] = 0;
> ++ return size + size / 2;
> ++ case PIX_FMT_GRAY8:
> ++ stride = ROUND_UP_4 (width);
> ++ size = stride * height;
> ++ picture->data[0] = ptr;
> ++ picture->data[1] = NULL;
> ++ picture->data[2] = NULL;
> ++ picture->data[3] = NULL;
> ++ picture->linesize[0] = stride;
> ++ picture->linesize[1] = 0;
> ++ picture->linesize[2] = 0;
> ++ picture->linesize[3] = 0;
> ++ return size;
> ++ case PIX_FMT_MONOWHITE:
> ++ case PIX_FMT_MONOBLACK:
> ++ stride = ROUND_UP_4 ((width + 7) >> 3);
> ++ size = stride * height;
> ++ picture->data[0] = ptr;
> ++ picture->data[1] = NULL;
> ++ picture->data[2] = NULL;
> ++ picture->data[3] = NULL;
> ++ picture->linesize[0] = stride;
> ++ picture->linesize[1] = 0;
> ++ picture->linesize[2] = 0;
> ++ picture->linesize[3] = 0;
> ++ return size;
> ++ case PIX_FMT_PAL8:
> ++ /* already forced to be with stride, so same result as other function */
> ++ stride = ROUND_UP_4 (width);
> ++ size = stride * height;
> ++ picture->data[0] = ptr;
> ++ picture->data[1] = ptr + size; /* palette is stored here as 256 32 bit words */
> ++ picture->data[2] = NULL;
> ++ picture->data[3] = NULL;
> ++ picture->linesize[0] = stride;
> ++ picture->linesize[1] = 4;
> ++ picture->linesize[2] = 0;
> ++ picture->linesize[3] = 0;
> ++ return size + 256 * 4;
> ++ default:
> ++ picture->data[0] = NULL;
> ++ picture->data[1] = NULL;
> ++ picture->data[2] = NULL;
> ++ picture->data[3] = NULL;
> ++ return -1;
> ++ }
> ++
> ++ return 0;
> ++}
> ++
> ++/* Create a GstBuffer of the requested size and caps.
> ++ * The memory will be allocated by ffmpeg, making sure it's properly aligned
> ++ * for any processing. */
> ++
> ++GstBuffer *
> ++new_aligned_buffer (gint size, GstCaps * caps)
> ++{
> ++ GstBuffer *buf;
> ++
> ++ buf = gst_buffer_new ();
> ++ GST_BUFFER_DATA (buf) = GST_BUFFER_MALLOCDATA (buf) = av_malloc (size);
> ++ GST_BUFFER_SIZE (buf) = size;
> ++ GST_BUFFER_FREE_FUNC (buf) = av_free;
> ++ if (caps)
> ++ gst_buffer_set_caps (buf, caps);
> ++
> ++ return buf;
> ++}
> ++
> ++int
> ++gst_ffmpeg_auto_max_threads (void)
> ++{
> ++ static gsize n_threads = 0;
> ++ if (g_once_init_enter (&n_threads)) {
> ++ int n = 1;
> ++#if defined(_WIN32)
> ++ {
> ++ const char *s = getenv ("NUMBER_OF_PROCESSORS");
> ++ if (s) {
> ++ n = atoi (s);
> ++ }
> ++ }
> ++#elif defined(__APPLE__)
> ++ {
> ++ int mib[] = { CTL_HW, HW_NCPU };
> ++ size_t dataSize = sizeof (int);
> ++
> ++ if (sysctl (mib, 2, &n_threads, &dataSize, NULL, 0)) {
> ++ n = 1;
> ++ }
> ++ }
> ++#else
> ++ n = sysconf (_SC_NPROCESSORS_CONF);
> ++#endif
> ++ if (n < 1)
> ++ n = 1;
> ++
> ++ g_once_init_leave (&n_threads, n);
> ++ }
> ++
> ++ return (int) (n_threads);
> ++}
> +diff -uNr gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegutils.h gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegutils.h
> +--- gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegutils.h 2011-11-02 14:04:05.000000000 +0100
> ++++ gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegutils.h 2014-08-08 15:34:04.007874626 +0200
> +@@ -23,6 +23,7 @@
> + #ifdef HAVE_FFMPEG_UNINSTALLED
> + #include <avcodec.h>
> + #else
> ++#include <libavutil/mathematics.h>
> + #include <libavcodec/avcodec.h>
> + #endif
> + #include <gst/gst.h>
> +@@ -87,7 +88,7 @@
> + gst_ffmpeg_get_codecid_longname (enum CodecID codec_id);
> +
> + gint
> +-av_smp_format_depth(enum SampleFormat smp_fmt);
> ++av_smp_format_depth(enum AVSampleFormat smp_fmt);
> +
> + GstBuffer *
> + new_aligned_buffer (gint size, GstCaps * caps);
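
[Inline note, not part of the patch:] The prototype change above is needed because the old enum SampleFormat spelling is gone in libav 9 and only enum AVSampleFormat remains. As a side note — an assumption on my part, not something this patch does — the hand-written depth switch in gstffmpegutils.c could also be expressed with the public samplefmt API on libav 9:

    #include <glib.h>
    #include <libavutil/samplefmt.h>

    /* Hypothetical alternative to the switch statement:
     * av_get_bytes_per_sample() returns 1/2/4/8 for U8/S16/S32,FLT/DBL and
     * 0 for unknown formats, which maps back onto the -1 error value the
     * GStreamer code expects. */
    static gint
    smp_format_depth_libav9 (enum AVSampleFormat smp_fmt)
    {
      int depth = av_get_bytes_per_sample (smp_fmt);
      return depth > 0 ? depth : -1;
    }
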
> +diff -uNr gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegutils.h.orig gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegutils.h.orig
> +--- gst-ffmpeg-0.10.13.orig/ext/ffmpeg/gstffmpegutils.h.orig 1970-01-01 01:00:00.000000000 +0100
> ++++ gst-ffmpeg-0.10.13/ext/ffmpeg/gstffmpegutils.h.orig 2014-08-08 15:26:38.473858652 +0200
> +@@ -0,0 +1,95 @@
> ++/* GStreamer
> ++ * Copyright (C) <2009> Edward Hervey <bilboed at bilboed.com>
> ++ *
> ++ * This library is free software; you can redistribute it and/or
> ++ * modify it under the terms of the GNU Library General Public
> ++ * License as published by the Free Software Foundation; either
> ++ * version 2 of the License, or (at your option) any later version.
> ++ *
> ++ * This library is distributed in the hope that it will be useful,
> ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
> ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> ++ * Library General Public License for more details.
> ++ *
> ++ * You should have received a copy of the GNU Library General Public
> ++ * License along with this library; if not, write to the
> ++ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
> ++ * Boston, MA 02111-1307, USA.
> ++ */
> ++
> ++#ifndef __GST_FFMPEG_UTILS_H__
> ++#define __GST_FFMPEG_UTILS_H__
> ++
> ++#ifdef HAVE_FFMPEG_UNINSTALLED
> ++#include <avcodec.h>
> ++#else
> ++#include <libavcodec/avcodec.h>
> ++#endif
> ++#include <gst/gst.h>
> ++
> ++/*
> ++ *Get the size of an picture
> ++ */
> ++int
> ++gst_ffmpeg_avpicture_get_size (int pix_fmt, int width, int height);
> ++
> ++/*
> ++ * Fill in pointers in an AVPicture, aligned by 4 (required by X).
> ++ */
> ++
> ++int
> ++gst_ffmpeg_avpicture_fill (AVPicture * picture,
> ++ uint8_t * ptr,
> ++ enum PixelFormat pix_fmt,
> ++ int width,
> ++ int height);
> ++
> ++/*
> ++ * Convert from/to a GStreamer <-> FFMpeg timestamp.
> ++ */
> ++static inline guint64
> ++gst_ffmpeg_time_ff_to_gst (gint64 pts, AVRational base)
> ++{
> ++ guint64 out;
> ++
> ++ if (pts == AV_NOPTS_VALUE){
> ++ out = GST_CLOCK_TIME_NONE;
> ++ } else {
> ++ AVRational bq = { 1, GST_SECOND };
> ++ out = av_rescale_q (pts, base, bq);
> ++ }
> ++
> ++ return out;
> ++}
> ++
> ++static inline gint64
> ++gst_ffmpeg_time_gst_to_ff (guint64 time, AVRational base)
> ++{
> ++ gint64 out;
> ++
> ++ if (!GST_CLOCK_TIME_IS_VALID (time) || base.num == 0) {
> ++ out = AV_NOPTS_VALUE;
> ++ } else {
> ++ AVRational bq = { 1, GST_SECOND };
> ++ out = av_rescale_q (time, bq, base);
> ++ }
> ++
> ++ return out;
> ++}
> ++
> ++void
> ++gst_ffmpeg_init_pix_fmt_info(void);
> ++
> ++int
> ++gst_ffmpeg_auto_max_threads(void);
> ++
> ++G_CONST_RETURN gchar *
> ++gst_ffmpeg_get_codecid_longname (enum CodecID codec_id);
> ++
> ++gint
> ++av_smp_format_depth(enum AVSampleFormat smp_fmt);
> ++
> ++GstBuffer *
> ++new_aligned_buffer (gint size, GstCaps * caps);
> ++
> ++#endif /* __GST_FFMPEG_UTILS_H__ */
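
[Inline note, not part of the patch:] The two inline helpers above only rescale between the codec time base and GStreamer's nanosecond clock. An illustrative, self-contained example of the arithmetic, with hypothetical values (the real code uses av_rescale_q(), which also handles rounding and overflow):

    #include <stdio.h>
    #include <stdint.h>

    int
    main (void)
    {
      int64_t pts = 50;              /* hypothetical frame pts */
      int64_t num = 1, den = 25;     /* AVRational time base of 1/25 s */

      /* gst_ffmpeg_time_ff_to_gst() does av_rescale_q (pts, base, bq) with
       * bq = {1, GST_SECOND}; for these simple values that is just
       * pts * num / den expressed in nanoseconds */
      int64_t ns = pts * num * 1000000000LL / den;

      printf ("pts %lld in a 1/25 s time base = %lld ns (2 s)\n",
          (long long) pts, (long long) ns);
      return 0;
    }
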
> diff --git a/meta/recipes-multimedia/gstreamer/gst-ffmpeg_0.10.13.bb b/meta/recipes-multimedia/gstreamer/gst-ffmpeg_0.10.13.bb
> index 748157a..bd7b8ce 100644
> --- a/meta/recipes-multimedia/gstreamer/gst-ffmpeg_0.10.13.bb
> +++ b/meta/recipes-multimedia/gstreamer/gst-ffmpeg_0.10.13.bb
> @@ -53,6 +53,7 @@ SRC_URI = "http://gstreamer.freedesktop.org/src/${BPN}/${BPN}-${PV}.tar.bz2 \
> file://0001-qdm2-check-array-index-before-use-fix-out-of-array-a.patch \
> file://0001-lavf-compute-probe-buffer-size-more-reliably.patch \
> file://0001-ffserver-set-oformat.patch \
> + ${@bb.utils.contains('PACKAGECONFIG', 'libav9', 'file://libav-9.patch', '', d)} \
> "
>
> SRC_URI[md5sum] = "7f5beacaf1312db2db30a026b36888c4"
> @@ -79,6 +80,7 @@ EXTRA_OECONF = "${FFMPEG_EXTRA_CONFIGURE_COMMON}"
> PACKAGECONFIG ??= "external-libav"
> PACKAGECONFIG[external-libav] = "--with-system-ffmpeg,,libav"
> PACKAGECONFIG[orc] = "--enable-orc,--disable-orc,orc"
> +PACKAGECONFIG[libav9] = ",,,"
>
> FILES_${PN} += "${libdir}/gstreamer-0.10/*.so"
> FILES_${PN}-dbg += "${libdir}/gstreamer-0.10/.debug"
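
[Inline note on the recipe side:] PACKAGECONFIG[libav9] = ",,," declares a pure feature flag — the configure-enable, configure-disable, build-dependency and runtime-dependency fields are all empty — so selecting it only affects the bb.utils.contains() test above, which then appends libav-9.patch to SRC_URI. It would be switched on from a distro or local configuration, e.g. something like (hypothetical example, presumably together with external-libav when building against a system libav 9):

    PACKAGECONFIG_append_pn-gst-ffmpeg = " libav9"
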
> --
> 2.0.4
>
--
Martin 'JaMa' Jansa jabber: Martin.Jansa at gmail.com
-------------- next part --------------
A non-text attachment was scrubbed...
Name: signature.asc
Type: application/pgp-signature
Size: 181 bytes
Desc: Digital signature
URL: <http://lists.openembedded.org/pipermail/openembedded-core/attachments/20140816/f960cfdf/attachment-0002.sig>